mirror of
https://github.com/mattermost/mattermost.git
synced 2025-02-25 18:55:24 -06:00
PLT-6398: Add dependency on go elastic search library. (#6340)
This commit is contained in:
committed by
Joram Wilander
parent
b25021b912
commit
622998add1
6
glide.lock
generated
6
glide.lock
generated
@@ -1,5 +1,5 @@
|
||||
hash: 024025a74451af061cc996e2b9192bad74cfd0952515fab332e3ad2968606909
|
||||
updated: 2017-04-24T11:23:43.046980248-04:00
|
||||
hash: bec476d3a62ec36a3192e697194c749894d955f718ab1688b1e399a0691510e3
|
||||
updated: 2017-05-05T11:07:23.413127083+01:00
|
||||
imports:
|
||||
- name: github.com/alecthomas/log4go
|
||||
version: 3fbce08846379ec7f4f6bc7fce6dd01ce28fae4c
|
||||
@@ -189,6 +189,8 @@ imports:
|
||||
- rate
|
||||
- name: gopkg.in/asn1-ber.v1
|
||||
version: 4e86f4367175e39f69d9358a5f17b4dda270378d
|
||||
- name: gopkg.in/olivere/elastic.v5
|
||||
version: 6f76abde0f1f149b7d6fc47aee4c54366844f775
|
||||
- name: gopkg.in/square/go-jose.v1
|
||||
version: aa2e30fdd1fe9dd3394119af66451ae790d50e0d
|
||||
subpackages:
|
||||
|
||||
11
glide.yaml
11
glide.yaml
@@ -59,9 +59,6 @@ import:
|
||||
version: v2.0.3
|
||||
subpackages:
|
||||
- store/memstore
|
||||
- package: github.com/segmentio/analytics-go
|
||||
version: 2.1.1
|
||||
- package: github.com/rsc/letsencrypt
|
||||
- package: github.com/prometheus/client_golang
|
||||
version: v0.8.0
|
||||
subpackages:
|
||||
@@ -83,10 +80,6 @@ import:
|
||||
subpackages:
|
||||
- pbutil
|
||||
- package: github.com/prometheus/procfs
|
||||
- package: github.com/spf13/cobra
|
||||
- package: github.com/spf13/pflag
|
||||
- package: github.com/dyatlov/go-opengraph
|
||||
subpackages:
|
||||
- opengraph
|
||||
- package: github.com/cpanato/html2text
|
||||
|
||||
- package: gopkg.in/olivere/elastic.v5
|
||||
version: v5.0.36
|
||||
|
||||
31
vendor/gopkg.in/olivere/elastic.v5/.gitignore
generated
vendored
Normal file
31
vendor/gopkg.in/olivere/elastic.v5/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
|
||||
/generator
|
||||
/cluster-test/cluster-test
|
||||
/cluster-test/*.log
|
||||
/cluster-test/es-chaos-monkey
|
||||
/spec
|
||||
/tmp
|
||||
/CHANGELOG-3.0.html
|
||||
|
||||
17
vendor/gopkg.in/olivere/elastic.v5/.travis.yml
generated
vendored
Normal file
17
vendor/gopkg.in/olivere/elastic.v5/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
sudo: required
|
||||
language: go
|
||||
go:
|
||||
- 1.7
|
||||
- 1.8
|
||||
- tip
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
services:
|
||||
- docker
|
||||
before_script:
|
||||
- mkdir -p /tmp/elasticsearch/config
|
||||
- cp -r config/* /tmp/elasticsearch/config/
|
||||
- sudo sysctl -w vm.max_map_count=262144
|
||||
- docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "/tmp/elasticsearch/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.3.0 elasticsearch >& /dev/null &
|
||||
- sleep 15
|
||||
363
vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-3.0.md
generated
vendored
Normal file
363
vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-3.0.md
generated
vendored
Normal file
@@ -0,0 +1,363 @@
|
||||
# Elastic 3.0
|
||||
|
||||
Elasticsearch 2.0 comes with some [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html). You will probably need to upgrade your application and/or rewrite part of it due to those changes.
|
||||
|
||||
We use that window of opportunity to also update Elastic (the Go client) from version 2.0 to 3.0. This will introduce both changes due to the Elasticsearch 2.0 update as well as changes that make Elastic cleaner by removing some old cruft.
|
||||
|
||||
So, to summarize:
|
||||
|
||||
1. Elastic 2.0 is compatible with Elasticsearch 1.7+ and is still actively maintained.
|
||||
2. Elastic 3.0 is compatible with Elasticsearch 2.0+ and will soon become the new master branch.
|
||||
|
||||
The rest of the document is a list of all changes in Elastic 3.0.
|
||||
|
||||
## Pointer types
|
||||
|
||||
All types have changed to be pointer types, not value types. This not only is cleaner but also simplifies the API as illustrated by the following example:
|
||||
|
||||
Example for Elastic 2.0 (old):
|
||||
|
||||
```go
|
||||
q := elastic.NewMatchAllQuery()
|
||||
res, err := elastic.Search("one").Query(&q).Do() // notice the & here
|
||||
```
|
||||
|
||||
Example for Elastic 3.0 (new):
|
||||
|
||||
```go
|
||||
q := elastic.NewMatchAllQuery()
|
||||
res, err := elastic.Search("one").Query(q).Do() // no more &
|
||||
// ... which can be simplified as:
|
||||
res, err := elastic.Search("one").Query(elastic.NewMatchAllQuery()).Do()
|
||||
```
|
||||
|
||||
It also helps to prevent [subtle issues](https://github.com/olivere/elastic/issues/115#issuecomment-130753046).
|
||||
|
||||
## Query/filter merge
|
||||
|
||||
One of the biggest changes in Elasticsearch 2.0 is the [merge of queries and filters](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_queries_and_filters_merged). In Elasticsearch 1.x, you had a whole range of queries and filters that were basically identical (e.g. `term_query` and `term_filter`).
|
||||
|
||||
The practical aspect of the merge is that you can now basically use queries where once you had to use filters instead. For Elastic 3.0 this means: We could remove a whole bunch of files. Yay!
|
||||
|
||||
Notice that some methods still come by "filter", e.g. `PostFilter`. However, they accept a `Query` now when they used to accept a `Filter` before.
|
||||
|
||||
Example for Elastic 2.0 (old):
|
||||
|
||||
```go
|
||||
q := elastic.NewMatchAllQuery()
|
||||
f := elastic.NewTermFilter("tag", "important")
|
||||
res, err := elastic.Search().Index("one").Query(&q).PostFilter(f)
|
||||
```
|
||||
|
||||
Example for Elastic 3.0 (new):
|
||||
|
||||
```go
|
||||
q := elastic.NewMatchAllQuery()
|
||||
f := elastic.NewTermQuery("tag", "important") // it's a query now!
|
||||
res, err := elastic.Search().Index("one").Query(q).PostFilter(f)
|
||||
```
|
||||
|
||||
## Facets are removed
|
||||
|
||||
[Facets have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_facets_have_been_removed) in Elasticsearch 2.0. You need to use aggregations now.
|
||||
|
||||
## Errors
|
||||
|
||||
Elasticsearch 2.0 returns more information about an error in the HTTP response body. Elastic 3.0 now reads this information and makes it accessible by the consumer.
|
||||
|
||||
Errors and all its details are now returned in [`Error`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59).
|
||||
|
||||
### HTTP Status 404 (Not Found)
|
||||
|
||||
When Elasticsearch does not find an entity or an index, it generally returns HTTP status code 404. In Elastic 2.0 this was a valid result and didn't raise an error from the `Do` functions. This has now changed in Elastic 3.0.
|
||||
|
||||
Starting with Elastic 3.0, there are only two types of responses considered successful. First, responses with HTTP status codes [200..299]. Second, HEAD requests which return HTTP status 404. The latter is used by Elasticsearch to e.g. check for existence of indices or documents. All other responses will return an error.
|
||||
|
||||
To check for HTTP Status 404 (with non-HEAD requests), e.g. when trying to get or delete a missing document, you can use the [`IsNotFound`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L84) helper (see below).
|
||||
|
||||
The following example illustrates how to check for a missing document in Elastic 2.0 and what has changed in 3.0.
|
||||
|
||||
Example for Elastic 2.0 (old):
|
||||
|
||||
```go
|
||||
res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do()
|
||||
if err != nil {
|
||||
// Something else went wrong (but 404 is NOT an error in Elastic 2.0)
|
||||
}
|
||||
if !res.Found {
|
||||
// Document has not been found
|
||||
}
|
||||
```
|
||||
|
||||
Example for Elastic 3.0 (new):
|
||||
|
||||
```go
|
||||
res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do()
|
||||
if err != nil {
|
||||
if elastic.IsNotFound(err) {
|
||||
// Document has not been found
|
||||
} else {
|
||||
// Something else went wrong
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### HTTP Status 408 (Timeouts)
|
||||
|
||||
Elasticsearch now responds with HTTP status code 408 (Timeout) when a request fails due to a timeout. E.g. if you specify a timeout with the Cluster Health API, the HTTP response status will be 408 if the timeout is raised. See [here](https://github.com/elastic/elasticsearch/commit/fe3179d9cccb569784434b2135ca9ae13d5158d3) for the specific commit to the Cluster Health API.
|
||||
|
||||
To check for HTTP Status 408, we introduced the [`IsTimeout`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L101) helper.
|
||||
|
||||
Example for Elastic 2.0 (old):
|
||||
|
||||
```go
|
||||
health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
|
||||
if err != nil {
|
||||
// ...
|
||||
}
|
||||
if health.TimedOut {
|
||||
// We have a timeout
|
||||
}
|
||||
```
|
||||
|
||||
Example for Elastic 3.0 (new):
|
||||
|
||||
```go
|
||||
health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
|
||||
if elastic.IsTimeout(err) {
|
||||
// We have a timeout
|
||||
}
|
||||
```
|
||||
|
||||
### Bulk Errors
|
||||
|
||||
The error response of a bulk operation used to be a simple string in Elasticsearch 1.x.
|
||||
In Elasticsearch 2.0, it returns a structured JSON object with a lot more details about the error.
|
||||
These errors are now captured in an object of type [`ErrorDetails`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59) which is used in [`BulkResponseItem`](https://github.com/olivere/elastic/blob/release-branch.v3/bulk.go#L206).
|
||||
|
||||
### Removed specific Elastic errors
|
||||
|
||||
The specific error types `ErrMissingIndex`, `ErrMissingType`, and `ErrMissingId` have been removed. They were only used by `DeleteService` and are replaced by a generic error message.
|
||||
|
||||
## Numeric types
|
||||
|
||||
Elastic 3.0 has settled to use `float64` everywhere. It used to be a mix of `float32` and `float64` in Elastic 2.0. E.g. all boostable queries in Elastic 3.0 now have a boost type of `float64` where it used to be `float32`.
|
||||
|
||||
## Pluralization
|
||||
|
||||
Some services accept zero, one or more indices or types to operate on.
|
||||
E.g. in the `SearchService` accepts a list of zero, one, or more indices to
|
||||
search and therefore had a func
|
||||
called `Indices(indices ...string)`.
|
||||
|
||||
Elastic 3.0 now only uses the singular form that, when applicable, accepts a
|
||||
variadic type. E.g. in the case of the `SearchService`, you now only have
|
||||
one func with the following signature: `Index(indices ...string)`.
|
||||
|
||||
Notice this is only limited to `Index(...)` and `Type(...)`. There are other
|
||||
services with variadic functions. These have not been changed.
|
||||
|
||||
## Multiple calls to variadic functions
|
||||
|
||||
Some services with variadic functions have cleared the underlying slice when
|
||||
called while other services just add to the existing slice. This has now been
|
||||
normalized to always add to the underlying slice.
|
||||
|
||||
Example for Elastic 2.0 (old):
|
||||
|
||||
```go
|
||||
// Would only clear scroll id "two"
|
||||
// because ScrollId cleared the values when called multiple times
|
||||
client.ClearScroll().ScrollId("one").ScrollId("two").Do()
|
||||
```
|
||||
|
||||
Example for Elastic 3.0 (new):
|
||||
|
||||
```go
|
||||
// Now (correctly) clears both scroll id "one" and "two"
|
||||
// because ScrollId no longer clears the values when called multiple times
|
||||
client.ClearScroll().ScrollId("one").ScrollId("two").Do()
|
||||
```
|
||||
|
||||
## Ping service requires URL
|
||||
|
||||
The `Ping` service raised some issues because it is different from all
|
||||
other services. If not explicitly given a URL, it always pings `127.0.0.1:9200`.
|
||||
|
||||
Users expected to ping the cluster, but that is not possible as the cluster
|
||||
can be a set of many nodes: So which node do we ping then?
|
||||
|
||||
To make it more clear, the `Ping` function on the client now requires users
|
||||
to explicitly set the URL of the node to ping.
|
||||
|
||||
## Meta fields
|
||||
|
||||
Many of the meta fields e.g. `_parent` or `_routing` are now
|
||||
[part of the top-level of a document](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_mapping_changes.html#migration-meta-fields)
|
||||
and are no longer returned as parts of the `fields` object. We had to change
|
||||
larger parts of e.g. the `Reindexer` to get it to work seamlessly with Elasticsearch 2.0.
|
||||
|
||||
Notice that all stored meta-fields are now [returned by default](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_crud_and_routing_changes.html#_all_stored_meta_fields_returned_by_default).
|
||||
|
||||
## HasParentQuery / HasChildQuery
|
||||
|
||||
`NewHasParentQuery` and `NewHasChildQuery` must now include both parent/child type and query. It is now in line with the Java API.
|
||||
|
||||
Example for Elastic 2.0 (old):
|
||||
|
||||
```go
|
||||
allQ := elastic.NewMatchAllQuery()
|
||||
q := elastic.NewHasChildFilter("tweet").Query(&allQ)
|
||||
```
|
||||
|
||||
Example for Elastic 3.0 (new):
|
||||
|
||||
```go
|
||||
q := elastic.NewHasChildQuery("tweet", elastic.NewMatchAllQuery())
|
||||
```
|
||||
|
||||
## SetBasicAuth client option
|
||||
|
||||
You can now tell Elastic to pass HTTP Basic Auth credentials with each request. In previous versions of Elastic you had to set up your own `http.Transport` to do this. This should make it more convenient to use Elastic in combination with [Shield](https://www.elastic.co/products/shield) in its [basic setup](https://www.elastic.co/guide/en/shield/current/enable-basic-auth.html).
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
client, err := elastic.NewClient(elastic.SetBasicAuth("user", "secret"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
```
|
||||
|
||||
## Delete-by-Query API
|
||||
|
||||
The Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_delete_by_query_is_now_a_plugin). It is no longer core part of Elasticsearch. You can [install it as a plugin as described here](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
|
||||
|
||||
Elastic 3.0 still contains the `DeleteByQueryService`, but you need to install the plugin first. If you don't install it and use `DeleteByQueryService` you will most probably get a 404.
|
||||
|
||||
An older version of this document stated the following:
|
||||
|
||||
> Elastic 3.0 still contains the `DeleteByQueryService` but it will fail with `ErrPluginNotFound` when the plugin is not installed.
|
||||
>
|
||||
> Example for Elastic 3.0 (new):
|
||||
>
|
||||
> ```go
|
||||
> _, err := client.DeleteByQuery().Query(elastic.NewTermQuery("client", "1")).Do()
|
||||
> if err == elastic.ErrPluginNotFound {
|
||||
> // Delete By Query API is not available
|
||||
> }
|
||||
> ```
|
||||
|
||||
I have decided that this is not a good way to handle the case of a missing plugin. The main reason is that with this logic, you'd always have to check if the plugin is missing in case of an error. This is not only slow, but it also puts logic into a service where it should really be just opaque and return the response of Elasticsearch.
|
||||
|
||||
If you rely on certain plugins to be installed, you should check on startup. That's where the following two helpers come into play.
|
||||
|
||||
## HasPlugin and SetRequiredPlugins
|
||||
|
||||
Some of the core functionality of Elasticsearch has now been moved into plugins. E.g. the Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
|
||||
|
||||
You need to make sure to add these plugins to your Elasticsearch installation to still be able to use the `DeleteByQueryService`. You can test this now with the `HasPlugin(name string)` helper in the client.
|
||||
|
||||
Example for Elastic 3.0 (new):
|
||||
|
||||
```go
|
||||
err, found := client.HasPlugin("delete-by-query")
|
||||
if err == nil && found {
|
||||
// ... Delete By Query API is available
|
||||
}
|
||||
```
|
||||
|
||||
To simplify this process, there is now a `SetRequiredPlugins` helper that can be passed as an option func when creating a new client. If the plugin is not installed, the client wouldn't be created in the first place.
|
||||
|
||||
```go
|
||||
// Will raise an error if the "delete-by-query" plugin is NOT installed
|
||||
client, err := elastic.NewClient(elastic.SetRequiredPlugins("delete-by-query"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
```
|
||||
|
||||
Notice that there also is a way to define [mandatory plugins](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-plugins.html#_mandatory_plugins) in the Elasticsearch configuration file.
|
||||
|
||||
## Common Query has been renamed to Common Terms Query
|
||||
|
||||
The `CommonQuery` has been renamed to `CommonTermsQuery` to be in line with the [Java API](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_java_api_changes.html#_query_filter_refactoring).
|
||||
|
||||
## Remove `MoreLikeThis` and `MoreLikeThisField`
|
||||
|
||||
The More Like This API and the More Like This Field query [have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_more_like_this) and replaced with the `MoreLikeThisQuery`.
|
||||
|
||||
## Remove Filtered Query
|
||||
|
||||
With the merge of queries and filters, the [filtered query became deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). While it is only deprecated and therefore still available in Elasticsearch 2.0, we have decided to remove it from Elastic 3.0. Why? Because we think that when you're already forced to rewrite many of your application code, it might be a good chance to get rid of things that are deprecated as well. So you might simply change your filtered query with a boolean query as [described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated).
|
||||
|
||||
## Remove FuzzyLikeThis and FuzzyLikeThisField
|
||||
|
||||
Both have been removed from Elasticsearch 2.0 as well.
|
||||
|
||||
## Remove LimitFilter
|
||||
|
||||
The `limit` filter is [deprecated in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_limit_literal_filter_deprecated) and becomes a no-op. Now is a good chance to remove it from your application as well. Use the `terminate_after` parameter in your search [as described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-body.html) to achieve similar effects.
|
||||
|
||||
## Remove `_cache` and `_cache_key` from filters
|
||||
|
||||
Both have been [removed from Elasticsearch 2.0 as well](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_filter_auto_caching).
|
||||
|
||||
## Partial fields are gone
|
||||
|
||||
Partial fields are [removed in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_search_changes.html#_partial_fields) in favor of [source filtering](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-source-filtering.html).
|
||||
|
||||
## Scripting
|
||||
|
||||
A [`Script`](https://github.com/olivere/elastic/blob/release-branch.v3/script.go) type has been added to Elastic 3.0. In Elastic 2.0, there were various places (e.g. aggregations) where you could just add the script as a string, specify the scripting language, add parameters etc. With Elastic 3.0, you should now always use the `Script` type.
|
||||
|
||||
Example for Elastic 2.0 (old):
|
||||
|
||||
```go
|
||||
update, err := client.Update().Index("twitter").Type("tweet").Id("1").
|
||||
Script("ctx._source.retweets += num").
|
||||
ScriptParams(map[string]interface{}{"num": 1}).
|
||||
Upsert(map[string]interface{}{"retweets": 0}).
|
||||
Do()
|
||||
```
|
||||
|
||||
Example for Elastic 3.0 (new):
|
||||
|
||||
```go
|
||||
update, err := client.Update().Index("twitter").Type("tweet").Id("1").
|
||||
Script(elastic.NewScript("ctx._source.retweets += num").Param("num", 1)).
|
||||
Upsert(map[string]interface{}{"retweets": 0}).
|
||||
Do()
|
||||
```
|
||||
|
||||
## Cluster State
|
||||
|
||||
The combination of `Metric(string)` and `Metrics(...string)` has been replaced by a single func with the signature `Metric(...string)`.
|
||||
|
||||
## Unexported structs in response
|
||||
|
||||
Services generally return a typed response from a `Do` func. Those structs are exported so that they can be passed around in your own application. In Elastic 3.0 however, we changed that (most) sub-structs are now unexported, meaning: You can only pass around the whole response, not sub-structures of it. This makes it easier for restructuring responses according to the Elasticsearch API. See [`ClusterStateResponse`](https://github.com/olivere/elastic/blob/release-branch.v3/cluster_state.go#L182) as an example.
|
||||
|
||||
## Add offset to Histogram aggregation
|
||||
|
||||
Histogram aggregations now have an [offset](https://github.com/elastic/elasticsearch/pull/9505) option.
|
||||
|
||||
## Services
|
||||
|
||||
### REST API specification
|
||||
|
||||
As you might know, Elasticsearch comes with a REST API specification. The specification describes the endpoints in a JSON structure.
|
||||
|
||||
Most services in Elastic predated the REST API specification. We are in the process of bringing all these services in line with the specification. Services can be generated by `go generate` (not 100% automatic though). This is an ongoing process.
|
||||
|
||||
This probably doesn't mean a lot to you. However, you can now be more confident that Elastic supports all features that the REST API specification describes.
|
||||
|
||||
At the same time, the file names of the services are renamed to match the REST API specification naming.
|
||||
|
||||
### REST API Test Suite
|
||||
|
||||
The REST API specification of Elasticsearch comes along with a test suite that official clients typically use to test for conformance. Up until now, Elastic didn't run this test suite. However, we are in the process of setting up infrastructure and tests to match this suite as well.
|
||||
|
||||
This process is not completed though.
|
||||
|
||||
|
||||
195
vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-5.0.md
generated
vendored
Normal file
195
vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-5.0.md
generated
vendored
Normal file
@@ -0,0 +1,195 @@
|
||||
# Changes in Elastic 5.0
|
||||
|
||||
## Enforce context.Context in PerformRequest and Do
|
||||
|
||||
We enforce the usage of `context.Context` everywhere you execute a request.
|
||||
You need to change all your `Do()` calls to pass a context: `Do(ctx)`.
|
||||
This enables automatic request cancelation and many other patterns.
|
||||
|
||||
If you don't need this, simply pass `context.TODO()` or `context.Background()`.
|
||||
|
||||
## Warmers removed
|
||||
|
||||
Warmers are no longer necessary and have been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_index_apis.html#_warmers).
|
||||
|
||||
## Optimize removed
|
||||
|
||||
Optimize was deprecated in ES 2.0 and has been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_rest_api_changes.html#_literal__optimize_literal_endpoint_removed).
|
||||
Use [Force Merge](https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html) instead.
|
||||
|
||||
## Missing Query removed
|
||||
|
||||
The `missing` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-exists-query.html#_literal_missing_literal_query).
|
||||
Use `exists` query with `must_not` in `bool` query instead.
|
||||
|
||||
## And Query removed
|
||||
|
||||
The `and` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
|
||||
Use `must` clauses in a `bool` query instead.
|
||||
|
||||
## Not Query removed
|
||||
|
||||
TODO Is it removed?
|
||||
|
||||
## Or Query removed
|
||||
|
||||
The `or` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
|
||||
Use `should` clauses in a `bool` query instead.
|
||||
|
||||
## Filtered Query removed
|
||||
|
||||
The `filtered` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
|
||||
Use `bool` query instead, which supports `filter` clauses too.
|
||||
|
||||
## Limit Query removed
|
||||
|
||||
The `limit` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
|
||||
Use the `terminate_after` parameter instead.
|
||||
|
||||
# Template Query removed
|
||||
|
||||
The `template` query has been [deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-template-query.html). You should use
|
||||
Search Templates instead.
|
||||
|
||||
We remove it from Elastic 5.0 as the 5.0 update is already a good opportunity
|
||||
to get rid of old stuff.
|
||||
|
||||
## `_timestamp` and `_ttl` removed
|
||||
|
||||
Both of these fields were deprecated and are now [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_mapping_changes.html#_literal__timestamp_literal_and_literal__ttl_literal).
|
||||
|
||||
## Search template Put/Delete API returns `acknowledged` only
|
||||
|
||||
The response type for Put/Delete search templates has changed.
|
||||
It only returns a single `acknowledged` flag now.
|
||||
|
||||
## Fields has been renamed to Stored Fields
|
||||
|
||||
The `fields` parameter has been renamed to `stored_fields`.
|
||||
See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fields_literal_parameter).
|
||||
|
||||
## Fielddatafields has been renamed to Docvaluefields
|
||||
|
||||
The `fielddata_fields` parameter [has been renamed](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fielddata_fields_literal_parameter)
|
||||
to `docvalue_fields`.
|
||||
|
||||
## Type exists endpoint changed
|
||||
|
||||
The endpoint for checking whether a type exists has been changed from
|
||||
`HEAD {index}/{type}` to `HEAD {index}/_mapping/{type}`.
|
||||
See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking_50_rest_api_changes.html#_literal_head_index_type_literal_replaced_with_literal_head_index__mapping_type_literal).
|
||||
|
||||
## Refresh parameter changed
|
||||
|
||||
The `?refresh` parameter previously could be a boolean value. It indicated
|
||||
whether changes made by a request (e.g. by the Bulk API) should be immediately
|
||||
visible in search, or not. Using `refresh=true` had the positive effect of
|
||||
immediately seeing the changes when searching; the negative effect is that
|
||||
it is a rather big performance hit.
|
||||
|
||||
With 5.0, you now have the choice between these 3 values.
|
||||
|
||||
* `"true"` - Refresh immediately
|
||||
* `"false"` - Do not refresh (the default value)
|
||||
* `"wait_for"` - Wait until ES made the document visible in search
|
||||
|
||||
See [?refresh](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-refresh.html) in the documentation.
|
||||
|
||||
Notice that `true` and `false` (the boolean values) are no longer available
|
||||
now in Elastic. You must use a string instead, with one of the above values.
|
||||
|
||||
## ReindexerService removed
|
||||
|
||||
The `ReindexerService` was a custom solution that was started in the ES 1.x era
|
||||
to automate reindexing data, from one index to another or even between clusters.
|
||||
|
||||
ES 2.3 introduced its own [Reindex API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html)
|
||||
so we're going to remove our custom solution and ask you to use the native reindexer.
|
||||
|
||||
The `ReindexService` is available via `client.Reindex()` (which used to point
|
||||
to the custom reindexer).
|
||||
|
||||
## Delete By Query back in core
|
||||
|
||||
The [Delete By Query API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html)
|
||||
was moved into a plugin in 2.0. Now it's back in core with a complete rewrite based on the Bulk API.
|
||||
|
||||
It has its own endpoint at `/_delete_by_query`.
|
||||
|
||||
Delete By Query, Reindex, and Update By Query are very similar under the hood.
|
||||
|
||||
## Reindex, Delete By Query, and Update By Query response changed
|
||||
|
||||
The response from the above APIs changed a bit. E.g. the `retries` value
|
||||
used to be an `int64` and returns separate values for `bulk` and `search` now:
|
||||
|
||||
```
|
||||
// Old
|
||||
{
|
||||
...
|
||||
"retries": 123,
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
// New
|
||||
{
|
||||
...
|
||||
"retries": {
|
||||
"bulk": 123,
|
||||
"search": 0
|
||||
},
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
## ScanService removed
|
||||
|
||||
The `ScanService` is removed. Use the (new) `ScrollService` instead.
|
||||
|
||||
## New ScrollService
|
||||
|
||||
There was confusion around `ScanService` and `ScrollService` doing basically
|
||||
the same. One was returning slices and didn't support all query details, the
|
||||
other returned one document after another and wasn't safe for concurrent use.
|
||||
So we merged the two and merged it into a new `ScrollService` that
|
||||
removes all the problems with the older services.
|
||||
|
||||
In other words:
|
||||
If you used `ScanService`, switch to `ScrollService`.
|
||||
If you used the old `ScrollService`, you might need to fix some things but
|
||||
overall it should just work.
|
||||
|
||||
Changes:
|
||||
- We replaced `elastic.EOS` with `io.EOF` to indicate the "end of scroll".
|
||||
|
||||
TODO Not implemented yet
|
||||
|
||||
## Suggesters
|
||||
|
||||
They have been [completely rewritten in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html).
|
||||
|
||||
Some changes:
|
||||
- Suggesters no longer have an [output](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html#_simpler_completion_indexing).
|
||||
|
||||
TODO Fix all structural changes in suggesters
|
||||
|
||||
## Percolator
|
||||
|
||||
Percolator has [changed considerably](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_percolator.html).
|
||||
|
||||
Elastic 5.0 adds the new
|
||||
[Percolator Query](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-percolate-query.html)
|
||||
which can be used in combination with the new
|
||||
[Percolator type](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/percolator.html).
|
||||
|
||||
The Percolate service is removed from Elastic 5.0.
|
||||
|
||||
## Remove Consistency, add WaitForActiveShards
|
||||
|
||||
The `consistency` parameter has been removed in a lot of places, e.g. the Bulk,
|
||||
Index, Delete, Delete-by-Query, Reindex, Update, and Update-by-Query API.
|
||||
|
||||
It has been replaced by a somewhat similar `wait_for_active_shards` parameter.
|
||||
See https://github.com/elastic/elasticsearch/pull/19454.
|
||||
40
vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTING.md
generated
vendored
Normal file
40
vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
# How to contribute
|
||||
|
||||
Elastic is an open-source project and we are looking forward to each
|
||||
contribution.
|
||||
|
||||
Notice that while the [official Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) is rather good, it is a high-level
|
||||
overview of the features of Elasticsearch. However, Elastic tries to resemble
|
||||
the Java API of Elasticsearch which you can find [on GitHub](https://github.com/elastic/elasticsearch).
|
||||
|
||||
This explains why you might think that some options are strange or missing
|
||||
in Elastic, while often they're just different. Please check the Java API first.
|
||||
|
||||
Having said that: Elasticsearch is moving fast and it might be very likely
|
||||
that we missed some features or changes. Feel free to change that.
|
||||
|
||||
## Your Pull Request
|
||||
|
||||
To make it easy to review and understand your changes, please keep the
|
||||
following things in mind before submitting your pull request:
|
||||
|
||||
* You compared the existing implementation with the Java API, did you?
|
||||
* Please work on the latest possible state of `olivere/elastic`.
|
||||
Use `release-branch.v2` for targeting Elasticsearch 1.x and
|
||||
`release-branch.v3` for targeting 2.x.
|
||||
* Create a branch dedicated to your change.
|
||||
* If possible, write a test case which confirms your change.
|
||||
* Make sure your changes and your tests work with all recent versions of
|
||||
Elasticsearch. We currently support Elasticsearch 1.7.x in the
|
||||
release-branch.v2 and Elasticsearch 2.x in the release-branch.v3.
|
||||
* Test your changes before creating a pull request (`go test ./...`).
|
||||
* Don't mix several features or bug fixes in one pull request.
|
||||
* Create a meaningful commit message.
|
||||
* Explain your change, e.g. provide a link to the issue you are fixing and
|
||||
probably a link to the Elasticsearch documentation and/or source code.
|
||||
* Format your source with `go fmt`.
|
||||
|
||||
## Additional Resources
|
||||
|
||||
* [GitHub documentation](http://help.github.com/)
|
||||
* [GitHub pull request documentation](http://help.github.com/send-pull-requests/)
|
||||
93
vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS
generated
vendored
Normal file
93
vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,93 @@
|
||||
# This is a list of people who have contributed code
|
||||
# to the Elastic repository.
|
||||
#
|
||||
# It is just my small "thank you" to all those that helped
|
||||
# making Elastic what it is.
|
||||
#
|
||||
# Please keep this list sorted.
|
||||
|
||||
0x6875790d0a [@huydx](https://github.com/huydx)
|
||||
Adam Alix [@adamalix](https://github.com/adamalix)
|
||||
Adam Weiner [@adamweiner](https://github.com/adamweiner)
|
||||
Adrian Lungu [@AdrianLungu](https://github.com/AdrianLungu)
|
||||
Alex [@akotlar](https://github.com/akotlar)
|
||||
Alexandre Olivier [@aliphen](https://github.com/aliphen)
|
||||
Alexey Sharov [@nizsheanez](https://github.com/nizsheanez)
|
||||
AndreKR [@AndreKR](https://github.com/AndreKR)
|
||||
Andrew Dunham [@andrew-d](https://github.com/andrew-d)
|
||||
Andrew Gaul [@andrewgaul](https://github.com/andrewgaul)
|
||||
Benjamin Fernandes [@LotharSee](https://github.com/LotharSee)
|
||||
Benjamin Zarzycki [@kf6nux](https://github.com/kf6nux)
|
||||
Braden Bassingthwaite [@bbassingthwaite-va](https://github.com/bbassingthwaite-va)
|
||||
Brady Love [@bradylove](https://github.com/bradylove)
|
||||
Bryan Conklin [@bmconklin](https://github.com/bmconklin)
|
||||
Bruce Zhou [@brucez-isell](https://github.com/brucez-isell)
|
||||
Chris M [@tebriel](https://github.com/tebriel)
|
||||
Christophe Courtaut [@kri5](https://github.com/kri5)
|
||||
Conrad Pankoff [@deoxxa](https://github.com/deoxxa)
|
||||
Corey Scott [@corsc](https://github.com/corsc)
|
||||
Daniel Barrett [@shendaras](https://github.com/shendaras)
|
||||
Daniel Heckrath [@DanielHeckrath](https://github.com/DanielHeckrath)
|
||||
Daniel Imfeld [@dimfeld](https://github.com/dimfeld)
|
||||
Dwayne Schultz [@myshkin5](https://github.com/myshkin5)
|
||||
Ellison Leão [@ellisonleao](https://github.com/ellisonleao)
|
||||
Erwin [@eticzon](https://github.com/eticzon)
|
||||
Eugene Egorov [@EugeneEgorov](https://github.com/EugeneEgorov)
|
||||
Faolan C-P [@fcheslack](https://github.com/fcheslack)
|
||||
Gerhard Häring [@ghaering](https://github.com/ghaering)
|
||||
Guilherme Silveira [@guilherme-santos](https://github.com/guilherme-santos)
|
||||
Guillaume J. Charmes [@creack](https://github.com/creack)
|
||||
Guiseppe [@gm42](https://github.com/gm42)
|
||||
Han Yu [@MoonighT](https://github.com/MoonighT)
|
||||
Harrison Wright [@wright8191](https://github.com/wright8191)
|
||||
Henry Clifford [@hcliff](https://github.com/hcliff)
|
||||
Igor Dubinskiy [@idubinskiy](https://github.com/idubinskiy)
|
||||
initialcontext [@initialcontext](https://github.com/initialcontext)
|
||||
Isaac Saldana [@isaldana](https://github.com/isaldana)
|
||||
Jack Lindamood [@cep21](https://github.com/cep21)
|
||||
Jacob [@jdelgad](https://github.com/jdelgad)
|
||||
Jayme Rotsaert [@jrots](https://github.com/jrots)
|
||||
Jeremy Canady [@jrmycanady](https://github.com/jrmycanady)
|
||||
Joe Buck [@four2five](https://github.com/four2five)
|
||||
John Barker [@j16r](https://github.com/j16r)
|
||||
John Goodall [@jgoodall](https://github.com/jgoodall)
|
||||
John Stanford [@jxstanford](https://github.com/jxstanford)
|
||||
jun [@coseyo](https://github.com/coseyo)
|
||||
Junpei Tsuji [@jun06t](https://github.com/jun06t)
|
||||
Kenta SUZUKI [@suzuken](https://github.com/suzuken)
|
||||
Kyle Brandt [@kylebrandt](https://github.com/kylebrandt)
|
||||
Leandro Piccilli [@lpic10](https://github.com/lpic10)
|
||||
Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh)
|
||||
Mara Kim [@autochthe](https://github.com/autochthe)
|
||||
Marcy Buccellato [@marcybuccellato](https://github.com/marcybuccellato)
|
||||
Mark Costello [@mcos](https://github.com/mcos)
|
||||
Martin Häger [@protomouse](https://github.com/protomouse)
|
||||
Medhi Bechina [@mdzor](https://github.com/mdzor)
|
||||
mosa [@mosasiru](https://github.com/mosasiru)
|
||||
naimulhaider [@naimulhaider](https://github.com/naimulhaider)
|
||||
Naoya Yoshizawa [@azihsoyn](https://github.com/azihsoyn)
|
||||
navins [@ishare](https://github.com/ishare)
|
||||
Naoya Tsutsumi [@tutuming](https://github.com/tutuming)
|
||||
Nicholas Wolff [@nwolff](https://github.com/nwolff)
|
||||
Nick K [@utrack](https://github.com/utrack)
|
||||
Nick Whyte [@nickw444](https://github.com/nickw444)
|
||||
Orne Brocaar [@brocaar](https://github.com/brocaar)
|
||||
Radoslaw Wesolowski [r--w](https://github.com/r--w)
|
||||
Ryan Schmukler [@rschmukler](https://github.com/rschmukler)
|
||||
Sacheendra talluri [@sacheendra](https://github.com/sacheendra)
|
||||
Sean DuBois [@Sean-Der](https://github.com/Sean-Der)
|
||||
Shalin LK [@shalinlk](https://github.com/shalinlk)
|
||||
Stephen Kubovic [@stephenkubovic](https://github.com/stephenkubovic)
|
||||
Stuart Warren [@Woz](https://github.com/stuart-warren)
|
||||
Sulaiman [@salajlan](https://github.com/salajlan)
|
||||
Sundar [@sundarv85](https://github.com/sundarv85)
|
||||
Take [ww24](https://github.com/ww24)
|
||||
Tetsuya Morimoto [@t2y](https://github.com/t2y)
|
||||
TimeEmit [@TimeEmit](https://github.com/timeemit)
|
||||
TusharM [@tusharm](https://github.com/tusharm)
|
||||
wangtuo [@wangtuo](https://github.com/wangtuo)
|
||||
wolfkdy [@wolfkdy](https://github.com/wolfkdy)
|
||||
Wyndham Blanton [@wyndhblb](https://github.com/wyndhblb)
|
||||
Yarden Bar [@ayashjorden](https://github.com/ayashjorden)
|
||||
zakthomas [@zakthomas](https://github.com/zakthomas)
|
||||
singham [@zhaochenxiao90](https://github.com/zhaochenxiao90)
|
||||
17
vendor/gopkg.in/olivere/elastic.v5/ISSUE_TEMPLATE.md
generated
vendored
Normal file
17
vendor/gopkg.in/olivere/elastic.v5/ISSUE_TEMPLATE.md
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
Please use the following questions as a guideline to help me answer
|
||||
your issue/question without further inquiry. Thank you.
|
||||
|
||||
### Which version of Elastic are you using?
|
||||
|
||||
[ ] elastic.v2 (for Elasticsearch 1.x)
|
||||
[ ] elastic.v3 (for Elasticsearch 2.x)
|
||||
[ ] elastic.v5 (for Elasticsearch 5.x)
|
||||
|
||||
### Please describe the expected behavior
|
||||
|
||||
|
||||
### Please describe the actual behavior
|
||||
|
||||
|
||||
### Any steps to reproduce the behavior?
|
||||
|
||||
20
vendor/gopkg.in/olivere/elastic.v5/LICENSE
generated
vendored
Normal file
20
vendor/gopkg.in/olivere/elastic.v5/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
Copyright © 2012-2015 Oliver Eilhard
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the “Software”), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
IN THE SOFTWARE.
|
||||
467
vendor/gopkg.in/olivere/elastic.v5/README.md
generated
vendored
Normal file
467
vendor/gopkg.in/olivere/elastic.v5/README.md
generated
vendored
Normal file
@@ -0,0 +1,467 @@
|
||||
# Elastic
|
||||
|
||||
Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the
|
||||
[Go](http://www.golang.org/) programming language.
|
||||
|
||||
[](https://travis-ci.org/olivere/elastic)
|
||||
[](http://godoc.org/gopkg.in/olivere/elastic.v5)
|
||||
[](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
|
||||
|
||||
See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic.
|
||||
|
||||
|
||||
## Releases
|
||||
|
||||
**The release branches (e.g. [`release-branch.v5`](https://github.com/olivere/elastic/tree/release-branch.v5))
|
||||
are actively being worked on and can break at any time.
|
||||
If you want to use stable versions of Elastic, please use the packages released via [gopkg.in](https://gopkg.in).**
|
||||
|
||||
Here's the version matrix:
|
||||
|
||||
Elasticsearch version | Elastic version  | Package URL
|
||||
----------------------|------------------|------------
|
||||
5.x | 5.0 | [`gopkg.in/olivere/elastic.v5`](https://gopkg.in/olivere/elastic.v5) ([source](https://github.com/olivere/elastic/tree/release-branch.v5) [doc](http://godoc.org/gopkg.in/olivere/elastic.v5))
|
||||
2.x | 3.0 | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3))
|
||||
1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2))
|
||||
0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1))
|
||||
|
||||
**Example:**
|
||||
|
||||
You have installed Elasticsearch 5.0.0 and want to use Elastic.
|
||||
As listed above, you should use Elastic 5.0.
|
||||
So you first install the stable release of Elastic 5.0 from gopkg.in.
|
||||
|
||||
```sh
|
||||
$ go get gopkg.in/olivere/elastic.v5
|
||||
```
|
||||
|
||||
You then import it with this import path:
|
||||
|
||||
```go
|
||||
import elastic "gopkg.in/olivere/elastic.v5"
|
||||
```
|
||||
|
||||
### Elastic 5.0
|
||||
|
||||
Elastic 5.0 targets Elasticsearch 5.0.0 and later. Elasticsearch 5.0.0 was
|
||||
[released on 26th October 2016](https://www.elastic.co/blog/elasticsearch-5-0-0-released).
|
||||
|
||||
Notice that there will be a lot of [breaking changes in Elasticsearch 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking-changes-5.0.html)
|
||||
and we used this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v5/CHANGELOG-5.0.md)
|
||||
as we did in the transition from Elastic 2.0 (for Elasticsearch 1.x) to Elastic 3.0 (for Elasticsearch 2.x).
|
||||
|
||||
Furthermore, the jump in version numbers will give us a chance to be in sync with the Elastic Stack.
|
||||
|
||||
### Elastic 3.0
|
||||
|
||||
Elastic 3.0 targets Elasticsearch 2.x and is published via [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3).
|
||||
|
||||
Elastic 3.0 will only get critical bug fixes. You should update to a recent version.
|
||||
|
||||
### Elastic 2.0
|
||||
|
||||
Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2).
|
||||
|
||||
Elastic 2.0 will only get critical bug fixes. You should update to a recent version.
|
||||
|
||||
### Elastic 1.0
|
||||
|
||||
Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic
|
||||
to a recent version.
|
||||
|
||||
However, if you cannot update for some reason, don't worry. Version 1.0 is
|
||||
still available. All you need to do is go-get it and change your import path
|
||||
as described above.
|
||||
|
||||
|
||||
## Status
|
||||
|
||||
We have been using Elastic in production since 2012. Elastic is stable but the API changes
|
||||
now and then. We strive for API compatibility.
|
||||
However, Elasticsearch sometimes introduces [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes.html)
|
||||
and we sometimes have to adapt.
|
||||
|
||||
Having said that, there have been no big API changes that required you
|
||||
to rewrite your application big time. More often than not it's renaming APIs
|
||||
and adding/removing features so that Elastic is in sync with Elasticsearch.
|
||||
|
||||
Elastic has been used in production with the following Elasticsearch versions:
|
||||
0.90, 1.0-1.7, and 2.0-2.4.1. Furthermore, we use [Travis CI](https://travis-ci.org/)
|
||||
to test Elastic with the most recent versions of Elasticsearch and Go.
|
||||
See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml)
|
||||
file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic)
|
||||
for the results.
|
||||
|
||||
Elasticsearch has quite a few features. Most of them are implemented
|
||||
by Elastic. I add features and APIs as required. It's straightforward
|
||||
to implement missing pieces. I'm accepting pull requests :-)
|
||||
|
||||
Having said that, I hope you find the project useful.
|
||||
|
||||
|
||||
## Getting Started
|
||||
|
||||
The first thing you do is to create a [Client](https://github.com/olivere/elastic/blob/master/client.go).
|
||||
The client connects to Elasticsearch on `http://127.0.0.1:9200` by default.
|
||||
|
||||
You typically create one client for your app. Here's a complete example of
|
||||
creating a client, creating an index, adding a document, executing a search etc.
|
||||
|
||||
```go
|
||||
// Create a context
|
||||
ctx := context.Background()
|
||||
|
||||
// Create a client
|
||||
client, err := elastic.NewClient()
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Create an index
|
||||
_, err = client.CreateIndex("twitter").Do(ctx)
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Add a document to the index
|
||||
tweet := Tweet{User: "olivere", Message: "Take Five"}
|
||||
_, err = client.Index().
|
||||
Index("twitter").
|
||||
Type("tweet").
|
||||
Id("1").
|
||||
BodyJson(tweet).
|
||||
Refresh("true").
|
||||
Do(ctx)
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Search with a term query
|
||||
termQuery := elastic.NewTermQuery("user", "olivere")
|
||||
searchResult, err := client.Search().
|
||||
Index("twitter"). // search in index "twitter"
|
||||
Query(termQuery). // specify the query
|
||||
Sort("user", true). // sort by "user" field, ascending
|
||||
From(0).Size(10). // take documents 0-9
|
||||
Pretty(true). // pretty print request and response JSON
|
||||
Do(ctx) // execute
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// searchResult is of type SearchResult and returns hits, suggestions,
|
||||
// and all kinds of other information from Elasticsearch.
|
||||
fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
|
||||
|
||||
// Each is a convenience function that iterates over hits in a search result.
|
||||
// It makes sure you don't need to check for nil values in the response.
|
||||
// However, it ignores errors in serialization. If you want full control
|
||||
// over iterating the hits, see below.
|
||||
var ttyp Tweet
|
||||
for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
|
||||
if t, ok := item.(Tweet); ok {
|
||||
fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
|
||||
}
|
||||
}
|
||||
// TotalHits is another convenience function that works even when something goes wrong.
|
||||
fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
|
||||
|
||||
// Here's how you iterate through results with full control over each step.
|
||||
if searchResult.Hits.TotalHits > 0 {
|
||||
fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
|
||||
|
||||
// Iterate through results
|
||||
for _, hit := range searchResult.Hits.Hits {
|
||||
// hit.Index contains the name of the index
|
||||
|
||||
// Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
|
||||
var t Tweet
|
||||
err := json.Unmarshal(*hit.Source, &t)
|
||||
if err != nil {
|
||||
// Deserialization failed
|
||||
}
|
||||
|
||||
// Work with tweet
|
||||
fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
|
||||
}
|
||||
} else {
|
||||
// No hits
|
||||
fmt.Print("Found no tweets\n")
|
||||
}
|
||||
|
||||
// Delete the index again
|
||||
_, err = client.DeleteIndex("twitter").Do(ctx)
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
```
|
||||
|
||||
Here's a [link to a complete working example](https://gist.github.com/olivere/114347ff9d9cfdca7bdc0ecea8b82263).
|
||||
|
||||
See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
|
||||
|
||||
|
||||
## API Status
|
||||
|
||||
### Document APIs
|
||||
|
||||
- [x] Index API
|
||||
- [x] Get API
|
||||
- [x] Delete API
|
||||
- [x] Delete By Query API
|
||||
- [x] Update API
|
||||
- [x] Update By Query API
|
||||
- [x] Multi Get API
|
||||
- [x] Bulk API
|
||||
- [x] Reindex API
|
||||
- [x] Term Vectors
|
||||
- [x] Multi termvectors API
|
||||
|
||||
### Search APIs
|
||||
|
||||
- [x] Search
|
||||
- [x] Search Template
|
||||
- [ ] Multi Search Template
|
||||
- [ ] Search Shards API
|
||||
- [x] Suggesters
|
||||
- [x] Term Suggester
|
||||
- [x] Phrase Suggester
|
||||
- [x] Completion Suggester
|
||||
- [x] Context Suggester
|
||||
- [x] Multi Search API
|
||||
- [x] Count API
|
||||
- [ ] Search Exists API
|
||||
- [ ] Validate API
|
||||
- [x] Explain API
|
||||
- [x] Profile API
|
||||
- [x] Field Stats API
|
||||
|
||||
### Aggregations
|
||||
|
||||
- Metrics Aggregations
|
||||
- [x] Avg
|
||||
- [x] Cardinality
|
||||
- [x] Extended Stats
|
||||
- [x] Geo Bounds
|
||||
- [ ] Geo Centroid
|
||||
- [x] Max
|
||||
- [x] Min
|
||||
- [x] Percentiles
|
||||
- [x] Percentile Ranks
|
||||
- [ ] Scripted Metric
|
||||
- [x] Stats
|
||||
- [x] Sum
|
||||
- [x] Top Hits
|
||||
- [x] Value Count
|
||||
- Bucket Aggregations
|
||||
- [x] Children
|
||||
- [x] Date Histogram
|
||||
- [x] Date Range
|
||||
- [x] Filter
|
||||
- [x] Filters
|
||||
- [x] Geo Distance
|
||||
- [ ] GeoHash Grid
|
||||
- [x] Global
|
||||
- [x] Histogram
|
||||
- [x] IP Range
|
||||
- [x] Missing
|
||||
- [x] Nested
|
||||
- [x] Range
|
||||
- [x] Reverse Nested
|
||||
- [x] Sampler
|
||||
- [x] Significant Terms
|
||||
- [x] Terms
|
||||
- Pipeline Aggregations
|
||||
- [x] Avg Bucket
|
||||
- [x] Derivative
|
||||
- [x] Max Bucket
|
||||
- [x] Min Bucket
|
||||
- [x] Sum Bucket
|
||||
- [x] Stats Bucket
|
||||
- [ ] Extended Stats Bucket
|
||||
- [ ] Percentiles Bucket
|
||||
- [x] Moving Average
|
||||
- [x] Cumulative Sum
|
||||
- [x] Bucket Script
|
||||
- [x] Bucket Selector
|
||||
- [x] Serial Differencing
|
||||
- [x] Matrix Aggregations
|
||||
- [x] Matrix Stats
|
||||
- [x] Aggregation Metadata
|
||||
|
||||
### Indices APIs
|
||||
|
||||
- [x] Create Index
|
||||
- [x] Delete Index
|
||||
- [x] Get Index
|
||||
- [x] Indices Exists
|
||||
- [x] Open / Close Index
|
||||
- [x] Shrink Index
|
||||
- [x] Rollover Index
|
||||
- [x] Put Mapping
|
||||
- [x] Get Mapping
|
||||
- [x] Get Field Mapping
|
||||
- [x] Types Exists
|
||||
- [x] Index Aliases
|
||||
- [x] Update Indices Settings
|
||||
- [x] Get Settings
|
||||
- [x] Analyze
|
||||
- [x] Index Templates
|
||||
- [ ] Shadow Replica Indices
|
||||
- [x] Indices Stats
|
||||
- [ ] Indices Segments
|
||||
- [ ] Indices Recovery
|
||||
- [ ] Indices Shard Stores
|
||||
- [ ] Clear Cache
|
||||
- [x] Flush
|
||||
- [x] Refresh
|
||||
- [x] Force Merge
|
||||
- [ ] Upgrade
|
||||
|
||||
### cat APIs
|
||||
|
||||
The cat APIs are not implemented as of now. We think they are better suited for operating with Elasticsearch on the command line.
|
||||
|
||||
- [ ] cat aliases
|
||||
- [ ] cat allocation
|
||||
- [ ] cat count
|
||||
- [ ] cat fielddata
|
||||
- [ ] cat health
|
||||
- [ ] cat indices
|
||||
- [ ] cat master
|
||||
- [ ] cat nodeattrs
|
||||
- [ ] cat nodes
|
||||
- [ ] cat pending tasks
|
||||
- [ ] cat plugins
|
||||
- [ ] cat recovery
|
||||
- [ ] cat repositories
|
||||
- [ ] cat thread pool
|
||||
- [ ] cat shards
|
||||
- [ ] cat segments
|
||||
- [ ] cat snapshots
|
||||
|
||||
### Cluster APIs
|
||||
|
||||
- [x] Cluster Health
|
||||
- [x] Cluster State
|
||||
- [x] Cluster Stats
|
||||
- [ ] Pending Cluster Tasks
|
||||
- [ ] Cluster Reroute
|
||||
- [ ] Cluster Update Settings
|
||||
- [x] Nodes Stats
|
||||
- [x] Nodes Info
|
||||
- [x] Task Management API
|
||||
- [ ] Nodes hot_threads
|
||||
- [ ] Cluster Allocation Explain API
|
||||
|
||||
### Query DSL
|
||||
|
||||
- [x] Match All Query
|
||||
- [x] Inner hits
|
||||
- Full text queries
|
||||
- [x] Match Query
|
||||
- [x] Match Phrase Query
|
||||
- [x] Match Phrase Prefix Query
|
||||
- [x] Multi Match Query
|
||||
- [x] Common Terms Query
|
||||
- [x] Query String Query
|
||||
- [x] Simple Query String Query
|
||||
- Term level queries
|
||||
- [x] Term Query
|
||||
- [x] Terms Query
|
||||
- [x] Range Query
|
||||
- [x] Exists Query
|
||||
- [x] Prefix Query
|
||||
- [x] Wildcard Query
|
||||
- [x] Regexp Query
|
||||
- [x] Fuzzy Query
|
||||
- [x] Type Query
|
||||
- [x] Ids Query
|
||||
- Compound queries
|
||||
- [x] Constant Score Query
|
||||
- [x] Bool Query
|
||||
- [x] Dis Max Query
|
||||
- [x] Function Score Query
|
||||
- [x] Boosting Query
|
||||
- [x] Indices Query
|
||||
- Joining queries
|
||||
- [x] Nested Query
|
||||
- [x] Has Child Query
|
||||
- [x] Has Parent Query
|
||||
- [x] Parent Id Query
|
||||
- Geo queries
|
||||
- [ ] GeoShape Query
|
||||
- [x] Geo Bounding Box Query
|
||||
- [x] Geo Distance Query
|
||||
- [ ] Geo Distance Range Query
|
||||
- [x] Geo Polygon Query
|
||||
- [ ] Geohash Cell Query
|
||||
- Specialized queries
|
||||
- [x] More Like This Query
|
||||
- [x] Template Query
|
||||
- [x] Script Query
|
||||
- [x] Percolate Query
|
||||
- Span queries
|
||||
- [ ] Span Term Query
|
||||
- [ ] Span Multi Term Query
|
||||
- [ ] Span First Query
|
||||
- [ ] Span Near Query
|
||||
- [ ] Span Or Query
|
||||
- [ ] Span Not Query
|
||||
- [ ] Span Containing Query
|
||||
- [ ] Span Within Query
|
||||
- [ ] Span Field Masking Query
|
||||
- [ ] Minimum Should Match
|
||||
- [ ] Multi Term Query Rewrite
|
||||
|
||||
### Modules
|
||||
|
||||
- Snapshot and Restore
|
||||
- [x] Repositories
|
||||
- [ ] Snapshot
|
||||
- [ ] Restore
|
||||
- [ ] Snapshot status
|
||||
- [ ] Monitoring snapshot/restore status
|
||||
- [ ] Stopping currently running snapshot and restore
|
||||
|
||||
### Sorting
|
||||
|
||||
- [x] Sort by score
|
||||
- [x] Sort by field
|
||||
- [x] Sort by geo distance
|
||||
- [x] Sort by script
|
||||
- [x] Sort by doc
|
||||
|
||||
### Scrolling
|
||||
|
||||
Scrolling is supported via a `ScrollService`. It supports an iterator-like interface.
|
||||
The `ClearScroll` API is implemented as well.
|
||||
|
||||
A pattern for [efficiently scrolling in parallel](https://github.com/olivere/elastic/wiki/ScrollParallel)
|
||||
is described in the [Wiki](https://github.com/olivere/elastic/wiki).
|
||||
|
||||
## How to contribute
|
||||
|
||||
Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md).
|
||||
|
||||
## Credits
|
||||
|
||||
Thanks a lot to the great folks working hard on
|
||||
[Elasticsearch](https://www.elastic.co/products/elasticsearch)
|
||||
and
|
||||
[Go](https://golang.org/).
|
||||
|
||||
Elastic uses portions of the
|
||||
[uritemplates](https://github.com/jtacoma/uritemplates) library
|
||||
by Joshua Tacoma and
|
||||
[backoff](https://github.com/cenkalti/backoff) by Cenk Altı.
|
||||
|
||||
## LICENSE
|
||||
|
||||
MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
|
||||
or the LICENSE file provided in the repository for details.
|
||||
11
vendor/gopkg.in/olivere/elastic.v5/acknowledged_response.go
generated
vendored
Normal file
11
vendor/gopkg.in/olivere/elastic.v5/acknowledged_response.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
// AcknowledgedResponse is returned from various APIs. It simply indicates
// whether the operation is ack'd or not.
type AcknowledgedResponse struct {
	// Acknowledged reports whether the request was acknowledged.
	Acknowledged bool `json:"acknowledged"`
}
|
||||
148
vendor/gopkg.in/olivere/elastic.v5/backoff.go
generated
vendored
Normal file
148
vendor/gopkg.in/olivere/elastic.v5/backoff.go
generated
vendored
Normal file
@@ -0,0 +1,148 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BackoffFunc specifies the signature of a function that returns the
// time to wait before the next call to a resource. To stop retrying
// return false in the 2nd return value.
type BackoffFunc func(retry int) (time.Duration, bool)

// Backoff allows callers to implement their own Backoff strategy.
// The retry argument is the zero-based number of the attempt about
// to be made.
type Backoff interface {
	// Next implements a BackoffFunc.
	Next(retry int) (time.Duration, bool)
}
|
||||
|
||||
// -- ZeroBackoff --

// ZeroBackoff is a fixed backoff policy whose backoff time is always zero,
// meaning that the operation is retried immediately without waiting,
// indefinitely.
type ZeroBackoff struct{}

// Next implements BackoffFunc for ZeroBackoff: it always permits an
// immediate retry with no delay.
func (ZeroBackoff) Next(retry int) (time.Duration, bool) {
	return 0, true
}
|
||||
|
||||
// -- StopBackoff --

// StopBackoff is a fixed backoff policy that always returns false for
// Next(), meaning that the operation should never be retried.
type StopBackoff struct{}

// Next implements BackoffFunc for StopBackoff: it always aborts retrying.
func (StopBackoff) Next(retry int) (time.Duration, bool) {
	return 0, false
}
|
||||
|
||||
// -- ConstantBackoff --
|
||||
|
||||
// ConstantBackoff is a backoff policy that always returns the same delay.
|
||||
type ConstantBackoff struct {
|
||||
interval time.Duration
|
||||
}
|
||||
|
||||
// NewConstantBackoff returns a new ConstantBackoff.
|
||||
func NewConstantBackoff(interval time.Duration) *ConstantBackoff {
|
||||
return &ConstantBackoff{interval: interval}
|
||||
}
|
||||
|
||||
// Next implements BackoffFunc for ConstantBackoff.
|
||||
func (b *ConstantBackoff) Next(retry int) (time.Duration, bool) {
|
||||
return b.interval, true
|
||||
}
|
||||
|
||||
// -- Exponential --
|
||||
|
||||
// ExponentialBackoff implements the simple exponential backoff described by
|
||||
// Douglas Thain at http://dthain.blogspot.de/2009/02/exponential-backoff-in-distributed.html.
|
||||
type ExponentialBackoff struct {
|
||||
t float64 // initial timeout (in msec)
|
||||
f float64 // exponential factor (e.g. 2)
|
||||
m float64 // maximum timeout (in msec)
|
||||
}
|
||||
|
||||
// NewExponentialBackoff returns a ExponentialBackoff backoff policy.
|
||||
// Use initialTimeout to set the first/minimal interval
|
||||
// and maxTimeout to set the maximum wait interval.
|
||||
func NewExponentialBackoff(initialTimeout, maxTimeout time.Duration) *ExponentialBackoff {
|
||||
return &ExponentialBackoff{
|
||||
t: float64(int64(initialTimeout / time.Millisecond)),
|
||||
f: 2.0,
|
||||
m: float64(int64(maxTimeout / time.Millisecond)),
|
||||
}
|
||||
}
|
||||
|
||||
// Next implements BackoffFunc for ExponentialBackoff.
|
||||
func (b *ExponentialBackoff) Next(retry int) (time.Duration, bool) {
|
||||
r := 1.0 + rand.Float64() // random number in [1..2]
|
||||
m := math.Min(r*b.t*math.Pow(b.f, float64(retry)), b.m)
|
||||
if m >= b.m {
|
||||
return 0, false
|
||||
}
|
||||
d := time.Duration(int64(m)) * time.Millisecond
|
||||
return d, true
|
||||
}
|
||||
|
||||
// -- Simple Backoff --
|
||||
|
||||
// SimpleBackoff takes a list of fixed values for backoff intervals.
|
||||
// Each call to Next returns the next value from that fixed list.
|
||||
// After each value is returned, subsequent calls to Next will only return
|
||||
// the last element. The values are optionally "jittered" (off by default).
|
||||
type SimpleBackoff struct {
|
||||
sync.Mutex
|
||||
ticks []int
|
||||
jitter bool
|
||||
}
|
||||
|
||||
// NewSimpleBackoff creates a SimpleBackoff algorithm with the specified
|
||||
// list of fixed intervals in milliseconds.
|
||||
func NewSimpleBackoff(ticks ...int) *SimpleBackoff {
|
||||
return &SimpleBackoff{
|
||||
ticks: ticks,
|
||||
jitter: false,
|
||||
}
|
||||
}
|
||||
|
||||
// Jitter enables or disables jittering values.
|
||||
func (b *SimpleBackoff) Jitter(flag bool) *SimpleBackoff {
|
||||
b.Lock()
|
||||
b.jitter = flag
|
||||
b.Unlock()
|
||||
return b
|
||||
}
|
||||
|
||||
// jitter randomizes the interval to return a value of [0.5*millis .. 1.5*millis].
|
||||
func jitter(millis int) int {
|
||||
if millis <= 0 {
|
||||
return 0
|
||||
}
|
||||
return millis/2 + rand.Intn(millis)
|
||||
}
|
||||
|
||||
// Next implements BackoffFunc for SimpleBackoff.
// It returns the interval for the given retry (0-based) and reports
// whether the caller should keep retrying.
func (b *SimpleBackoff) Next(retry int) (time.Duration, bool) {
	b.Lock()
	defer b.Unlock()

	// The fixed list of intervals is exhausted: stop retrying.
	if retry >= len(b.ticks) {
		return 0, false
	}

	ms := b.ticks[retry]
	if b.jitter {
		// Randomize to roughly [0.5*ms .. 1.5*ms); see jitter().
		ms = jitter(ms)
	}
	return time.Duration(ms) * time.Millisecond, true
}
|
||||
142
vendor/gopkg.in/olivere/elastic.v5/backoff_test.go
generated
vendored
Normal file
142
vendor/gopkg.in/olivere/elastic.v5/backoff_test.go
generated
vendored
Normal file
@@ -0,0 +1,142 @@
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestZeroBackoff(t *testing.T) {
|
||||
b := ZeroBackoff{}
|
||||
_, ok := b.Next(0)
|
||||
if !ok {
|
||||
t.Fatalf("expected %v, got %v", true, ok)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStopBackoff(t *testing.T) {
|
||||
b := StopBackoff{}
|
||||
_, ok := b.Next(0)
|
||||
if ok {
|
||||
t.Fatalf("expected %v, got %v", false, ok)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConstantBackoff(t *testing.T) {
|
||||
b := NewConstantBackoff(time.Second)
|
||||
d, ok := b.Next(0)
|
||||
if !ok {
|
||||
t.Fatalf("expected %v, got %v", true, ok)
|
||||
}
|
||||
if d != time.Second {
|
||||
t.Fatalf("expected %v, got %v", time.Second, d)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSimpleBackoff(t *testing.T) {
|
||||
var tests = []struct {
|
||||
Duration time.Duration
|
||||
Continue bool
|
||||
}{
|
||||
// #0
|
||||
{
|
||||
Duration: 1 * time.Millisecond,
|
||||
Continue: true,
|
||||
},
|
||||
// #1
|
||||
{
|
||||
Duration: 2 * time.Millisecond,
|
||||
Continue: true,
|
||||
},
|
||||
// #2
|
||||
{
|
||||
Duration: 7 * time.Millisecond,
|
||||
Continue: true,
|
||||
},
|
||||
// #3
|
||||
{
|
||||
Duration: 0,
|
||||
Continue: false,
|
||||
},
|
||||
// #4
|
||||
{
|
||||
Duration: 0,
|
||||
Continue: false,
|
||||
},
|
||||
}
|
||||
|
||||
b := NewSimpleBackoff(1, 2, 7)
|
||||
|
||||
for i, tt := range tests {
|
||||
d, ok := b.Next(i)
|
||||
if got, want := ok, tt.Continue; got != want {
|
||||
t.Fatalf("#%d: expected %v, got %v", i, want, got)
|
||||
}
|
||||
if got, want := d, tt.Duration; got != want {
|
||||
t.Fatalf("#%d: expected %v, got %v", i, want, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExponentialBackoff(t *testing.T) {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
|
||||
min := time.Duration(8) * time.Millisecond
|
||||
max := time.Duration(256) * time.Millisecond
|
||||
b := NewExponentialBackoff(min, max)
|
||||
|
||||
between := func(value time.Duration, a, b int) bool {
|
||||
x := int(value / time.Millisecond)
|
||||
return a <= x && x <= b
|
||||
}
|
||||
|
||||
got, ok := b.Next(0)
|
||||
if !ok {
|
||||
t.Fatalf("expected %v, got %v", true, ok)
|
||||
}
|
||||
if !between(got, 8, 256) {
|
||||
t.Errorf("expected [%v..%v], got %v", 8, 256, got)
|
||||
}
|
||||
|
||||
got, ok = b.Next(1)
|
||||
if !ok {
|
||||
t.Fatalf("expected %v, got %v", true, ok)
|
||||
}
|
||||
if !between(got, 8, 256) {
|
||||
t.Errorf("expected [%v..%v], got %v", 8, 256, got)
|
||||
}
|
||||
|
||||
got, ok = b.Next(2)
|
||||
if !ok {
|
||||
t.Fatalf("expected %v, got %v", true, ok)
|
||||
}
|
||||
if !between(got, 8, 256) {
|
||||
t.Errorf("expected [%v..%v], got %v", 8, 256, got)
|
||||
}
|
||||
|
||||
got, ok = b.Next(3)
|
||||
if !ok {
|
||||
t.Fatalf("expected %v, got %v", true, ok)
|
||||
}
|
||||
if !between(got, 8, 256) {
|
||||
t.Errorf("expected [%v..%v], got %v", 8, 256, got)
|
||||
}
|
||||
|
||||
got, ok = b.Next(4)
|
||||
if !ok {
|
||||
t.Fatalf("expected %v, got %v", true, ok)
|
||||
}
|
||||
if !between(got, 8, 256) {
|
||||
t.Errorf("expected [%v..%v], got %v", 8, 256, got)
|
||||
}
|
||||
|
||||
got, ok = b.Next(5)
|
||||
if ok {
|
||||
t.Fatalf("expected %v, got %v", false, ok)
|
||||
}
|
||||
|
||||
got, ok = b.Next(6)
|
||||
if ok {
|
||||
t.Fatalf("expected %v, got %v", false, ok)
|
||||
}
|
||||
}
|
||||
396
vendor/gopkg.in/olivere/elastic.v5/bulk.go
generated
vendored
Normal file
396
vendor/gopkg.in/olivere/elastic.v5/bulk.go
generated
vendored
Normal file
@@ -0,0 +1,396 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// BulkService allows for batching bulk requests and sending them to
// Elasticsearch in one roundtrip. Use the Add method with BulkIndexRequest,
// BulkUpdateRequest, and BulkDeleteRequest to add bulk requests to a batch,
// then use Do to send them to Elasticsearch.
//
// BulkService will be reset after each Do call. In other words, you can
// reuse BulkService to send many batches. You do not have to create a new
// BulkService for each batch.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
// for more details.
type BulkService struct {
	client *Client // performs the HTTP request in Do

	index               string            // default index, placed in the request path by Do
	typ                 string            // default type, placed in the request path by Do
	requests            []BulkableRequest // batched requests; cleared by Do on success
	pipeline            string            // sent as the "pipeline" URL parameter
	timeout             string            // sent as the "timeout" URL parameter
	refresh             string            // sent as the "refresh" URL parameter
	routing             string            // sent as the "routing" URL parameter
	waitForActiveShards string            // sent as the "wait_for_active_shards" URL parameter
	pretty              bool              // sent as the "pretty" URL parameter

	// estimated bulk size in bytes, up to the request index sizeInBytesCursor
	sizeInBytes       int64
	sizeInBytesCursor int
}
|
||||
|
||||
// NewBulkService initializes a new BulkService.
|
||||
func NewBulkService(client *Client) *BulkService {
|
||||
builder := &BulkService{
|
||||
client: client,
|
||||
}
|
||||
return builder
|
||||
}
|
||||
|
||||
// reset clears the batched requests and the cached size estimation so the
// service can be reused for the next batch. Called by Do after a
// successful roundtrip.
func (s *BulkService) reset() {
	s.requests = make([]BulkableRequest, 0)
	s.sizeInBytes = 0
	s.sizeInBytesCursor = 0
}
|
||||
|
||||
// Index specifies the index to use for all batches. You may also leave
|
||||
// this blank and specify the index in the individual bulk requests.
|
||||
func (s *BulkService) Index(index string) *BulkService {
|
||||
s.index = index
|
||||
return s
|
||||
}
|
||||
|
||||
// Type specifies the type to use for all batches. You may also leave
|
||||
// this blank and specify the type in the individual bulk requests.
|
||||
func (s *BulkService) Type(typ string) *BulkService {
|
||||
s.typ = typ
|
||||
return s
|
||||
}
|
||||
|
||||
// Timeout is a global timeout for processing bulk requests. This is a
|
||||
// server-side timeout, i.e. it tells Elasticsearch the time after which
|
||||
// it should stop processing.
|
||||
func (s *BulkService) Timeout(timeout string) *BulkService {
|
||||
s.timeout = timeout
|
||||
return s
|
||||
}
|
||||
|
||||
// Refresh controls when changes made by this request are made visible
|
||||
// to search. The allowed values are: "true" (refresh the relevant
|
||||
// primary and replica shards immediately), "wait_for" (wait for the
|
||||
// changes to be made visible by a refresh before applying), or "false"
|
||||
// (no refresh related actions).
|
||||
func (s *BulkService) Refresh(refresh string) *BulkService {
|
||||
s.refresh = refresh
|
||||
return s
|
||||
}
|
||||
|
||||
// Routing specifies the routing value.
|
||||
func (s *BulkService) Routing(routing string) *BulkService {
|
||||
s.routing = routing
|
||||
return s
|
||||
}
|
||||
|
||||
// Pipeline specifies the pipeline id to preprocess incoming documents with.
|
||||
func (s *BulkService) Pipeline(pipeline string) *BulkService {
|
||||
s.pipeline = pipeline
|
||||
return s
|
||||
}
|
||||
|
||||
// WaitForActiveShards sets the number of shard copies that must be active
|
||||
// before proceeding with the bulk operation. Defaults to 1, meaning the
|
||||
// primary shard only. Set to `all` for all shard copies, otherwise set to
|
||||
// any non-negative value less than or equal to the total number of copies
|
||||
// for the shard (number of replicas + 1).
|
||||
func (s *BulkService) WaitForActiveShards(waitForActiveShards string) *BulkService {
|
||||
s.waitForActiveShards = waitForActiveShards
|
||||
return s
|
||||
}
|
||||
|
||||
// Pretty tells Elasticsearch whether to return a formatted JSON response.
|
||||
func (s *BulkService) Pretty(pretty bool) *BulkService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// Add adds bulkable requests, i.e. BulkIndexRequest, BulkUpdateRequest,
|
||||
// and/or BulkDeleteRequest.
|
||||
func (s *BulkService) Add(requests ...BulkableRequest) *BulkService {
|
||||
for _, r := range requests {
|
||||
s.requests = append(s.requests, r)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// EstimatedSizeInBytes returns the estimated size of all bulkable
// requests added via Add.
func (s *BulkService) EstimatedSizeInBytes() int64 {
	// Fast path: every request up to the cursor has already been summed.
	if s.sizeInBytesCursor == len(s.requests) {
		return s.sizeInBytes
	}
	// Only estimate the requests added since the last call, advancing the
	// cursor so each request is counted exactly once.
	for _, r := range s.requests[s.sizeInBytesCursor:] {
		s.sizeInBytes += s.estimateSizeInBytes(r)
		s.sizeInBytesCursor++
	}
	return s.sizeInBytes
}
|
||||
|
||||
// estimateSizeInBytes returns the estimated size of the given
// bulkable request, i.e. BulkIndexRequest, BulkUpdateRequest, and
// BulkDeleteRequest.
//
// A serialization error from Source is deliberately ignored here: the
// result is only an estimate, and Do surfaces the same error (via
// bodyAsString) when the batch is actually sent.
func (s *BulkService) estimateSizeInBytes(r BulkableRequest) int64 {
	lines, _ := r.Source()
	size := 0
	for _, line := range lines {
		// +1 for the \n
		size += len(line) + 1
	}
	return int64(size)
}
|
||||
|
||||
// NumberOfActions returns the number of bulkable requests that need to
|
||||
// be sent to Elasticsearch on the next batch.
|
||||
func (s *BulkService) NumberOfActions() int {
|
||||
return len(s.requests)
|
||||
}
|
||||
|
||||
func (s *BulkService) bodyAsString() (string, error) {
|
||||
var buf bytes.Buffer
|
||||
|
||||
for _, req := range s.requests {
|
||||
source, err := req.Source()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, line := range source {
|
||||
buf.WriteString(line)
|
||||
buf.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// Do sends the batched requests to Elasticsearch. Note that, when successful,
// you can reuse the BulkService for the next batch as the list of bulk
// requests is cleared on success.
func (s *BulkService) Do(ctx context.Context) (*BulkResponse, error) {
	// Refuse to send an empty batch.
	if s.NumberOfActions() == 0 {
		return nil, errors.New("elastic: No bulk actions to commit")
	}

	// Serialize all batched requests into the newline-delimited body.
	body, err := s.bodyAsString()
	if err != nil {
		return nil, err
	}

	// Build the URL path: /<index>/<type>/_bulk, with index and type
	// optional. The names are expanded through uritemplates.
	path := "/"
	if len(s.index) > 0 {
		index, err := uritemplates.Expand("{index}", map[string]string{
			"index": s.index,
		})
		if err != nil {
			return nil, err
		}
		path += index + "/"
	}
	if len(s.typ) > 0 {
		typ, err := uritemplates.Expand("{type}", map[string]string{
			"type": s.typ,
		})
		if err != nil {
			return nil, err
		}
		path += typ + "/"
	}
	path += "_bulk"

	// URL parameters; only explicitly-set options are sent.
	params := make(url.Values)
	if s.pretty {
		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
	}
	if s.pipeline != "" {
		params.Set("pipeline", s.pipeline)
	}
	if s.refresh != "" {
		params.Set("refresh", s.refresh)
	}
	if s.routing != "" {
		params.Set("routing", s.routing)
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	if s.waitForActiveShards != "" {
		params.Set("wait_for_active_shards", s.waitForActiveShards)
	}

	// Perform the HTTP request.
	res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
	if err != nil {
		return nil, err
	}

	// Decode the response body into a BulkResponse.
	ret := new(BulkResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}

	// Reset so the service can be reused; only done on success, so the
	// batch is preserved for a retry when an error occurred above.
	s.reset()

	return ret, nil
}
|
||||
|
||||
// BulkResponse is a response to a bulk execution.
|
||||
//
|
||||
// Example:
|
||||
// {
|
||||
// "took":3,
|
||||
// "errors":false,
|
||||
// "items":[{
|
||||
// "index":{
|
||||
// "_index":"index1",
|
||||
// "_type":"tweet",
|
||||
// "_id":"1",
|
||||
// "_version":3,
|
||||
// "status":201
|
||||
// }
|
||||
// },{
|
||||
// "index":{
|
||||
// "_index":"index2",
|
||||
// "_type":"tweet",
|
||||
// "_id":"2",
|
||||
// "_version":3,
|
||||
// "status":200
|
||||
// }
|
||||
// },{
|
||||
// "delete":{
|
||||
// "_index":"index1",
|
||||
// "_type":"tweet",
|
||||
// "_id":"1",
|
||||
// "_version":4,
|
||||
// "status":200,
|
||||
// "found":true
|
||||
// }
|
||||
// },{
|
||||
// "update":{
|
||||
// "_index":"index2",
|
||||
// "_type":"tweet",
|
||||
// "_id":"2",
|
||||
// "_version":4,
|
||||
// "status":200
|
||||
// }
|
||||
// }]
|
||||
// }
|
||||
type BulkResponse struct {
|
||||
Took int `json:"took,omitempty"`
|
||||
Errors bool `json:"errors,omitempty"`
|
||||
Items []map[string]*BulkResponseItem `json:"items,omitempty"`
|
||||
}
|
||||
|
||||
// BulkResponseItem is the result of a single bulk request.
|
||||
type BulkResponseItem struct {
|
||||
Index string `json:"_index,omitempty"`
|
||||
Type string `json:"_type,omitempty"`
|
||||
Id string `json:"_id,omitempty"`
|
||||
Version int64 `json:"_version,omitempty"`
|
||||
Status int `json:"status,omitempty"`
|
||||
Found bool `json:"found,omitempty"`
|
||||
Error *ErrorDetails `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// Indexed returns all bulk request results of "index" actions.
|
||||
func (r *BulkResponse) Indexed() []*BulkResponseItem {
|
||||
return r.ByAction("index")
|
||||
}
|
||||
|
||||
// Created returns all bulk request results of "create" actions.
|
||||
func (r *BulkResponse) Created() []*BulkResponseItem {
|
||||
return r.ByAction("create")
|
||||
}
|
||||
|
||||
// Updated returns all bulk request results of "update" actions.
|
||||
func (r *BulkResponse) Updated() []*BulkResponseItem {
|
||||
return r.ByAction("update")
|
||||
}
|
||||
|
||||
// Deleted returns all bulk request results of "delete" actions.
|
||||
func (r *BulkResponse) Deleted() []*BulkResponseItem {
|
||||
return r.ByAction("delete")
|
||||
}
|
||||
|
||||
// ByAction returns all bulk request results of a certain action,
|
||||
// e.g. "index" or "delete".
|
||||
func (r *BulkResponse) ByAction(action string) []*BulkResponseItem {
|
||||
if r.Items == nil {
|
||||
return nil
|
||||
}
|
||||
var items []*BulkResponseItem
|
||||
for _, item := range r.Items {
|
||||
if result, found := item[action]; found {
|
||||
items = append(items, result)
|
||||
}
|
||||
}
|
||||
return items
|
||||
}
|
||||
|
||||
// ById returns all bulk request results of a given document id,
|
||||
// regardless of the action ("index", "delete" etc.).
|
||||
func (r *BulkResponse) ById(id string) []*BulkResponseItem {
|
||||
if r.Items == nil {
|
||||
return nil
|
||||
}
|
||||
var items []*BulkResponseItem
|
||||
for _, item := range r.Items {
|
||||
for _, result := range item {
|
||||
if result.Id == id {
|
||||
items = append(items, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
return items
|
||||
}
|
||||
|
||||
// Failed returns those items of a bulk response that have errors,
|
||||
// i.e. those that don't have a status code between 200 and 299.
|
||||
func (r *BulkResponse) Failed() []*BulkResponseItem {
|
||||
if r.Items == nil {
|
||||
return nil
|
||||
}
|
||||
var errors []*BulkResponseItem
|
||||
for _, item := range r.Items {
|
||||
for _, result := range item {
|
||||
if !(result.Status >= 200 && result.Status <= 299) {
|
||||
errors = append(errors, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
return errors
|
||||
}
|
||||
|
||||
// Succeeded returns those items of a bulk response that have no errors,
|
||||
// i.e. those have a status code between 200 and 299.
|
||||
func (r *BulkResponse) Succeeded() []*BulkResponseItem {
|
||||
if r.Items == nil {
|
||||
return nil
|
||||
}
|
||||
var succeeded []*BulkResponseItem
|
||||
for _, item := range r.Items {
|
||||
for _, result := range item {
|
||||
if result.Status >= 200 && result.Status <= 299 {
|
||||
succeeded = append(succeeded, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
return succeeded
|
||||
}
|
||||
145
vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request.go
generated
vendored
Normal file
145
vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request.go
generated
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// -- Bulk delete request --
|
||||
|
||||
// BulkDeleteRequest is a request to remove a document from Elasticsearch.
|
||||
//
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
|
||||
// for details.
|
||||
type BulkDeleteRequest struct {
|
||||
BulkableRequest
|
||||
index string
|
||||
typ string
|
||||
id string
|
||||
parent string
|
||||
routing string
|
||||
version int64 // default is MATCH_ANY
|
||||
versionType string // default is "internal"
|
||||
|
||||
source []string
|
||||
}
|
||||
|
||||
// NewBulkDeleteRequest returns a new BulkDeleteRequest.
|
||||
func NewBulkDeleteRequest() *BulkDeleteRequest {
|
||||
return &BulkDeleteRequest{}
|
||||
}
|
||||
|
||||
// Index specifies the Elasticsearch index to use for this delete request.
|
||||
// If unspecified, the index set on the BulkService will be used.
|
||||
func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest {
|
||||
r.index = index
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// Type specifies the Elasticsearch type to use for this delete request.
|
||||
// If unspecified, the type set on the BulkService will be used.
|
||||
func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest {
|
||||
r.typ = typ
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// Id specifies the identifier of the document to delete.
|
||||
func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest {
|
||||
r.id = id
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// Parent specifies the parent of the request, which is used in parent/child
|
||||
// mappings.
|
||||
func (r *BulkDeleteRequest) Parent(parent string) *BulkDeleteRequest {
|
||||
r.parent = parent
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// Routing specifies a routing value for the request.
|
||||
func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest {
|
||||
r.routing = routing
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// Version indicates the version to be deleted as part of an optimistic
|
||||
// concurrency model.
|
||||
func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest {
|
||||
r.version = version
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// VersionType can be "internal" (default), "external", "external_gte",
|
||||
// "external_gt", or "force".
|
||||
func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest {
|
||||
r.versionType = versionType
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// String returns the on-wire representation of the delete request,
|
||||
// concatenated as a single string.
|
||||
func (r *BulkDeleteRequest) String() string {
|
||||
lines, err := r.Source()
|
||||
if err != nil {
|
||||
return fmt.Sprintf("error: %v", err)
|
||||
}
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// Source returns the on-wire representation of the delete request,
|
||||
// split into an action-and-meta-data line and an (optional) source line.
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
|
||||
// for details.
|
||||
func (r *BulkDeleteRequest) Source() ([]string, error) {
|
||||
if r.source != nil {
|
||||
return r.source, nil
|
||||
}
|
||||
lines := make([]string, 1)
|
||||
|
||||
source := make(map[string]interface{})
|
||||
deleteCommand := make(map[string]interface{})
|
||||
if r.index != "" {
|
||||
deleteCommand["_index"] = r.index
|
||||
}
|
||||
if r.typ != "" {
|
||||
deleteCommand["_type"] = r.typ
|
||||
}
|
||||
if r.id != "" {
|
||||
deleteCommand["_id"] = r.id
|
||||
}
|
||||
if r.parent != "" {
|
||||
deleteCommand["_parent"] = r.parent
|
||||
}
|
||||
if r.routing != "" {
|
||||
deleteCommand["_routing"] = r.routing
|
||||
}
|
||||
if r.version > 0 {
|
||||
deleteCommand["_version"] = r.version
|
||||
}
|
||||
if r.versionType != "" {
|
||||
deleteCommand["_version_type"] = r.versionType
|
||||
}
|
||||
source["delete"] = deleteCommand
|
||||
|
||||
body, err := json.Marshal(source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
lines[0] = string(body)
|
||||
r.source = lines
|
||||
|
||||
return lines, nil
|
||||
}
|
||||
68
vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request_test.go
generated
vendored
Normal file
68
vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request_test.go
generated
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBulkDeleteRequestSerialization(t *testing.T) {
|
||||
tests := []struct {
|
||||
Request BulkableRequest
|
||||
Expected []string
|
||||
}{
|
||||
// #0
|
||||
{
|
||||
Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1"),
|
||||
Expected: []string{
|
||||
`{"delete":{"_id":"1","_index":"index1","_type":"tweet"}}`,
|
||||
},
|
||||
},
|
||||
// #1
|
||||
{
|
||||
Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1").Parent("2"),
|
||||
Expected: []string{
|
||||
`{"delete":{"_id":"1","_index":"index1","_parent":"2","_type":"tweet"}}`,
|
||||
},
|
||||
},
|
||||
// #2
|
||||
{
|
||||
Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1").Routing("3"),
|
||||
Expected: []string{
|
||||
`{"delete":{"_id":"1","_index":"index1","_routing":"3","_type":"tweet"}}`,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
lines, err := test.Request.Source()
|
||||
if err != nil {
|
||||
t.Fatalf("case #%d: expected no error, got: %v", i, err)
|
||||
}
|
||||
if lines == nil {
|
||||
t.Fatalf("case #%d: expected lines, got nil", i)
|
||||
}
|
||||
if len(lines) != len(test.Expected) {
|
||||
t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
|
||||
}
|
||||
for j, line := range lines {
|
||||
if line != test.Expected[j] {
|
||||
t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var bulkDeleteRequestSerializationResult string
|
||||
|
||||
func BenchmarkBulkDeleteRequestSerialization(b *testing.B) {
|
||||
r := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
|
||||
var s string
|
||||
for n := 0; n < b.N; n++ {
|
||||
s = r.String()
|
||||
r.source = nil // Don't let caching spoil the benchmark
|
||||
}
|
||||
bulkDeleteRequestSerializationResult = s // ensure the compiler doesn't optimize
|
||||
}
|
||||
225
vendor/gopkg.in/olivere/elastic.v5/bulk_index_request.go
generated
vendored
Normal file
225
vendor/gopkg.in/olivere/elastic.v5/bulk_index_request.go
generated
vendored
Normal file
@@ -0,0 +1,225 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// BulkIndexRequest is a request to add a document to Elasticsearch.
|
||||
//
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
|
||||
// for details.
|
||||
type BulkIndexRequest struct {
|
||||
BulkableRequest
|
||||
index string
|
||||
typ string
|
||||
id string
|
||||
opType string
|
||||
routing string
|
||||
parent string
|
||||
version int64 // default is MATCH_ANY
|
||||
versionType string // default is "internal"
|
||||
doc interface{}
|
||||
pipeline string
|
||||
retryOnConflict *int
|
||||
ttl string
|
||||
|
||||
source []string
|
||||
}
|
||||
|
||||
// NewBulkIndexRequest returns a new BulkIndexRequest.
|
||||
// The operation type is "index" by default.
|
||||
func NewBulkIndexRequest() *BulkIndexRequest {
|
||||
return &BulkIndexRequest{
|
||||
opType: "index",
|
||||
}
|
||||
}
|
||||
|
||||
// Index specifies the Elasticsearch index to use for this index request.
|
||||
// If unspecified, the index set on the BulkService will be used.
|
||||
func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest {
|
||||
r.index = index
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// Type specifies the Elasticsearch type to use for this index request.
|
||||
// If unspecified, the type set on the BulkService will be used.
|
||||
func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest {
|
||||
r.typ = typ
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// Id specifies the identifier of the document to index.
|
||||
func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest {
|
||||
r.id = id
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// OpType specifies if this request should follow create-only or upsert
|
||||
// behavior. This follows the OpType of the standard document index API.
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html#operation-type
|
||||
// for details.
|
||||
func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest {
|
||||
r.opType = opType
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// Routing specifies a routing value for the request.
|
||||
func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest {
|
||||
r.routing = routing
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// Parent specifies the identifier of the parent document (if available).
|
||||
func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest {
|
||||
r.parent = parent
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// Version indicates the version of the document as part of an optimistic
|
||||
// concurrency model.
|
||||
func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest {
|
||||
r.version = version
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// VersionType specifies how versions are created. It can be e.g. internal,
|
||||
// external, external_gte, or force.
|
||||
//
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html#index-versioning
|
||||
// for details.
|
||||
func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest {
|
||||
r.versionType = versionType
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// Doc specifies the document to index.
|
||||
func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest {
|
||||
r.doc = doc
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// RetryOnConflict specifies how often to retry in case of a version conflict.
|
||||
func (r *BulkIndexRequest) RetryOnConflict(retryOnConflict int) *BulkIndexRequest {
|
||||
r.retryOnConflict = &retryOnConflict
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// TTL is an expiration time for the document.
|
||||
func (r *BulkIndexRequest) TTL(ttl string) *BulkIndexRequest {
|
||||
r.ttl = ttl
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// Pipeline to use while processing the request.
|
||||
func (r *BulkIndexRequest) Pipeline(pipeline string) *BulkIndexRequest {
|
||||
r.pipeline = pipeline
|
||||
r.source = nil
|
||||
return r
|
||||
}
|
||||
|
||||
// String returns the on-wire representation of the index request,
|
||||
// concatenated as a single string.
|
||||
func (r *BulkIndexRequest) String() string {
|
||||
lines, err := r.Source()
|
||||
if err != nil {
|
||||
return fmt.Sprintf("error: %v", err)
|
||||
}
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// Source returns the on-wire representation of the index request,
|
||||
// split into an action-and-meta-data line and an (optional) source line.
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
|
||||
// for details.
|
||||
func (r *BulkIndexRequest) Source() ([]string, error) {
|
||||
// { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
|
||||
// { "field1" : "value1" }
|
||||
|
||||
if r.source != nil {
|
||||
return r.source, nil
|
||||
}
|
||||
|
||||
lines := make([]string, 2)
|
||||
|
||||
// "index" ...
|
||||
command := make(map[string]interface{})
|
||||
indexCommand := make(map[string]interface{})
|
||||
if r.index != "" {
|
||||
indexCommand["_index"] = r.index
|
||||
}
|
||||
if r.typ != "" {
|
||||
indexCommand["_type"] = r.typ
|
||||
}
|
||||
if r.id != "" {
|
||||
indexCommand["_id"] = r.id
|
||||
}
|
||||
if r.routing != "" {
|
||||
indexCommand["_routing"] = r.routing
|
||||
}
|
||||
if r.parent != "" {
|
||||
indexCommand["_parent"] = r.parent
|
||||
}
|
||||
if r.version > 0 {
|
||||
indexCommand["_version"] = r.version
|
||||
}
|
||||
if r.versionType != "" {
|
||||
indexCommand["_version_type"] = r.versionType
|
||||
}
|
||||
if r.retryOnConflict != nil {
|
||||
indexCommand["_retry_on_conflict"] = *r.retryOnConflict
|
||||
}
|
||||
if r.ttl != "" {
|
||||
indexCommand["_ttl"] = r.ttl
|
||||
}
|
||||
if r.pipeline != "" {
|
||||
indexCommand["pipeline"] = r.pipeline
|
||||
}
|
||||
command[r.opType] = indexCommand
|
||||
line, err := json.Marshal(command)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lines[0] = string(line)
|
||||
|
||||
// "field1" ...
|
||||
if r.doc != nil {
|
||||
switch t := r.doc.(type) {
|
||||
default:
|
||||
body, err := json.Marshal(r.doc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lines[1] = string(body)
|
||||
case json.RawMessage:
|
||||
lines[1] = string(t)
|
||||
case *json.RawMessage:
|
||||
lines[1] = string(*t)
|
||||
case string:
|
||||
lines[1] = t
|
||||
case *string:
|
||||
lines[1] = *t
|
||||
}
|
||||
} else {
|
||||
lines[1] = "{}"
|
||||
}
|
||||
|
||||
r.source = lines
|
||||
return lines, nil
|
||||
}
|
||||
103
vendor/gopkg.in/olivere/elastic.v5/bulk_index_request_test.go
generated
vendored
Normal file
103
vendor/gopkg.in/olivere/elastic.v5/bulk_index_request_test.go
generated
vendored
Normal file
@@ -0,0 +1,103 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestBulkIndexRequestSerialization(t *testing.T) {
|
||||
tests := []struct {
|
||||
Request BulkableRequest
|
||||
Expected []string
|
||||
}{
|
||||
// #0
|
||||
{
|
||||
Request: NewBulkIndexRequest().Index("index1").Type("tweet").Id("1").
|
||||
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
|
||||
Expected: []string{
|
||||
`{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`,
|
||||
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
|
||||
},
|
||||
},
|
||||
// #1
|
||||
{
|
||||
Request: NewBulkIndexRequest().OpType("create").Index("index1").Type("tweet").Id("1").
|
||||
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
|
||||
Expected: []string{
|
||||
`{"create":{"_id":"1","_index":"index1","_type":"tweet"}}`,
|
||||
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
|
||||
},
|
||||
},
|
||||
// #2
|
||||
{
|
||||
Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").
|
||||
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
|
||||
Expected: []string{
|
||||
`{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`,
|
||||
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
|
||||
},
|
||||
},
|
||||
// #3
|
||||
{
|
||||
Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").RetryOnConflict(42).
|
||||
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
|
||||
Expected: []string{
|
||||
`{"index":{"_id":"1","_index":"index1","_retry_on_conflict":42,"_type":"tweet"}}`,
|
||||
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
|
||||
},
|
||||
},
|
||||
// #4
|
||||
{
|
||||
Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").Pipeline("my_pipeline").
|
||||
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
|
||||
Expected: []string{
|
||||
`{"index":{"_id":"1","_index":"index1","_type":"tweet","pipeline":"my_pipeline"}}`,
|
||||
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
|
||||
},
|
||||
},
|
||||
// #5
|
||||
{
|
||||
Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").TTL("1m").
|
||||
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
|
||||
Expected: []string{
|
||||
`{"index":{"_id":"1","_index":"index1","_ttl":"1m","_type":"tweet"}}`,
|
||||
`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
lines, err := test.Request.Source()
|
||||
if err != nil {
|
||||
t.Fatalf("case #%d: expected no error, got: %v", i, err)
|
||||
}
|
||||
if lines == nil {
|
||||
t.Fatalf("case #%d: expected lines, got nil", i)
|
||||
}
|
||||
if len(lines) != len(test.Expected) {
|
||||
t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
|
||||
}
|
||||
for j, line := range lines {
|
||||
if line != test.Expected[j] {
|
||||
t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var bulkIndexRequestSerializationResult string
|
||||
|
||||
func BenchmarkBulkIndexRequestSerialization(b *testing.B) {
|
||||
r := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").
|
||||
Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)})
|
||||
var s string
|
||||
for n := 0; n < b.N; n++ {
|
||||
s = r.String()
|
||||
r.source = nil // Don't let caching spoil the benchmark
|
||||
}
|
||||
bulkIndexRequestSerializationResult = s // ensure the compiler doesn't optimize
|
||||
}
|
||||
545
vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go
generated
vendored
Normal file
545
vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go
generated
vendored
Normal file
@@ -0,0 +1,545 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BulkProcessorService allows to easily process bulk requests. It allows setting
|
||||
// policies when to flush new bulk requests, e.g. based on a number of actions,
|
||||
// on the size of the actions, and/or to flush periodically. It also allows
|
||||
// to control the number of concurrent bulk requests allowed to be executed
|
||||
// in parallel.
|
||||
//
|
||||
// BulkProcessorService, by default, commits either every 1000 requests or when the
|
||||
// (estimated) size of the bulk requests exceeds 5 MB. However, it does not
|
||||
// commit periodically. BulkProcessorService also does retry by default, using
|
||||
// an exponential backoff algorithm.
|
||||
//
|
||||
// The caller is responsible for setting the index and type on every
|
||||
// bulk request added to BulkProcessorService.
|
||||
//
|
||||
// BulkProcessorService takes ideas from the BulkProcessor of the
|
||||
// Elasticsearch Java API as documented in
|
||||
// https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-docs-bulk-processor.html.
|
||||
type BulkProcessorService struct {
|
||||
c *Client
|
||||
beforeFn BulkBeforeFunc
|
||||
afterFn BulkAfterFunc
|
||||
name string // name of processor
|
||||
numWorkers int // # of workers (>= 1)
|
||||
bulkActions int // # of requests after which to commit
|
||||
bulkSize int // # of bytes after which to commit
|
||||
flushInterval time.Duration // periodic flush interval
|
||||
wantStats bool // indicates whether to gather statistics
|
||||
initialTimeout time.Duration // initial wait time before retry on errors
|
||||
maxTimeout time.Duration // max time to wait for retry on errors
|
||||
}
|
||||
|
||||
// NewBulkProcessorService creates a new BulkProcessorService.
|
||||
func NewBulkProcessorService(client *Client) *BulkProcessorService {
|
||||
return &BulkProcessorService{
|
||||
c: client,
|
||||
numWorkers: 1,
|
||||
bulkActions: 1000,
|
||||
bulkSize: 5 << 20, // 5 MB
|
||||
initialTimeout: time.Duration(200) * time.Millisecond,
|
||||
maxTimeout: time.Duration(10000) * time.Millisecond,
|
||||
}
|
||||
}
|
||||
|
||||
// BulkBeforeFunc defines the signature of callbacks that are executed
|
||||
// before a commit to Elasticsearch.
|
||||
type BulkBeforeFunc func(executionId int64, requests []BulkableRequest)
|
||||
|
||||
// BulkAfterFunc defines the signature of callbacks that are executed
|
||||
// after a commit to Elasticsearch. The err parameter signals an error.
|
||||
type BulkAfterFunc func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error)
|
||||
|
||||
// Before specifies a function to be executed before bulk requests get comitted
|
||||
// to Elasticsearch.
|
||||
func (s *BulkProcessorService) Before(fn BulkBeforeFunc) *BulkProcessorService {
|
||||
s.beforeFn = fn
|
||||
return s
|
||||
}
|
||||
|
||||
// After specifies a function to be executed when bulk requests have been
|
||||
// comitted to Elasticsearch. The After callback executes both when the
|
||||
// commit was successful as well as on failures.
|
||||
func (s *BulkProcessorService) After(fn BulkAfterFunc) *BulkProcessorService {
|
||||
s.afterFn = fn
|
||||
return s
|
||||
}
|
||||
|
||||
// Name is an optional name to identify this bulk processor.
|
||||
func (s *BulkProcessorService) Name(name string) *BulkProcessorService {
|
||||
s.name = name
|
||||
return s
|
||||
}
|
||||
|
||||
// Workers is the number of concurrent workers allowed to be
|
||||
// executed. Defaults to 1 and must be greater or equal to 1.
|
||||
func (s *BulkProcessorService) Workers(num int) *BulkProcessorService {
|
||||
s.numWorkers = num
|
||||
return s
|
||||
}
|
||||
|
||||
// BulkActions specifies when to flush based on the number of actions
|
||||
// currently added. Defaults to 1000 and can be set to -1 to be disabled.
|
||||
func (s *BulkProcessorService) BulkActions(bulkActions int) *BulkProcessorService {
|
||||
s.bulkActions = bulkActions
|
||||
return s
|
||||
}
|
||||
|
||||
// BulkSize specifies when to flush based on the size (in bytes) of the actions
|
||||
// currently added. Defaults to 5 MB and can be set to -1 to be disabled.
|
||||
func (s *BulkProcessorService) BulkSize(bulkSize int) *BulkProcessorService {
|
||||
s.bulkSize = bulkSize
|
||||
return s
|
||||
}
|
||||
|
||||
// FlushInterval specifies when to flush at the end of the given interval.
|
||||
// This is disabled by default. If you want the bulk processor to
|
||||
// operate completely asynchronously, set both BulkActions and BulkSize to
|
||||
// -1 and set the FlushInterval to a meaningful interval.
|
||||
func (s *BulkProcessorService) FlushInterval(interval time.Duration) *BulkProcessorService {
|
||||
s.flushInterval = interval
|
||||
return s
|
||||
}
|
||||
|
||||
// Stats tells bulk processor to gather stats while running.
|
||||
// Use Stats to return the stats. This is disabled by default.
|
||||
func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService {
|
||||
s.wantStats = wantStats
|
||||
return s
|
||||
}
|
||||
|
||||
// Do creates a new BulkProcessor and starts it.
|
||||
// Consider the BulkProcessor as a running instance that accepts bulk requests
|
||||
// and commits them to Elasticsearch, spreading the work across one or more
|
||||
// workers.
|
||||
//
|
||||
// You can interoperate with the BulkProcessor returned by Do, e.g. Start and
|
||||
// Stop (or Close) it.
|
||||
//
|
||||
// Context is an optional context that is passed into the bulk request
|
||||
// service calls. In contrast to other operations, this context is used in
|
||||
// a long running process. You could use it to pass e.g. loggers, but you
|
||||
// shouldn't use it for cancellation.
|
||||
//
|
||||
// Calling Do several times returns new BulkProcessors. You probably don't
|
||||
// want to do this. BulkProcessorService implements just a builder pattern.
|
||||
func (s *BulkProcessorService) Do(ctx context.Context) (*BulkProcessor, error) {
|
||||
p := newBulkProcessor(
|
||||
s.c,
|
||||
s.beforeFn,
|
||||
s.afterFn,
|
||||
s.name,
|
||||
s.numWorkers,
|
||||
s.bulkActions,
|
||||
s.bulkSize,
|
||||
s.flushInterval,
|
||||
s.wantStats,
|
||||
s.initialTimeout,
|
||||
s.maxTimeout)
|
||||
|
||||
err := p.Start(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// -- Bulk Processor Statistics --

// BulkProcessorStats is a snapshot of a running bulk processor's
// counters. Retrieve it via the processor's Stats func while running.
type BulkProcessorStats struct {
	Flushed   int64 // number of times the flush interval has been invoked
	Committed int64 // # of times workers committed bulk requests
	Indexed   int64 // # of requests indexed
	Created   int64 // # of requests that ES reported as creates (201)
	Updated   int64 // # of requests that ES reported as updates
	Deleted   int64 // # of requests that ES reported as deletes
	Succeeded int64 // # of requests that ES reported as successful
	Failed    int64 // # of requests that ES reported as failed

	Workers []*BulkProcessorWorkerStats // stats for each worker
}

// BulkProcessorWorkerStats holds the counters of a single worker.
type BulkProcessorWorkerStats struct {
	Queued       int64         // # of requests queued in this worker
	LastDuration time.Duration // duration of last commit
}

// newBulkProcessorStats returns a BulkProcessorStats with one empty
// per-worker entry for each of the given number of workers.
func newBulkProcessorStats(workers int) *BulkProcessorStats {
	st := &BulkProcessorStats{
		Workers: make([]*BulkProcessorWorkerStats, workers),
	}
	for i := range st.Workers {
		st.Workers[i] = &BulkProcessorWorkerStats{}
	}
	return st
}

// dup returns a deep copy of the statistics, including per-worker stats.
func (st *BulkProcessorStats) dup() *BulkProcessorStats {
	cp := &BulkProcessorStats{
		Flushed:   st.Flushed,
		Committed: st.Committed,
		Indexed:   st.Indexed,
		Created:   st.Created,
		Updated:   st.Updated,
		Deleted:   st.Deleted,
		Succeeded: st.Succeeded,
		Failed:    st.Failed,
	}
	for _, w := range st.Workers {
		cp.Workers = append(cp.Workers, w.dup())
	}
	return cp
}

// dup returns a copy of the per-worker statistics.
func (st *BulkProcessorWorkerStats) dup() *BulkProcessorWorkerStats {
	return &BulkProcessorWorkerStats{
		Queued:       st.Queued,
		LastDuration: st.LastDuration,
	}
}
|
||||
|
||||
// -- Bulk Processor --
|
||||
|
||||
// BulkProcessor encapsulates a task that accepts bulk requests and
|
||||
// orchestrates committing them to Elasticsearch via one or more workers.
|
||||
//
|
||||
// BulkProcessor is returned by setting up a BulkProcessorService and
|
||||
// calling the Do method.
|
||||
type BulkProcessor struct {
|
||||
c *Client
|
||||
beforeFn BulkBeforeFunc
|
||||
afterFn BulkAfterFunc
|
||||
name string
|
||||
bulkActions int
|
||||
bulkSize int
|
||||
numWorkers int
|
||||
executionId int64
|
||||
requestsC chan BulkableRequest
|
||||
workerWg sync.WaitGroup
|
||||
workers []*bulkWorker
|
||||
flushInterval time.Duration
|
||||
flusherStopC chan struct{}
|
||||
wantStats bool
|
||||
initialTimeout time.Duration // initial wait time before retry on errors
|
||||
maxTimeout time.Duration // max time to wait for retry on errors
|
||||
|
||||
startedMu sync.Mutex // guards the following block
|
||||
started bool
|
||||
|
||||
statsMu sync.Mutex // guards the following block
|
||||
stats *BulkProcessorStats
|
||||
}
|
||||
|
||||
func newBulkProcessor(
|
||||
client *Client,
|
||||
beforeFn BulkBeforeFunc,
|
||||
afterFn BulkAfterFunc,
|
||||
name string,
|
||||
numWorkers int,
|
||||
bulkActions int,
|
||||
bulkSize int,
|
||||
flushInterval time.Duration,
|
||||
wantStats bool,
|
||||
initialTimeout time.Duration,
|
||||
maxTimeout time.Duration) *BulkProcessor {
|
||||
return &BulkProcessor{
|
||||
c: client,
|
||||
beforeFn: beforeFn,
|
||||
afterFn: afterFn,
|
||||
name: name,
|
||||
numWorkers: numWorkers,
|
||||
bulkActions: bulkActions,
|
||||
bulkSize: bulkSize,
|
||||
flushInterval: flushInterval,
|
||||
wantStats: wantStats,
|
||||
initialTimeout: initialTimeout,
|
||||
maxTimeout: maxTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the bulk processor. If the processor is already started,
|
||||
// nil is returned.
|
||||
func (p *BulkProcessor) Start(ctx context.Context) error {
|
||||
p.startedMu.Lock()
|
||||
defer p.startedMu.Unlock()
|
||||
|
||||
if p.started {
|
||||
return nil
|
||||
}
|
||||
|
||||
// We must have at least one worker.
|
||||
if p.numWorkers < 1 {
|
||||
p.numWorkers = 1
|
||||
}
|
||||
|
||||
p.requestsC = make(chan BulkableRequest)
|
||||
p.executionId = 0
|
||||
p.stats = newBulkProcessorStats(p.numWorkers)
|
||||
|
||||
// Create and start up workers.
|
||||
p.workers = make([]*bulkWorker, p.numWorkers)
|
||||
for i := 0; i < p.numWorkers; i++ {
|
||||
p.workerWg.Add(1)
|
||||
p.workers[i] = newBulkWorker(p, i)
|
||||
go p.workers[i].work(ctx)
|
||||
}
|
||||
|
||||
// Start the ticker for flush (if enabled)
|
||||
if int64(p.flushInterval) > 0 {
|
||||
p.flusherStopC = make(chan struct{})
|
||||
go p.flusher(p.flushInterval)
|
||||
}
|
||||
|
||||
p.started = true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop is an alias for Close.
|
||||
func (p *BulkProcessor) Stop() error {
|
||||
return p.Close()
|
||||
}
|
||||
|
||||
// Close stops the bulk processor previously started with Do.
|
||||
// If it is already stopped, this is a no-op and nil is returned.
|
||||
//
|
||||
// By implementing Close, BulkProcessor implements the io.Closer interface.
|
||||
func (p *BulkProcessor) Close() error {
|
||||
p.startedMu.Lock()
|
||||
defer p.startedMu.Unlock()
|
||||
|
||||
// Already stopped? Do nothing.
|
||||
if !p.started {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop flusher (if enabled)
|
||||
if p.flusherStopC != nil {
|
||||
p.flusherStopC <- struct{}{}
|
||||
<-p.flusherStopC
|
||||
close(p.flusherStopC)
|
||||
p.flusherStopC = nil
|
||||
}
|
||||
|
||||
// Stop all workers.
|
||||
close(p.requestsC)
|
||||
p.workerWg.Wait()
|
||||
|
||||
p.started = false
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stats returns the latest bulk processor statistics.
|
||||
// Collecting stats must be enabled first by calling Stats(true) on
|
||||
// the service that created this processor.
|
||||
func (p *BulkProcessor) Stats() BulkProcessorStats {
|
||||
p.statsMu.Lock()
|
||||
defer p.statsMu.Unlock()
|
||||
return *p.stats.dup()
|
||||
}
|
||||
|
||||
// Add adds a single request to commit by the BulkProcessorService.
|
||||
//
|
||||
// The caller is responsible for setting the index and type on the request.
|
||||
func (p *BulkProcessor) Add(request BulkableRequest) {
|
||||
p.requestsC <- request
|
||||
}
|
||||
|
||||
// Flush manually asks all workers to commit their outstanding requests.
|
||||
// It returns only when all workers acknowledge completion.
|
||||
func (p *BulkProcessor) Flush() error {
|
||||
p.statsMu.Lock()
|
||||
p.stats.Flushed++
|
||||
p.statsMu.Unlock()
|
||||
|
||||
for _, w := range p.workers {
|
||||
w.flushC <- struct{}{}
|
||||
<-w.flushAckC // wait for completion
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// flusher is a single goroutine that periodically asks all workers to
|
||||
// commit their outstanding bulk requests. It is only started if
|
||||
// FlushInterval is greater than 0.
|
||||
func (p *BulkProcessor) flusher(interval time.Duration) {
|
||||
ticker := time.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C: // Periodic flush
|
||||
p.Flush() // TODO swallow errors here?
|
||||
|
||||
case <-p.flusherStopC:
|
||||
p.flusherStopC <- struct{}{}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// -- Bulk Worker --
|
||||
|
||||
// bulkWorker encapsulates a single worker, running in a goroutine,
|
||||
// receiving bulk requests and eventually committing them to Elasticsearch.
|
||||
// It is strongly bound to a BulkProcessor.
|
||||
type bulkWorker struct {
|
||||
p *BulkProcessor
|
||||
i int
|
||||
bulkActions int
|
||||
bulkSize int
|
||||
service *BulkService
|
||||
flushC chan struct{}
|
||||
flushAckC chan struct{}
|
||||
}
|
||||
|
||||
// newBulkWorker creates a new bulkWorker instance.
|
||||
func newBulkWorker(p *BulkProcessor, i int) *bulkWorker {
|
||||
return &bulkWorker{
|
||||
p: p,
|
||||
i: i,
|
||||
bulkActions: p.bulkActions,
|
||||
bulkSize: p.bulkSize,
|
||||
service: NewBulkService(p.c),
|
||||
flushC: make(chan struct{}),
|
||||
flushAckC: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// work waits for bulk requests and manual flush calls on the respective
|
||||
// channels and is invoked as a goroutine when the bulk processor is started.
|
||||
func (w *bulkWorker) work(ctx context.Context) {
|
||||
defer func() {
|
||||
w.p.workerWg.Done()
|
||||
close(w.flushAckC)
|
||||
close(w.flushC)
|
||||
}()
|
||||
|
||||
var stop bool
|
||||
for !stop {
|
||||
select {
|
||||
case req, open := <-w.p.requestsC:
|
||||
if open {
|
||||
// Received a new request
|
||||
w.service.Add(req)
|
||||
if w.commitRequired() {
|
||||
w.commit(ctx) // TODO swallow errors here?
|
||||
}
|
||||
} else {
|
||||
// Channel closed: Stop.
|
||||
stop = true
|
||||
if w.service.NumberOfActions() > 0 {
|
||||
w.commit(ctx) // TODO swallow errors here?
|
||||
}
|
||||
}
|
||||
|
||||
case <-w.flushC:
|
||||
// Commit outstanding requests
|
||||
if w.service.NumberOfActions() > 0 {
|
||||
w.commit(ctx) // TODO swallow errors here?
|
||||
}
|
||||
w.flushAckC <- struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// commit commits the bulk requests in the given service,
|
||||
// invoking callbacks as specified.
|
||||
func (w *bulkWorker) commit(ctx context.Context) error {
|
||||
var res *BulkResponse
|
||||
|
||||
// commitFunc will commit bulk requests and, on failure, be retried
|
||||
// via exponential backoff
|
||||
commitFunc := func() error {
|
||||
var err error
|
||||
res, err = w.service.Do(ctx)
|
||||
return err
|
||||
}
|
||||
// notifyFunc will be called if retry fails
|
||||
notifyFunc := func(err error) {
|
||||
w.p.c.errorf("elastic: bulk processor %q failed but will retry: %v", w.p.name, err)
|
||||
}
|
||||
|
||||
id := atomic.AddInt64(&w.p.executionId, 1)
|
||||
|
||||
// Update # documents in queue before eventual retries
|
||||
w.p.statsMu.Lock()
|
||||
if w.p.wantStats {
|
||||
w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests))
|
||||
}
|
||||
w.p.statsMu.Unlock()
|
||||
|
||||
// Save requests because they will be reset in commitFunc
|
||||
reqs := w.service.requests
|
||||
|
||||
// Invoke before callback
|
||||
if w.p.beforeFn != nil {
|
||||
w.p.beforeFn(id, reqs)
|
||||
}
|
||||
|
||||
// Commit bulk requests
|
||||
policy := NewExponentialBackoff(w.p.initialTimeout, w.p.maxTimeout)
|
||||
err := RetryNotify(commitFunc, policy, notifyFunc)
|
||||
w.updateStats(res)
|
||||
if err != nil {
|
||||
w.p.c.errorf("elastic: bulk processor %q failed: %v", w.p.name, err)
|
||||
}
|
||||
|
||||
// Invoke after callback
|
||||
if w.p.afterFn != nil {
|
||||
w.p.afterFn(id, reqs, res, err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *bulkWorker) updateStats(res *BulkResponse) {
|
||||
// Update stats
|
||||
if res != nil {
|
||||
w.p.statsMu.Lock()
|
||||
if w.p.wantStats {
|
||||
w.p.stats.Committed++
|
||||
if res != nil {
|
||||
w.p.stats.Indexed += int64(len(res.Indexed()))
|
||||
w.p.stats.Created += int64(len(res.Created()))
|
||||
w.p.stats.Updated += int64(len(res.Updated()))
|
||||
w.p.stats.Deleted += int64(len(res.Deleted()))
|
||||
w.p.stats.Succeeded += int64(len(res.Succeeded()))
|
||||
w.p.stats.Failed += int64(len(res.Failed()))
|
||||
}
|
||||
w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests))
|
||||
w.p.stats.Workers[w.i].LastDuration = time.Duration(int64(res.Took)) * time.Millisecond
|
||||
}
|
||||
w.p.statsMu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// commitRequired returns true if the service has to commit its
|
||||
// bulk requests. This can be either because the number of actions
|
||||
// or the estimated size in bytes is larger than specified in the
|
||||
// BulkProcessorService.
|
||||
func (w *bulkWorker) commitRequired() bool {
|
||||
if w.bulkActions >= 0 && w.service.NumberOfActions() >= w.bulkActions {
|
||||
return true
|
||||
}
|
||||
if w.bulkSize >= 0 && w.service.EstimatedSizeInBytes() >= int64(w.bulkSize) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
422
vendor/gopkg.in/olivere/elastic.v5/bulk_processor_test.go
generated
vendored
Normal file
422
vendor/gopkg.in/olivere/elastic.v5/bulk_processor_test.go
generated
vendored
Normal file
@@ -0,0 +1,422 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestBulkProcessorDefaults(t *testing.T) {
|
||||
client := setupTestClientAndCreateIndex(t)
|
||||
|
||||
p := client.BulkProcessor()
|
||||
if p == nil {
|
||||
t.Fatalf("expected BulkProcessorService; got: %v", p)
|
||||
}
|
||||
if got, want := p.name, ""; got != want {
|
||||
t.Errorf("expected %q; got: %q", want, got)
|
||||
}
|
||||
if got, want := p.numWorkers, 1; got != want {
|
||||
t.Errorf("expected %d; got: %d", want, got)
|
||||
}
|
||||
if got, want := p.bulkActions, 1000; got != want {
|
||||
t.Errorf("expected %d; got: %d", want, got)
|
||||
}
|
||||
if got, want := p.bulkSize, 5*1024*1024; got != want {
|
||||
t.Errorf("expected %d; got: %d", want, got)
|
||||
}
|
||||
if got, want := p.flushInterval, time.Duration(0); got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
if got, want := p.wantStats, false; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBulkProcessorCommitOnBulkActions(t *testing.T) {
|
||||
//client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
|
||||
client := setupTestClientAndCreateIndex(t)
|
||||
|
||||
testBulkProcessor(t,
|
||||
10000,
|
||||
client.BulkProcessor().
|
||||
Name("Actions-1").
|
||||
Workers(1).
|
||||
BulkActions(100).
|
||||
BulkSize(-1),
|
||||
)
|
||||
|
||||
testBulkProcessor(t,
|
||||
10000,
|
||||
client.BulkProcessor().
|
||||
Name("Actions-2").
|
||||
Workers(2).
|
||||
BulkActions(100).
|
||||
BulkSize(-1),
|
||||
)
|
||||
}
|
||||
|
||||
func TestBulkProcessorCommitOnBulkSize(t *testing.T) {
|
||||
//client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
|
||||
client := setupTestClientAndCreateIndex(t)
|
||||
|
||||
testBulkProcessor(t,
|
||||
10000,
|
||||
client.BulkProcessor().
|
||||
Name("Size-1").
|
||||
Workers(1).
|
||||
BulkActions(-1).
|
||||
BulkSize(64*1024),
|
||||
)
|
||||
|
||||
testBulkProcessor(t,
|
||||
10000,
|
||||
client.BulkProcessor().
|
||||
Name("Size-2").
|
||||
Workers(2).
|
||||
BulkActions(-1).
|
||||
BulkSize(64*1024),
|
||||
)
|
||||
}
|
||||
|
||||
// TestBulkProcessorBasedOnFlushInterval verifies that a processor with
// action- and size-based committing disabled still commits periodically
// via its flush interval, and that every added document ends up indexed.
// NOTE(review): timing-based (fixed sleeps); may be flaky on slow or
// loaded machines — confirm acceptable in CI.
func TestBulkProcessorBasedOnFlushInterval(t *testing.T) {
	//client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
	client := setupTestClientAndCreateIndex(t)

	// Counters updated atomically from the before/after callbacks, which
	// run on worker goroutines.
	var beforeRequests int64
	var befores int64
	var afters int64
	var failures int64
	var afterRequests int64

	// Count invocations and the number of requests each callback saw.
	beforeFn := func(executionId int64, requests []BulkableRequest) {
		atomic.AddInt64(&beforeRequests, int64(len(requests)))
		atomic.AddInt64(&befores, 1)
	}
	afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) {
		atomic.AddInt64(&afters, 1)
		if err != nil {
			atomic.AddInt64(&failures, 1)
		}
		atomic.AddInt64(&afterRequests, int64(len(requests)))
	}

	// Disable both commit thresholds so only the 1s interval can flush.
	svc := client.BulkProcessor().
		Name("FlushInterval-1").
		Workers(2).
		BulkActions(-1).
		BulkSize(-1).
		FlushInterval(1 * time.Second).
		Before(beforeFn).
		After(afterFn)

	p, err := svc.Do(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	const numDocs = 1000 // low-enough number that flush should be invoked

	// Queue documents; none of these should trigger a commit by itself.
	for i := 1; i <= numDocs; i++ {
		tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))}
		request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet)
		p.Add(request)
	}

	// Should flush at least once
	time.Sleep(2 * time.Second)

	err = p.Close()
	if err != nil {
		t.Fatal(err)
	}

	// The interval flusher must have fired, and every document must have
	// passed through both callbacks exactly once, with no failures.
	if p.stats.Flushed == 0 {
		t.Errorf("expected at least 1 flush; got: %d", p.stats.Flushed)
	}
	if got, want := beforeRequests, int64(numDocs); got != want {
		t.Errorf("expected %d requests to before callback; got: %d", want, got)
	}
	if got, want := afterRequests, int64(numDocs); got != want {
		t.Errorf("expected %d requests to after callback; got: %d", want, got)
	}
	if befores == 0 {
		t.Error("expected at least 1 call to before callback")
	}
	if afters == 0 {
		t.Error("expected at least 1 call to after callback")
	}
	if failures != 0 {
		t.Errorf("expected 0 calls to failure callback; got: %d", failures)
	}

	// Check number of documents that were bulk indexed
	_, err = p.c.Flush(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	count, err := p.c.Count(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if count != int64(numDocs) {
		t.Fatalf("expected %d documents; got: %d", numDocs, count)
	}
}
|
||||
|
||||
// TestBulkProcessorClose verifies that Close commits all outstanding
// requests even when no automatic commit trigger (actions, size, or
// interval) has fired yet, and that every document ends up indexed.
// NOTE(review): timing-based (fixed sleep); may be flaky on slow or
// loaded machines — confirm acceptable in CI.
func TestBulkProcessorClose(t *testing.T) {
	//client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
	client := setupTestClientAndCreateIndex(t)

	// Counters updated atomically from the before/after callbacks, which
	// run on worker goroutines.
	var beforeRequests int64
	var befores int64
	var afters int64
	var failures int64
	var afterRequests int64

	// Count invocations and the number of requests each callback saw.
	beforeFn := func(executionId int64, requests []BulkableRequest) {
		atomic.AddInt64(&beforeRequests, int64(len(requests)))
		atomic.AddInt64(&befores, 1)
	}
	afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) {
		atomic.AddInt64(&afters, 1)
		if err != nil {
			atomic.AddInt64(&failures, 1)
		}
		atomic.AddInt64(&afterRequests, int64(len(requests)))
	}

	// Thresholds disabled and a 30s interval: nothing should commit
	// before Close is called.
	p, err := client.BulkProcessor().
		Name("FlushInterval-1").
		Workers(2).
		BulkActions(-1).
		BulkSize(-1).
		FlushInterval(30 * time.Second). // 30 seconds to flush
		Before(beforeFn).After(afterFn).
		Do(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	const numDocs = 1000 // low-enough number that flush should be invoked

	for i := 1; i <= numDocs; i++ {
		tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))}
		request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet)
		p.Add(request)
	}

	// Should not flush because 30s > 1s
	time.Sleep(1 * time.Second)

	// Close should flush
	err = p.Close()
	if err != nil {
		t.Fatal(err)
	}

	// The interval flusher never fired; Close drained the workers.
	if p.stats.Flushed != 0 {
		t.Errorf("expected no flush; got: %d", p.stats.Flushed)
	}
	if got, want := beforeRequests, int64(numDocs); got != want {
		t.Errorf("expected %d requests to before callback; got: %d", want, got)
	}
	if got, want := afterRequests, int64(numDocs); got != want {
		t.Errorf("expected %d requests to after callback; got: %d", want, got)
	}
	if befores == 0 {
		t.Error("expected at least 1 call to before callback")
	}
	if afters == 0 {
		t.Error("expected at least 1 call to after callback")
	}
	if failures != 0 {
		t.Errorf("expected 0 calls to failure callback; got: %d", failures)
	}

	// Check number of documents that were bulk indexed
	_, err = p.c.Flush(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	count, err := p.c.Count(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if count != int64(numDocs) {
		t.Fatalf("expected %d documents; got: %d", numDocs, count)
	}
}
|
||||
|
||||
func TestBulkProcessorFlush(t *testing.T) {
|
||||
//client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
|
||||
client := setupTestClientAndCreateIndex(t)
|
||||
|
||||
p, err := client.BulkProcessor().
|
||||
Name("ManualFlush").
|
||||
Workers(10).
|
||||
BulkActions(-1).
|
||||
BulkSize(-1).
|
||||
FlushInterval(30 * time.Second). // 30 seconds to flush
|
||||
Stats(true).
|
||||
Do(context.Background())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
const numDocs = 100
|
||||
|
||||
for i := 1; i <= numDocs; i++ {
|
||||
tweet := tweet{User: "olivere", Message: fmt.Sprintf("%d. %s", i, randomString(rand.Intn(64)))}
|
||||
request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet)
|
||||
p.Add(request)
|
||||
}
|
||||
|
||||
// Should not flush because 30s > 1s
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
// No flush yet
|
||||
stats := p.Stats()
|
||||
if stats.Flushed != 0 {
|
||||
t.Errorf("expected no flush; got: %d", p.stats.Flushed)
|
||||
}
|
||||
|
||||
// Manual flush
|
||||
err = p.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
// Now flushed
|
||||
stats = p.Stats()
|
||||
if got, want := p.stats.Flushed, int64(1); got != want {
|
||||
t.Errorf("expected %d flush; got: %d", want, got)
|
||||
}
|
||||
|
||||
// Close should not start another flush
|
||||
err = p.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Still 1 flush
|
||||
stats = p.Stats()
|
||||
if got, want := p.stats.Flushed, int64(1); got != want {
|
||||
t.Errorf("expected %d flush; got: %d", want, got)
|
||||
}
|
||||
|
||||
// Check number of documents that were bulk indexed
|
||||
_, err = p.c.Flush(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
count, err := p.c.Count(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if count != int64(numDocs) {
|
||||
t.Fatalf("expected %d documents; got: %d", numDocs, count)
|
||||
}
|
||||
}
|
||||
|
||||
// -- Helper --
|
||||
|
||||
// testBulkProcessor is a shared driver for the BulkProcessor tests: it
// starts the given (pre-configured) BulkProcessorService with stats and
// before/after callbacks attached, bulk-indexes numDocs tweets through it,
// closes it, and then verifies the processor statistics, the callback
// invocation counts, and the final document count in the test index.
func testBulkProcessor(t *testing.T, numDocs int, svc *BulkProcessorService) {
	// Callback counters. Updated atomically because the processor may run
	// commits concurrently on multiple workers.
	var beforeRequests int64
	var befores int64
	var afters int64
	var failures int64
	var afterRequests int64

	beforeFn := func(executionId int64, requests []BulkableRequest) {
		atomic.AddInt64(&beforeRequests, int64(len(requests)))
		atomic.AddInt64(&befores, 1)
	}
	afterFn := func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) {
		atomic.AddInt64(&afters, 1)
		if err != nil {
			atomic.AddInt64(&failures, 1)
		}
		atomic.AddInt64(&afterRequests, int64(len(requests)))
	}

	p, err := svc.Before(beforeFn).After(afterFn).Stats(true).Do(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	// Feed numDocs index requests through the processor.
	for i := 1; i <= numDocs; i++ {
		tweet := tweet{User: "olivere", Message: fmt.Sprintf("%07d. %s", i, randomString(1+rand.Intn(63)))}
		request := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id(fmt.Sprintf("%d", i)).Doc(tweet)
		p.Add(request)
	}

	// Close commits outstanding requests; stats are stable afterwards.
	err = p.Close()
	if err != nil {
		t.Fatal(err)
	}

	stats := p.Stats()

	// No interval-driven flush is expected here; everything should have
	// gone out through regular commits.
	if stats.Flushed != 0 {
		t.Errorf("expected no flush; got: %d", stats.Flushed)
	}
	if stats.Committed <= 0 {
		t.Errorf("expected committed > %d; got: %d", 0, stats.Committed)
	}
	if got, want := stats.Indexed, int64(numDocs); got != want {
		t.Errorf("expected indexed = %d; got: %d", want, got)
	}
	if got, want := stats.Created, int64(0); got != want {
		t.Errorf("expected created = %d; got: %d", want, got)
	}
	if got, want := stats.Updated, int64(0); got != want {
		t.Errorf("expected updated = %d; got: %d", want, got)
	}
	if got, want := stats.Deleted, int64(0); got != want {
		t.Errorf("expected deleted = %d; got: %d", want, got)
	}
	if got, want := stats.Succeeded, int64(numDocs); got != want {
		t.Errorf("expected succeeded = %d; got: %d", want, got)
	}
	if got, want := stats.Failed, int64(0); got != want {
		t.Errorf("expected failed = %d; got: %d", want, got)
	}
	// Every request must have been seen exactly once by each callback.
	if got, want := beforeRequests, int64(numDocs); got != want {
		t.Errorf("expected %d requests to before callback; got: %d", want, got)
	}
	if got, want := afterRequests, int64(numDocs); got != want {
		t.Errorf("expected %d requests to after callback; got: %d", want, got)
	}
	if befores == 0 {
		t.Error("expected at least 1 call to before callback")
	}
	if afters == 0 {
		t.Error("expected at least 1 call to after callback")
	}
	if failures != 0 {
		t.Errorf("expected 0 calls to failure callback; got: %d", failures)
	}

	// Check number of documents that were bulk indexed
	_, err = p.c.Flush(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	count, err := p.c.Count(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if count != int64(numDocs) {
		t.Fatalf("expected %d documents; got: %d", numDocs, count)
	}
}
|
||||
17
vendor/gopkg.in/olivere/elastic.v5/bulk_request.go
generated
vendored
Normal file
17
vendor/gopkg.in/olivere/elastic.v5/bulk_request.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// -- Bulkable request (index/update/delete) --

// BulkableRequest is a generic interface to bulkable requests.
// Implementations serialize themselves into the line(s) of a bulk request
// body via Source and provide a human-readable form via fmt.Stringer.
type BulkableRequest interface {
	fmt.Stringer
	Source() ([]string, error)
}
|
||||
507
vendor/gopkg.in/olivere/elastic.v5/bulk_test.go
generated
vendored
Normal file
507
vendor/gopkg.in/olivere/elastic.v5/bulk_test.go
generated
vendored
Normal file
@@ -0,0 +1,507 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestBulk exercises the bulk API end-to-end against a live test cluster:
// index + delete in one batch, then a partial-document update, then a
// scripted update, verifying the stored document after each step.
func TestBulk(t *testing.T) {
	client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}

	index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
	index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
	delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")

	bulkRequest := client.Bulk()
	bulkRequest = bulkRequest.Add(index1Req)
	bulkRequest = bulkRequest.Add(index2Req)
	bulkRequest = bulkRequest.Add(delete1Req)

	if bulkRequest.NumberOfActions() != 3 {
		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions())
	}

	bulkResponse, err := bulkRequest.Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if bulkResponse == nil {
		t.Errorf("expected bulkResponse to be != nil; got nil")
	}

	// A successful Do drains the queued actions.
	if bulkRequest.NumberOfActions() != 0 {
		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
	}

	// Document with Id="1" should not exist (deleted in the same batch)
	exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if exists {
		t.Errorf("expected exists %v; got %v", false, exists)
	}

	// Document with Id="2" should exist
	exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if !exists {
		t.Errorf("expected exists %v; got %v", true, exists)
	}

	// Update
	updateDoc := struct {
		Retweets int `json:"retweets"`
	}{
		42,
	}
	update1Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").Doc(&updateDoc)
	bulkRequest = client.Bulk()
	bulkRequest = bulkRequest.Add(update1Req)

	if bulkRequest.NumberOfActions() != 1 {
		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions())
	}

	bulkResponse, err = bulkRequest.Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if bulkResponse == nil {
		t.Errorf("expected bulkResponse to be != nil; got nil")
	}

	if bulkRequest.NumberOfActions() != 0 {
		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
	}

	// Document with Id="2" should have a retweets count of 42
	// (NOTE: original comment said Id="1", but the code fetches Id="2".)
	doc, err := client.Get().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if doc == nil {
		t.Fatal("expected doc to be != nil; got nil")
	}
	if !doc.Found {
		t.Fatalf("expected doc to be found; got found = %v", doc.Found)
	}
	if doc.Source == nil {
		t.Fatal("expected doc source to be != nil; got nil")
	}
	var updatedTweet tweet
	err = json.Unmarshal(*doc.Source, &updatedTweet)
	if err != nil {
		t.Fatal(err)
	}
	if updatedTweet.Retweets != 42 {
		t.Errorf("expected updated tweet retweets = %v; got %v", 42, updatedTweet.Retweets)
	}

	// Update with script
	update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").
		RetryOnConflict(3).
		Script(NewScript("ctx._source.retweets += params.v").Param("v", 1))
	bulkRequest = client.Bulk()
	bulkRequest = bulkRequest.Add(update2Req)
	if bulkRequest.NumberOfActions() != 1 {
		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions())
	}
	bulkResponse, err = bulkRequest.Refresh("wait_for").Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if bulkResponse == nil {
		t.Errorf("expected bulkResponse to be != nil; got nil")
	}

	if bulkRequest.NumberOfActions() != 0 {
		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
	}

	// Document with Id="2" should have a retweets count of 43
	// (NOTE: original comment said Id="1", but the code fetches Id="2".)
	doc, err = client.Get().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if doc == nil {
		t.Fatal("expected doc to be != nil; got nil")
	}
	if !doc.Found {
		t.Fatalf("expected doc to be found; got found = %v", doc.Found)
	}
	if doc.Source == nil {
		t.Fatal("expected doc source to be != nil; got nil")
	}
	err = json.Unmarshal(*doc.Source, &updatedTweet)
	if err != nil {
		t.Fatal(err)
	}
	if updatedTweet.Retweets != 43 {
		t.Errorf("expected updated tweet retweets = %v; got %v", 43, updatedTweet.Retweets)
	}
}
|
||||
|
||||
func TestBulkWithIndexSetOnClient(t *testing.T) {
|
||||
client := setupTestClientAndCreateIndex(t)
|
||||
|
||||
tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
|
||||
tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
|
||||
|
||||
index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
|
||||
index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
|
||||
delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
|
||||
|
||||
bulkRequest := client.Bulk().Index(testIndexName).Type("tweet")
|
||||
bulkRequest = bulkRequest.Add(index1Req)
|
||||
bulkRequest = bulkRequest.Add(index2Req)
|
||||
bulkRequest = bulkRequest.Add(delete1Req)
|
||||
|
||||
if bulkRequest.NumberOfActions() != 3 {
|
||||
t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions())
|
||||
}
|
||||
|
||||
bulkResponse, err := bulkRequest.Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if bulkResponse == nil {
|
||||
t.Errorf("expected bulkResponse to be != nil; got nil")
|
||||
}
|
||||
|
||||
// Document with Id="1" should not exist
|
||||
exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if exists {
|
||||
t.Errorf("expected exists %v; got %v", false, exists)
|
||||
}
|
||||
|
||||
// Document with Id="2" should exist
|
||||
exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !exists {
|
||||
t.Errorf("expected exists %v; got %v", true, exists)
|
||||
}
|
||||
}
|
||||
|
||||
// TestBulkRequestsSerialization pins the exact wire format of a mixed bulk
// request (index, create, delete, update) and then executes it, checking
// the response accessors (Indexed, Created, Deleted, Updated, Succeeded,
// ById) against the expected per-item ids, statuses, and versions.
func TestBulkRequestsSerialization(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}

	index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
	index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
	delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
	update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").
		Doc(struct {
			Retweets int `json:"retweets"`
		}{
			Retweets: 42,
		})

	bulkRequest := client.Bulk()
	bulkRequest = bulkRequest.Add(index1Req)
	bulkRequest = bulkRequest.Add(index2Req)
	bulkRequest = bulkRequest.Add(delete1Req)
	bulkRequest = bulkRequest.Add(update2Req)

	if bulkRequest.NumberOfActions() != 4 {
		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions())
	}

	// Exact newline-delimited body: one command line per action, followed
	// by a source line where the action carries a document.
	expected := `{"index":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}}
{"user":"olivere","message":"Welcome to Golang and Elasticsearch.","retweets":0,"created":"0001-01-01T00:00:00Z"}
{"create":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}}
{"user":"sandrae","message":"Dancing all night long. Yeah.","retweets":0,"created":"0001-01-01T00:00:00Z"}
{"delete":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}}
{"update":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}}
{"doc":{"retweets":42}}
`
	got, err := bulkRequest.bodyAsString()
	if err != nil {
		t.Fatalf("expected no error, got: %v", err)
	}
	if got != expected {
		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
	}

	// Run the bulk request
	bulkResponse, err := bulkRequest.Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if bulkResponse == nil {
		t.Errorf("expected bulkResponse to be != nil; got nil")
	}
	if bulkResponse.Took == 0 {
		t.Errorf("expected took to be > 0; got %d", bulkResponse.Took)
	}
	if bulkResponse.Errors {
		t.Errorf("expected errors to be %v; got %v", false, bulkResponse.Errors)
	}
	if len(bulkResponse.Items) != 4 {
		t.Fatalf("expected 4 result items; got %d", len(bulkResponse.Items))
	}

	// Indexed actions
	indexed := bulkResponse.Indexed()
	if indexed == nil {
		t.Fatal("expected indexed to be != nil; got nil")
	}
	if len(indexed) != 1 {
		t.Fatalf("expected len(indexed) == %d; got %d", 1, len(indexed))
	}
	if indexed[0].Id != "1" {
		t.Errorf("expected indexed[0].Id == %s; got %s", "1", indexed[0].Id)
	}
	if indexed[0].Status != 201 {
		t.Errorf("expected indexed[0].Status == %d; got %d", 201, indexed[0].Status)
	}

	// Created actions
	created := bulkResponse.Created()
	if created == nil {
		t.Fatal("expected created to be != nil; got nil")
	}
	if len(created) != 1 {
		t.Fatalf("expected len(created) == %d; got %d", 1, len(created))
	}
	if created[0].Id != "2" {
		t.Errorf("expected created[0].Id == %s; got %s", "2", created[0].Id)
	}
	if created[0].Status != 201 {
		t.Errorf("expected created[0].Status == %d; got %d", 201, created[0].Status)
	}

	// Deleted actions
	deleted := bulkResponse.Deleted()
	if deleted == nil {
		t.Fatal("expected deleted to be != nil; got nil")
	}
	if len(deleted) != 1 {
		t.Fatalf("expected len(deleted) == %d; got %d", 1, len(deleted))
	}
	if deleted[0].Id != "1" {
		t.Errorf("expected deleted[0].Id == %s; got %s", "1", deleted[0].Id)
	}
	if deleted[0].Status != 200 {
		t.Errorf("expected deleted[0].Status == %d; got %d", 200, deleted[0].Status)
	}
	if !deleted[0].Found {
		t.Errorf("expected deleted[0].Found == %v; got %v", true, deleted[0].Found)
	}

	// Updated actions
	updated := bulkResponse.Updated()
	if updated == nil {
		t.Fatal("expected updated to be != nil; got nil")
	}
	if len(updated) != 1 {
		t.Fatalf("expected len(updated) == %d; got %d", 1, len(updated))
	}
	if updated[0].Id != "2" {
		t.Errorf("expected updated[0].Id == %s; got %s", "2", updated[0].Id)
	}
	if updated[0].Status != 200 {
		t.Errorf("expected updated[0].Status == %d; got %d", 200, updated[0].Status)
	}
	if updated[0].Version != 2 {
		t.Errorf("expected updated[0].Version == %d; got %d", 2, updated[0].Version)
	}

	// Succeeded actions
	succeeded := bulkResponse.Succeeded()
	if succeeded == nil {
		t.Fatal("expected succeeded to be != nil; got nil")
	}
	if len(succeeded) != 4 {
		t.Fatalf("expected len(succeeded) == %d; got %d", 4, len(succeeded))
	}

	// ById: document "1" was both indexed (201, v1) and deleted (200, v2).
	id1Results := bulkResponse.ById("1")
	if id1Results == nil {
		t.Fatal("expected id1Results to be != nil; got nil")
	}
	if len(id1Results) != 2 {
		t.Fatalf("expected len(id1Results) == %d; got %d", 2, len(id1Results))
	}
	if id1Results[0].Id != "1" {
		t.Errorf("expected id1Results[0].Id == %s; got %s", "1", id1Results[0].Id)
	}
	if id1Results[0].Status != 201 {
		t.Errorf("expected id1Results[0].Status == %d; got %d", 201, id1Results[0].Status)
	}
	if id1Results[0].Version != 1 {
		t.Errorf("expected id1Results[0].Version == %d; got %d", 1, id1Results[0].Version)
	}
	if id1Results[1].Id != "1" {
		t.Errorf("expected id1Results[1].Id == %s; got %s", "1", id1Results[1].Id)
	}
	if id1Results[1].Status != 200 {
		t.Errorf("expected id1Results[1].Status == %d; got %d", 200, id1Results[1].Status)
	}
	if id1Results[1].Version != 2 {
		t.Errorf("expected id1Results[1].Version == %d; got %d", 2, id1Results[1].Version)
	}
}
|
||||
|
||||
func TestFailedBulkRequests(t *testing.T) {
|
||||
js := `{
|
||||
"took" : 2,
|
||||
"errors" : true,
|
||||
"items" : [ {
|
||||
"index" : {
|
||||
"_index" : "elastic-test",
|
||||
"_type" : "tweet",
|
||||
"_id" : "1",
|
||||
"_version" : 1,
|
||||
"status" : 201
|
||||
}
|
||||
}, {
|
||||
"create" : {
|
||||
"_index" : "elastic-test",
|
||||
"_type" : "tweet",
|
||||
"_id" : "2",
|
||||
"_version" : 1,
|
||||
"status" : 423,
|
||||
"error" : {
|
||||
"type":"routing_missing_exception",
|
||||
"reason":"routing is required for [elastic-test2]/[comment]/[1]"
|
||||
}
|
||||
}
|
||||
}, {
|
||||
"delete" : {
|
||||
"_index" : "elastic-test",
|
||||
"_type" : "tweet",
|
||||
"_id" : "1",
|
||||
"_version" : 2,
|
||||
"status" : 404,
|
||||
"found" : false
|
||||
}
|
||||
}, {
|
||||
"update" : {
|
||||
"_index" : "elastic-test",
|
||||
"_type" : "tweet",
|
||||
"_id" : "2",
|
||||
"_version" : 2,
|
||||
"status" : 200
|
||||
}
|
||||
} ]
|
||||
}`
|
||||
|
||||
var resp BulkResponse
|
||||
err := json.Unmarshal([]byte(js), &resp)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
failed := resp.Failed()
|
||||
if len(failed) != 2 {
|
||||
t.Errorf("expected %d failed items; got: %d", 2, len(failed))
|
||||
}
|
||||
}
|
||||
|
||||
// TestBulkEstimatedSizeInBytes checks that EstimatedSizeInBytes is at
// least the length of the serialized bulk body, and that reset() clears
// the running estimate back to zero.
func TestBulkEstimatedSizeInBytes(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}

	index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
	index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
	delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
	update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").
		Doc(struct {
			Retweets int `json:"retweets"`
		}{
			Retweets: 42,
		})

	bulkRequest := client.Bulk()
	bulkRequest = bulkRequest.Add(index1Req)
	bulkRequest = bulkRequest.Add(index2Req)
	bulkRequest = bulkRequest.Add(delete1Req)
	bulkRequest = bulkRequest.Add(update2Req)

	if bulkRequest.NumberOfActions() != 4 {
		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions())
	}

	// The estimated size of the bulk request in bytes must be at least
	// the length of the body request.
	raw, err := bulkRequest.bodyAsString()
	if err != nil {
		t.Fatal(err)
	}
	rawlen := int64(len([]byte(raw)))

	if got, want := bulkRequest.EstimatedSizeInBytes(), rawlen; got < want {
		t.Errorf("expected an EstimatedSizeInBytes = %d; got: %v", want, got)
	}

	// Reset should also reset the calculated estimated byte size
	bulkRequest.reset()

	if got, want := bulkRequest.EstimatedSizeInBytes(), int64(0); got != want {
		t.Errorf("expected an EstimatedSizeInBytes = %d; got: %v", want, got)
	}
}
|
||||
|
||||
func TestBulkEstimateSizeInBytesLength(t *testing.T) {
|
||||
client := setupTestClientAndCreateIndex(t)
|
||||
s := client.Bulk()
|
||||
r := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
|
||||
s = s.Add(r)
|
||||
if got, want := s.estimateSizeInBytes(r), int64(1+len(r.String())); got != want {
|
||||
t.Fatalf("expected %d; got: %d", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
var benchmarkBulkEstimatedSizeInBytes int64
|
||||
|
||||
func BenchmarkBulkEstimatedSizeInBytesWith1Request(b *testing.B) {
|
||||
client := setupTestClientAndCreateIndex(b)
|
||||
s := client.Bulk()
|
||||
var result int64
|
||||
for n := 0; n < b.N; n++ {
|
||||
s = s.Add(NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(struct{ A string }{"1"}))
|
||||
s = s.Add(NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("1").Doc(struct{ A string }{"2"}))
|
||||
s = s.Add(NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1"))
|
||||
result = s.EstimatedSizeInBytes()
|
||||
s.reset()
|
||||
}
|
||||
b.ReportAllocs()
|
||||
benchmarkBulkEstimatedSizeInBytes = result // ensure the compiler doesn't optimize
|
||||
}
|
||||
|
||||
func BenchmarkBulkEstimatedSizeInBytesWith100Requests(b *testing.B) {
|
||||
client := setupTestClientAndCreateIndex(b)
|
||||
s := client.Bulk()
|
||||
var result int64
|
||||
for n := 0; n < b.N; n++ {
|
||||
for i := 0; i < 100; i++ {
|
||||
s = s.Add(NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(struct{ A string }{"1"}))
|
||||
s = s.Add(NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("1").Doc(struct{ A string }{"2"}))
|
||||
s = s.Add(NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1"))
|
||||
}
|
||||
result = s.EstimatedSizeInBytes()
|
||||
s.reset()
|
||||
}
|
||||
b.ReportAllocs()
|
||||
benchmarkBulkEstimatedSizeInBytes = result // ensure the compiler doesn't optimize
|
||||
}
|
||||
270
vendor/gopkg.in/olivere/elastic.v5/bulk_update_request.go
generated
vendored
Normal file
270
vendor/gopkg.in/olivere/elastic.v5/bulk_update_request.go
generated
vendored
Normal file
@@ -0,0 +1,270 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// BulkUpdateRequest is a request to update a document in Elasticsearch.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
// for details.
type BulkUpdateRequest struct {
	BulkableRequest
	index string // target index; falls back to the BulkService default if empty
	typ   string // target type; falls back to the BulkService default if empty
	id    string // id of the document to update

	routing         string
	parent          string
	script          *Script // script-based update; used when doc is nil
	scriptedUpsert  *bool
	version         int64  // default is MATCH_ANY
	versionType     string // default is "internal"
	retryOnConflict *int
	upsert          interface{} // document to create when the target does not exist
	docAsUpsert     *bool
	detectNoop      *bool
	doc             interface{} // partial document for a doc-based update

	// source caches the serialized request lines; every setter resets it
	// to nil and Source rebuilds it lazily.
	source []string
}
|
||||
|
||||
// NewBulkUpdateRequest returns a new BulkUpdateRequest.
// The zero value is usable as-is; configure it via the chainable setters.
func NewBulkUpdateRequest() *BulkUpdateRequest {
	return &BulkUpdateRequest{}
}
|
||||
|
||||
// Index specifies the Elasticsearch index to use for this update request.
// If unspecified, the index set on the BulkService will be used.
func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest {
	r.index = index
	r.source = nil // invalidate the cached serialization
	return r
}
|
||||
|
||||
// Type specifies the Elasticsearch type to use for this update request.
// If unspecified, the type set on the BulkService will be used.
func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest {
	r.typ = typ
	r.source = nil // invalidate the cached serialization
	return r
}
|
||||
|
||||
// Id specifies the identifier of the document to update.
func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest {
	r.id = id
	r.source = nil // invalidate the cached serialization
	return r
}
|
||||
|
||||
// Routing specifies a routing value for the request.
func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest {
	r.routing = routing
	r.source = nil // invalidate the cached serialization
	return r
}
|
||||
|
||||
// Parent specifies the identifier of the parent document (if available).
func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest {
	r.parent = parent
	r.source = nil // invalidate the cached serialization
	return r
}
|
||||
|
||||
// Script specifies an update script.
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html#bulk-update
// and https://www.elastic.co/guide/en/elasticsearch/reference/5.2/modules-scripting.html
// for details.
func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest {
	r.script = script
	r.source = nil // invalidate the cached serialization
	return r
}
|
||||
|
||||
// ScriptedUpsert specifies if your script will run regardless of
// whether the document exists or not.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-update.html#_literal_scripted_upsert_literal
func (r *BulkUpdateRequest) ScriptedUpsert(upsert bool) *BulkUpdateRequest {
	r.scriptedUpsert = &upsert
	r.source = nil // invalidate the cached serialization
	return r
}
|
||||
|
||||
// RetryOnConflict specifies how often to retry in case of a version conflict.
func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest {
	r.retryOnConflict = &retryOnConflict
	r.source = nil // invalidate the cached serialization
	return r
}
|
||||
|
||||
// Version indicates the version of the document as part of an optimistic
// concurrency model.
func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest {
	r.version = version
	r.source = nil // invalidate the cached serialization
	return r
}
|
||||
|
||||
// VersionType can be "internal" (default), "external", "external_gte",
// "external_gt", or "force".
func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest {
	r.versionType = versionType
	r.source = nil // invalidate the cached serialization
	return r
}
|
||||
|
||||
// Doc specifies the updated document (a partial document merged into the
// existing one).
func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest {
	r.doc = doc
	r.source = nil // invalidate the cached serialization
	return r
}
|
||||
|
||||
// DocAsUpsert indicates whether the contents of Doc should be used as
// the Upsert value.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-update.html#_literal_doc_as_upsert_literal
// for details.
func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest {
	r.docAsUpsert = &docAsUpsert
	r.source = nil // invalidate the cached serialization
	return r
}
|
||||
|
||||
// DetectNoop specifies whether changes that don't affect the document
// should be ignored (true) or unignored (false). This is enabled by default
// in Elasticsearch.
func (r *BulkUpdateRequest) DetectNoop(detectNoop bool) *BulkUpdateRequest {
	r.detectNoop = &detectNoop
	r.source = nil // invalidate the cached serialization
	return r
}
|
||||
|
||||
// Upsert specifies the document to use for upserts. It will be used for
// create if the original document does not exist.
func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest {
	r.upsert = doc
	r.source = nil // invalidate the cached serialization
	return r
}
|
||||
|
||||
// String returns the on-wire representation of the update request,
// concatenated as a single string. Serialization errors are rendered
// into the returned string rather than reported separately, as is
// conventional for fmt.Stringer.
func (r *BulkUpdateRequest) String() string {
	lines, err := r.Source()
	if err != nil {
		return fmt.Sprintf("error: %v", err)
	}
	return strings.Join(lines, "\n")
}
|
||||
|
||||
func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error) {
|
||||
switch t := data.(type) {
|
||||
default:
|
||||
body, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(body), nil
|
||||
case json.RawMessage:
|
||||
return string(t), nil
|
||||
case *json.RawMessage:
|
||||
return string(*t), nil
|
||||
case string:
|
||||
return t, nil
|
||||
case *string:
|
||||
return *t, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Source returns the on-wire representation of the update request,
// split into an action-and-meta-data line and an (optional) source line.
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html
// for details.
func (r *BulkUpdateRequest) Source() ([]string, error) {
	// { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
	// { "doc" : { "field1" : "value1", ... } }
	// or
	// { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
	// { "script" : { ... } }

	// Return the cached serialization if no setter invalidated it.
	if r.source != nil {
		return r.source, nil
	}

	lines := make([]string, 2)

	// 1st line: the "update" command with its metadata. Only fields that
	// were explicitly set are emitted.
	command := make(map[string]interface{})
	updateCommand := make(map[string]interface{})
	if r.index != "" {
		updateCommand["_index"] = r.index
	}
	if r.typ != "" {
		updateCommand["_type"] = r.typ
	}
	if r.id != "" {
		updateCommand["_id"] = r.id
	}
	if r.routing != "" {
		updateCommand["_routing"] = r.routing
	}
	if r.parent != "" {
		updateCommand["_parent"] = r.parent
	}
	if r.version > 0 {
		updateCommand["_version"] = r.version
	}
	if r.versionType != "" {
		updateCommand["_version_type"] = r.versionType
	}
	if r.retryOnConflict != nil {
		updateCommand["_retry_on_conflict"] = *r.retryOnConflict
	}
	command["update"] = updateCommand
	line, err := json.Marshal(command)
	if err != nil {
		return nil, err
	}
	lines[0] = string(line)

	// 2nd line: {"doc" : { ... }} or {"script": {...}}
	source := make(map[string]interface{})
	if r.docAsUpsert != nil {
		source["doc_as_upsert"] = *r.docAsUpsert
	}
	if r.detectNoop != nil {
		source["detect_noop"] = *r.detectNoop
	}
	if r.upsert != nil {
		source["upsert"] = r.upsert
	}
	if r.scriptedUpsert != nil {
		source["scripted_upsert"] = *r.scriptedUpsert
	}
	// A doc-based update takes precedence over a script-based one.
	if r.doc != nil {
		// {"doc":{...}}
		source["doc"] = r.doc
	} else if r.script != nil {
		// {"script":...}
		src, err := r.script.Source()
		if err != nil {
			return nil, err
		}
		source["script"] = src
	}
	lines[1], err = r.getSourceAsString(source)
	if err != nil {
		return nil, err
	}

	// Cache the result until the next setter call.
	r.source = lines
	return lines, nil
}
|
||||
121
vendor/gopkg.in/olivere/elastic.v5/bulk_update_request_test.go
generated
vendored
Normal file
121
vendor/gopkg.in/olivere/elastic.v5/bulk_update_request_test.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestBulkUpdateRequestSerialization verifies that BulkUpdateRequest.Source
// produces the expected two-line on-wire representation (metadata line plus
// doc/script line) for a variety of option combinations.
func TestBulkUpdateRequestSerialization(t *testing.T) {
	tests := []struct {
		Request  BulkableRequest
		Expected []string
	}{
		// #0: plain partial-doc update
		{
			Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").Doc(struct {
				Counter int64 `json:"counter"`
			}{
				Counter: 42,
			}),
			Expected: []string{
				`{"update":{"_id":"1","_index":"index1","_type":"tweet"}}`,
				`{"doc":{"counter":42}}`,
			},
		},
		// #1: retry_on_conflict and doc_as_upsert
		{
			Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").
				RetryOnConflict(3).
				DocAsUpsert(true).
				Doc(struct {
					Counter int64 `json:"counter"`
				}{
					Counter: 42,
				}),
			Expected: []string{
				`{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet"}}`,
				`{"doc":{"counter":42},"doc_as_upsert":true}`,
			},
		},
		// #2: script with upsert document
		{
			Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").
				RetryOnConflict(3).
				Script(NewScript(`ctx._source.retweets += param1`).Lang("javascript").Param("param1", 42)).
				Upsert(struct {
					Counter int64 `json:"counter"`
				}{
					Counter: 42,
				}),
			Expected: []string{
				`{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet"}}`,
				`{"script":{"inline":"ctx._source.retweets += param1","lang":"javascript","params":{"param1":42}},"upsert":{"counter":42}}`,
			},
		},
		// #3: detect_noop
		{
			Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").DetectNoop(true).Doc(struct {
				Counter int64 `json:"counter"`
			}{
				Counter: 42,
			}),
			Expected: []string{
				`{"update":{"_id":"1","_index":"index1","_type":"tweet"}}`,
				`{"detect_noop":true,"doc":{"counter":42}}`,
			},
		},
		// #4: scripted_upsert with script and upsert document
		{
			Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").
				RetryOnConflict(3).
				ScriptedUpsert(true).
				Script(NewScript(`ctx._source.retweets += param1`).Lang("javascript").Param("param1", 42)).
				Upsert(struct {
					Counter int64 `json:"counter"`
				}{
					Counter: 42,
				}),
			Expected: []string{
				`{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet"}}`,
				`{"script":{"inline":"ctx._source.retweets += param1","lang":"javascript","params":{"param1":42}},"scripted_upsert":true,"upsert":{"counter":42}}`,
			},
		},
	}

	for i, test := range tests {
		lines, err := test.Request.Source()
		if err != nil {
			t.Fatalf("#%d: expected no error, got: %v", i, err)
		}
		if lines == nil {
			t.Fatalf("#%d: expected lines, got nil", i)
		}
		if len(lines) != len(test.Expected) {
			t.Fatalf("#%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
		}
		for j, line := range lines {
			if line != test.Expected[j] {
				t.Errorf("#%d: expected line #%d to be\n%s\nbut got:\n%s", i, j, test.Expected[j], line)
			}
		}
	}
}
|
||||
|
||||
// bulkUpdateRequestSerializationResult is a package-level sink that keeps
// the compiler from eliminating the benchmark body as dead code.
var bulkUpdateRequestSerializationResult string

// BenchmarkBulkUpdateRequestSerialization measures the cost of serializing
// a BulkUpdateRequest, with the internal cache cleared on every iteration.
func BenchmarkBulkUpdateRequestSerialization(b *testing.B) {
	r := NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").Doc(struct {
		Counter int64 `json:"counter"`
	}{
		Counter: 42,
	})
	var s string
	for n := 0; n < b.N; n++ {
		s = r.String()
		r.source = nil // Don't let caching spoil the benchmark
	}
	bulkUpdateRequestSerializationResult = s // ensure the compiler doesn't optimize
}
|
||||
34
vendor/gopkg.in/olivere/elastic.v5/canonicalize.go
generated
vendored
Normal file
34
vendor/gopkg.in/olivere/elastic.v5/canonicalize.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import "net/url"
|
||||
|
||||
// canonicalize takes a list of URLs and returns its canonicalized form, i.e.
// remove anything but scheme, userinfo, host, path, and port.
// It also removes all trailing slashes. Invalid URLs or URLs that do not
// use protocol http or https are skipped.
//
// Example:
// http://127.0.0.1:9200/?query=1 -> http://127.0.0.1:9200
// http://127.0.0.1:9200/db1/     -> http://127.0.0.1:9200/db1
func canonicalize(rawurls ...string) []string {
	var out []string
	for _, rawurl := range rawurls {
		u, err := url.Parse(rawurl)
		if err != nil {
			continue // skip unparseable URLs
		}
		if u.Scheme != "http" && u.Scheme != "https" {
			continue // only http/https endpoints are accepted
		}
		// Strip all trailing slashes from the path.
		for len(u.Path) > 0 && u.Path[len(u.Path)-1] == '/' {
			u.Path = u.Path[:len(u.Path)-1]
		}
		// Drop query string and fragment.
		u.RawQuery = ""
		u.Fragment = ""
		out = append(out, u.String())
	}
	return out
}
|
||||
72
vendor/gopkg.in/olivere/elastic.v5/canonicalize_test.go
generated
vendored
Normal file
72
vendor/gopkg.in/olivere/elastic.v5/canonicalize_test.go
generated
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestCanonicalize checks canonicalize against a table of inputs covering
// trailing slashes, query/fragment stripping, userinfo preservation,
// non-http schemes, and invalid URLs.
func TestCanonicalize(t *testing.T) {
	tests := []struct {
		Input  []string
		Output []string
	}{
		// #0
		{
			Input:  []string{"http://127.0.0.1/"},
			Output: []string{"http://127.0.0.1"},
		},
		// #1
		{
			Input:  []string{"http://127.0.0.1:9200/", "gopher://golang.org/", "http://127.0.0.1:9201"},
			Output: []string{"http://127.0.0.1:9200", "http://127.0.0.1:9201"},
		},
		// #2
		{
			Input:  []string{"http://user:secret@127.0.0.1/path?query=1#fragment"},
			Output: []string{"http://user:secret@127.0.0.1/path"},
		},
		// #3
		{
			Input:  []string{"https://somewhere.on.mars:9999/path?query=1#fragment"},
			Output: []string{"https://somewhere.on.mars:9999/path"},
		},
		// #4
		{
			Input:  []string{"https://prod1:9999/one?query=1#fragment", "https://prod2:9998/two?query=1#fragment"},
			Output: []string{"https://prod1:9999/one", "https://prod2:9998/two"},
		},
		// #5
		{
			Input:  []string{"http://127.0.0.1/one/"},
			Output: []string{"http://127.0.0.1/one"},
		},
		// #6
		{
			Input:  []string{"http://127.0.0.1/one///"},
			Output: []string{"http://127.0.0.1/one"},
		},
		// #7: Invalid URL
		{
			Input:  []string{"127.0.0.1/"},
			Output: []string{},
		},
		// #8: Invalid URL
		{
			Input:  []string{"127.0.0.1:9200"},
			Output: []string{},
		},
	}

	for i, test := range tests {
		got := canonicalize(test.Input...)
		if want, have := len(test.Output), len(got); want != have {
			t.Fatalf("#%d: expected %d elements; got: %d", i, want, have)
		}
		for i := 0; i < len(got); i++ {
			if want, have := test.Output[i], got[i]; want != have {
				t.Errorf("#%d: expected %q; got: %q", i, want, have)
			}
		}
	}
}
|
||||
103
vendor/gopkg.in/olivere/elastic.v5/clear_scroll.go
generated
vendored
Normal file
103
vendor/gopkg.in/olivere/elastic.v5/clear_scroll.go
generated
vendored
Normal file
@@ -0,0 +1,103 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// ClearScrollService clears one or more scroll contexts by their ids.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-scroll.html#_clear_scroll_api
// for details.
type ClearScrollService struct {
	client   *Client  // client used to perform the HTTP request
	pretty   bool     // if true, ask ES to pretty-print the JSON response
	scrollId []string // scroll ids to clear; "_all" clears every context
}
|
||||
|
||||
// NewClearScrollService creates a new ClearScrollService.
|
||||
func NewClearScrollService(client *Client) *ClearScrollService {
|
||||
return &ClearScrollService{
|
||||
client: client,
|
||||
scrollId: make([]string, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// ScrollId is a list of scroll IDs to clear.
|
||||
// Use _all to clear all search contexts.
|
||||
func (s *ClearScrollService) ScrollId(scrollIds ...string) *ClearScrollService {
|
||||
s.scrollId = append(s.scrollId, scrollIds...)
|
||||
return s
|
||||
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (s *ClearScrollService) Pretty(pretty bool) *ClearScrollService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *ClearScrollService) buildURL() (string, url.Values, error) {
|
||||
// Build URL
|
||||
path := "/_search/scroll/"
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.pretty {
|
||||
params.Set("pretty", "1")
|
||||
}
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *ClearScrollService) Validate() error {
|
||||
var invalid []string
|
||||
if len(s.scrollId) == 0 {
|
||||
invalid = append(invalid, "ScrollId")
|
||||
}
|
||||
if len(invalid) > 0 {
|
||||
return fmt.Errorf("missing required fields: %v", invalid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do executes the operation: it validates the service configuration and
// issues a DELETE to /_search/scroll/ with the scroll ids in the body.
func (s *ClearScrollService) Do(ctx context.Context) (*ClearScrollResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Setup HTTP request body: the ids are sent as {"scroll_id": [...]}.
	body := map[string][]string{
		"scroll_id": s.scrollId,
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, "DELETE", path, params, body)
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(ClearScrollResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}
|
||||
|
||||
// ClearScrollResponse is the response of ClearScrollService.Do.
// It is intentionally empty: no fields of the reply are surfaced here.
type ClearScrollResponse struct {
}
|
||||
87
vendor/gopkg.in/olivere/elastic.v5/clear_scroll_test.go
generated
vendored
Normal file
87
vendor/gopkg.in/olivere/elastic.v5/clear_scroll_test.go
generated
vendored
Normal file
@@ -0,0 +1,87 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
_ "net/http"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestClearScroll is an integration test: it indexes three documents,
// opens a scroll, clears it, and verifies that further use of the cleared
// scroll id fails. Requires a running Elasticsearch cluster.
func TestClearScroll(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)
	// client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", log.LstdFlags)))

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}

	// Add all documents
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	// Flush so the documents are searchable before scrolling.
	_, err = client.Flush().Index(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	// Match all should return all documents
	res, err := client.Scroll(testIndexName).Size(1).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if res == nil {
		t.Fatal("expected results != nil; got nil")
	}
	if res.ScrollId == "" {
		t.Fatalf("expected scrollId in results; got %q", res.ScrollId)
	}

	// Search should succeed
	_, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	// Clear scroll id
	clearScrollRes, err := client.ClearScroll().ScrollId(res.ScrollId).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if clearScrollRes == nil {
		t.Fatal("expected results != nil; got nil")
	}

	// Search result should fail: the scroll context no longer exists.
	_, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do(context.TODO())
	if err == nil {
		t.Fatalf("expected scroll to fail")
	}
}
|
||||
|
||||
// TestClearScrollValidate checks that ClearScroll fails fast (via Validate)
// when no scroll ids have been provided.
func TestClearScrollValidate(t *testing.T) {
	client := setupTestClient(t)

	// No scroll id -> fail with error
	res, err := NewClearScrollService(client).Do(context.TODO())
	if err == nil {
		t.Fatalf("expected ClearScroll to fail without scroll ids")
	}
	if res != nil {
		t.Fatalf("expected result to be nil; got: %v", res)
	}
}
|
||||
1718
vendor/gopkg.in/olivere/elastic.v5/client.go
generated
vendored
Normal file
1718
vendor/gopkg.in/olivere/elastic.v5/client.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1108
vendor/gopkg.in/olivere/elastic.v5/client_test.go
generated
vendored
Normal file
1108
vendor/gopkg.in/olivere/elastic.v5/client_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
16
vendor/gopkg.in/olivere/elastic.v5/cluster-test/Makefile
generated
vendored
Normal file
16
vendor/gopkg.in/olivere/elastic.v5/cluster-test/Makefile
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
.PHONY: build run-omega-cluster-test
|
||||
|
||||
default: build
|
||||
|
||||
build:
|
||||
go build cluster-test.go
|
||||
|
||||
run-omega-cluster-test:
|
||||
go run -race cluster-test.go \
|
||||
-nodes=http://192.168.2.65:8200,http://192.168.2.64:8200 \
|
||||
-n=5 \
|
||||
-retries=5 \
|
||||
-sniff=true -sniffer=10s \
|
||||
-healthcheck=true -healthchecker=5s \
|
||||
-errorlog=errors.log
|
||||
|
||||
63
vendor/gopkg.in/olivere/elastic.v5/cluster-test/README.md
generated
vendored
Normal file
63
vendor/gopkg.in/olivere/elastic.v5/cluster-test/README.md
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
# Cluster Test
|
||||
|
||||
This directory contains a program you can use to test a cluster.
|
||||
|
||||
Here's how:
|
||||
|
||||
First, install a cluster of Elasticsearch nodes. You can install them on
|
||||
different computers, or start several nodes on a single machine.
|
||||
|
||||
Build cluster-test by `go build cluster-test.go` (or build with `make`).
|
||||
|
||||
Run `./cluster-test -h` to get a list of flags:
|
||||
|
||||
```sh
|
||||
$ ./cluster-test -h
|
||||
Usage of ./cluster-test:
|
||||
-errorlog="": error log file
|
||||
-healthcheck=true: enable or disable healthchecks
|
||||
-healthchecker=1m0s: healthcheck interval
|
||||
-index="twitter": name of ES index to use
|
||||
-infolog="": info log file
|
||||
-n=5: number of goroutines that run searches
|
||||
-nodes="": comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200')
|
||||
-retries=0: number of retries
|
||||
-sniff=true: enable or disable sniffer
|
||||
-sniffer=15m0s: sniffer interval
|
||||
-tracelog="": trace log file
|
||||
```
|
||||
|
||||
Example:
|
||||
|
||||
```sh
|
||||
$ ./cluster-test -nodes=http://127.0.0.1:9200,http://127.0.0.1:9201,http://127.0.0.1:9202 -n=5 -index=twitter -retries=5 -sniff=true -sniffer=10s -healthcheck=true -healthchecker=5s -errorlog=error.log
|
||||
```
|
||||
|
||||
The above example will create an index and start some search jobs on the
|
||||
cluster defined by http://127.0.0.1:9200, http://127.0.0.1:9201,
|
||||
and http://127.0.0.1:9202.
|
||||
|
||||
* It will create an index called `twitter` on the cluster (`-index=twitter`)
|
||||
* It will run 5 search jobs in parallel (`-n=5`).
|
||||
* It will retry failed requests 5 times (`-retries=5`).
|
||||
* It will sniff the cluster periodically (`-sniff=true`).
|
||||
* It will sniff the cluster every 10 seconds (`-sniffer=10s`).
|
||||
* It will perform health checks periodically (`-healthcheck=true`).
|
||||
* It will perform health checks on the nodes every 5 seconds (`-healthchecker=5s`).
|
||||
* It will write an error log file (`-errorlog=error.log`).
|
||||
|
||||
If you want to test Elastic with nodes going up and down, you can use a
|
||||
chaos monkey script like this and run it on the nodes of your cluster:
|
||||
|
||||
```sh
|
||||
#!/bin/bash
|
||||
while true
|
||||
do
|
||||
echo "Starting ES node"
|
||||
elasticsearch -d -Xmx4g -Xms1g -Des.config=elasticsearch.yml -p es.pid
|
||||
sleep `jot -r 1 10 300` # wait for 10-300s
|
||||
echo "Stopping ES node"
|
||||
kill -TERM `cat es.pid`
|
||||
sleep `jot -r 1 10 60` # wait for 10-60s
|
||||
done
|
||||
```
|
||||
361
vendor/gopkg.in/olivere/elastic.v5/cluster-test/cluster-test.go
generated
vendored
Normal file
361
vendor/gopkg.in/olivere/elastic.v5/cluster-test/cluster-test.go
generated
vendored
Normal file
@@ -0,0 +1,361 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
elastic "gopkg.in/olivere/elastic.v5"
|
||||
)
|
||||
|
||||
// Tweet is the document type indexed and searched by this cluster test.
type Tweet struct {
	User     string                `json:"user"`
	Message  string                `json:"message"`
	Retweets int                   `json:"retweets"`
	Image    string                `json:"image,omitempty"`
	Created  time.Time             `json:"created,omitempty"`
	Tags     []string              `json:"tags,omitempty"`
	Location string                `json:"location,omitempty"`
	Suggest  *elastic.SuggestField `json:"suggest_field,omitempty"`
}
|
||||
|
||||
// Command-line flags controlling the cluster test run.
var (
	nodes         = flag.String("nodes", "", "comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200')")
	n             = flag.Int("n", 5, "number of goroutines that run searches")
	index         = flag.String("index", "twitter", "name of ES index to use")
	errorlogfile  = flag.String("errorlog", "", "error log file")
	infologfile   = flag.String("infolog", "", "info log file")
	tracelogfile  = flag.String("tracelog", "", "trace log file")
	retries       = flag.Int("retries", 0, "number of retries")
	sniff         = flag.Bool("sniff", elastic.DefaultSnifferEnabled, "enable or disable sniffer")
	sniffer       = flag.Duration("sniffer", elastic.DefaultSnifferInterval, "sniffer interval")
	healthcheck   = flag.Bool("healthcheck", elastic.DefaultHealthcheckEnabled, "enable or disable healthchecks")
	healthchecker = flag.Duration("healthchecker", elastic.DefaultHealthcheckInterval, "healthcheck interval")
)
|
||||
|
||||
// main parses the flags, configures a TestCase from them, starts it, and
// then blocks forever while the worker goroutines run.
func main() {
	flag.Parse()

	runtime.GOMAXPROCS(runtime.NumCPU())

	if *nodes == "" {
		log.Fatal("no nodes specified")
	}
	urls := strings.SplitN(*nodes, ",", -1)

	testcase, err := NewTestCase(*index, urls)
	if err != nil {
		log.Fatal(err)
	}

	testcase.SetErrorLogFile(*errorlogfile)
	testcase.SetInfoLogFile(*infologfile)
	testcase.SetTraceLogFile(*tracelogfile)
	testcase.SetMaxRetries(*retries)
	testcase.SetHealthcheck(*healthcheck)
	testcase.SetHealthcheckInterval(*healthchecker)
	testcase.SetSniff(*sniff)
	testcase.SetSnifferInterval(*sniffer)

	if err := testcase.Run(*n); err != nil {
		log.Fatal(err)
	}

	// Block forever; the searchers and monitor run until the process is killed.
	select {}
}
|
||||
|
||||
// RunInfo reports the outcome of a single search iteration to the monitor.
type RunInfo struct {
	Success bool
}
|
||||
|
||||
// TestCase bundles the configuration and runtime state of one cluster test:
// the client, run/failure counters (updated atomically), and the channel
// over which search workers report results to the monitor.
type TestCase struct {
	nodes               []string
	client              *elastic.Client
	runs                int64 // total iterations; incremented via sync/atomic
	failures            int64 // failed iterations; incremented via sync/atomic
	runCh               chan RunInfo
	index               string
	errorlogfile        string
	infologfile         string
	tracelogfile        string
	maxRetries          int
	healthcheck         bool
	healthcheckInterval time.Duration
	sniff               bool
	snifferInterval     time.Duration
}
|
||||
|
||||
func NewTestCase(index string, nodes []string) (*TestCase, error) {
|
||||
if index == "" {
|
||||
return nil, errors.New("no index name specified")
|
||||
}
|
||||
|
||||
return &TestCase{
|
||||
index: index,
|
||||
nodes: nodes,
|
||||
runCh: make(chan RunInfo),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SetIndex sets the name of the index used by the test.
func (t *TestCase) SetIndex(name string) {
	t.index = name
}

// SetErrorLogFile sets the file the error logger writes to.
func (t *TestCase) SetErrorLogFile(name string) {
	t.errorlogfile = name
}

// SetInfoLogFile sets the file the info logger writes to.
func (t *TestCase) SetInfoLogFile(name string) {
	t.infologfile = name
}

// SetTraceLogFile sets the file the trace logger writes to.
func (t *TestCase) SetTraceLogFile(name string) {
	t.tracelogfile = name
}

// SetMaxRetries sets the number of retries for failed requests.
func (t *TestCase) SetMaxRetries(n int) {
	t.maxRetries = n
}

// SetSniff enables or disables cluster sniffing.
func (t *TestCase) SetSniff(enabled bool) {
	t.sniff = enabled
}

// SetSnifferInterval sets the interval between sniffing runs.
func (t *TestCase) SetSnifferInterval(d time.Duration) {
	t.snifferInterval = d
}

// SetHealthcheck enables or disables node health checks.
func (t *TestCase) SetHealthcheck(enabled bool) {
	t.healthcheck = enabled
}

// SetHealthcheckInterval sets the interval between health checks.
func (t *TestCase) SetHealthcheckInterval(d time.Duration) {
	t.healthcheckInterval = d
}
|
||||
|
||||
func (t *TestCase) Run(n int) error {
|
||||
if err := t.setup(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := 1; i < n; i++ {
|
||||
go t.search()
|
||||
}
|
||||
|
||||
go t.monitor()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *TestCase) monitor() {
|
||||
print := func() {
|
||||
fmt.Printf("\033[32m%5d\033[0m; \033[31m%5d\033[0m: %s%s\r", t.runs, t.failures, t.client.String(), " ")
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case run := <-t.runCh:
|
||||
atomic.AddInt64(&t.runs, 1)
|
||||
if !run.Success {
|
||||
atomic.AddInt64(&t.failures, 1)
|
||||
fmt.Println()
|
||||
}
|
||||
print()
|
||||
case <-time.After(5 * time.Second):
|
||||
// Print stats after some inactivity
|
||||
print()
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setup creates the Elasticsearch client from the test-case configuration,
// recreates the test index from scratch, indexes two seed tweets, and
// flushes so the documents are visible to the search workers.
func (t *TestCase) setup() error {
	// Optional error logger, appended to the configured file.
	var errorlogger *log.Logger
	if t.errorlogfile != "" {
		f, err := os.OpenFile(t.errorlogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
		if err != nil {
			return err
		}
		errorlogger = log.New(f, "", log.Ltime|log.Lmicroseconds|log.Lshortfile)
	}

	// Optional info logger.
	var infologger *log.Logger
	if t.infologfile != "" {
		f, err := os.OpenFile(t.infologfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
		if err != nil {
			return err
		}
		infologger = log.New(f, "", log.LstdFlags)
	}

	// Trace request and response details like this
	var tracelogger *log.Logger
	if t.tracelogfile != "" {
		f, err := os.OpenFile(t.tracelogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
		if err != nil {
			return err
		}
		tracelogger = log.New(f, "", log.LstdFlags)
	}

	client, err := elastic.NewClient(
		elastic.SetURL(t.nodes...),
		elastic.SetErrorLog(errorlogger),
		elastic.SetInfoLog(infologger),
		elastic.SetTraceLog(tracelogger),
		elastic.SetMaxRetries(t.maxRetries),
		elastic.SetSniff(t.sniff),
		elastic.SetSnifferInterval(t.snifferInterval),
		elastic.SetHealthcheck(t.healthcheck),
		elastic.SetHealthcheckInterval(t.healthcheckInterval))
	if err != nil {
		// Handle error
		return err
	}
	t.client = client

	ctx := context.Background()

	// Use the IndexExists service to check if a specified index exists.
	exists, err := t.client.IndexExists(t.index).Do(ctx)
	if err != nil {
		return err
	}
	if exists {
		// Drop any leftover index so each run starts clean.
		deleteIndex, err := t.client.DeleteIndex(t.index).Do(ctx)
		if err != nil {
			return err
		}
		if !deleteIndex.Acknowledged {
			return errors.New("delete index not acknowledged")
		}
	}

	// Create a new index.
	createIndex, err := t.client.CreateIndex(t.index).Do(ctx)
	if err != nil {
		return err
	}
	if !createIndex.Acknowledged {
		return errors.New("create index not acknowledged")
	}

	// Index a tweet (using JSON serialization)
	tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
	_, err = t.client.Index().
		Index(t.index).
		Type("tweet").
		Id("1").
		BodyJson(tweet1).
		Do(ctx)
	if err != nil {
		return err
	}

	// Index a second tweet (by string)
	tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
	_, err = t.client.Index().
		Index(t.index).
		Type("tweet").
		Id("2").
		BodyString(tweet2).
		Do(ctx)
	if err != nil {
		return err
	}

	// Flush to make sure the documents got written.
	_, err = t.client.Flush().Index(t.index).Do(ctx)
	if err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// search runs forever: each iteration fetches a seed document by id and
// executes a term query, then reports success or failure to the monitor
// via t.runCh. Failures do not stop the loop; they are counted and retried.
func (t *TestCase) search() {
	ctx := context.Background()

	// Loop forever to check for connection issues
	for {
		// Get tweet with specified ID
		get1, err := t.client.Get().
			Index(t.index).
			Type("tweet").
			Id("1").
			Do(ctx)
		if err != nil {
			//failf("Get failed: %v", err)
			t.runCh <- RunInfo{Success: false}
			continue
		}
		if !get1.Found {
			//log.Printf("Document %s not found\n", "1")
			//fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)
			t.runCh <- RunInfo{Success: false}
			continue
		}

		// Search with a term query
		searchResult, err := t.client.Search().
			Index(t.index).                                 // search in index t.index
			Query(elastic.NewTermQuery("user", "olivere")). // specify the query
			Sort("user", true).                             // sort by "user" field, ascending
			From(0).Size(10).                               // take documents 0-9
			Pretty(true).                                   // pretty print request and response JSON
			Do(ctx)                                         // execute
		if err != nil {
			//failf("Search failed: %v\n", err)
			t.runCh <- RunInfo{Success: false}
			continue
		}

		// searchResult is of type SearchResult and returns hits, suggestions,
		// and all kinds of other information from Elasticsearch.
		//fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)

		// Number of hits
		if searchResult.Hits.TotalHits > 0 {
			//fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)

			// Iterate through results
			for _, hit := range searchResult.Hits.Hits {
				// hit.Index contains the name of the index

				// Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
				var tweet Tweet
				err := json.Unmarshal(*hit.Source, &tweet)
				if err != nil {
					// Deserialization failed
					//failf("Deserialize failed: %v\n", err)
					t.runCh <- RunInfo{Success: false}
					continue
				}

				// Work with tweet
				//fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
			}
		} else {
			// No hits
			//fmt.Print("Found no tweets\n")
		}

		t.runCh <- RunInfo{Success: true}

		// Sleep some time to spread the load of the workers.
		time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond)
	}
}
|
||||
244
vendor/gopkg.in/olivere/elastic.v5/cluster_health.go
generated
vendored
Normal file
244
vendor/gopkg.in/olivere/elastic.v5/cluster_health.go
generated
vendored
Normal file
@@ -0,0 +1,244 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// ClusterHealthService allows to get a very simple status on the health of the cluster.
//
// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-health.html
// for details.
type ClusterHealthService struct {
	client                    *Client
	pretty                    bool
	indices                   []string // restrict the report to these indices
	level                     string
	local                     *bool
	masterTimeout             string
	timeout                   string
	waitForActiveShards       *int
	waitForNodes              string
	waitForNoRelocatingShards *bool
	waitForStatus             string
}

// NewClusterHealthService creates a new ClusterHealthService.
func NewClusterHealthService(client *Client) *ClusterHealthService {
	return &ClusterHealthService{
		client:  client,
		indices: make([]string, 0),
	}
}

// Index limits the information returned to specific indices.
func (s *ClusterHealthService) Index(indices ...string) *ClusterHealthService {
	s.indices = append(s.indices, indices...)
	return s
}

// Level specifies the level of detail for returned information.
func (s *ClusterHealthService) Level(level string) *ClusterHealthService {
	s.level = level
	return s
}

// Local indicates whether to return local information. If it is true,
// we do not retrieve the state from master node (default: false).
func (s *ClusterHealthService) Local(local bool) *ClusterHealthService {
	s.local = &local
	return s
}

// MasterTimeout specifies an explicit operation timeout for connection to master node.
func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService {
	s.masterTimeout = masterTimeout
	return s
}

// Timeout specifies an explicit operation timeout.
func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService {
	s.timeout = timeout
	return s
}
|
||||
|
||||
// WaitForActiveShards can be used to wait until the specified number of shards are active.
|
||||
func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService {
|
||||
s.waitForActiveShards = &waitForActiveShards
|
||||
return s
|
||||
}
|
||||
|
||||
// WaitForNodes can be used to wait until the specified number of nodes are available.
|
||||
// Example: "12" to wait for exact values, ">12" and "<12" for ranges.
|
||||
func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService {
|
||||
s.waitForNodes = waitForNodes
|
||||
return s
|
||||
}
|
||||
|
||||
// WaitForNoRelocatingShards can be used to wait until all shard relocations are finished.
|
||||
func (s *ClusterHealthService) WaitForNoRelocatingShards(waitForNoRelocatingShards bool) *ClusterHealthService {
|
||||
s.waitForNoRelocatingShards = &waitForNoRelocatingShards
|
||||
return s
|
||||
}
|
||||
|
||||
// WaitForStatus can be used to wait until the cluster is in a specific state.
|
||||
// Valid values are: green, yellow, or red.
|
||||
func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
|
||||
s.waitForStatus = waitForStatus
|
||||
return s
|
||||
}
|
||||
|
||||
// WaitForGreenStatus will wait for the "green" state.
|
||||
func (s *ClusterHealthService) WaitForGreenStatus() *ClusterHealthService {
|
||||
return s.WaitForStatus("green")
|
||||
}
|
||||
|
||||
// WaitForYellowStatus will wait for the "yellow" state.
|
||||
func (s *ClusterHealthService) WaitForYellowStatus() *ClusterHealthService {
|
||||
return s.WaitForStatus("yellow")
|
||||
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (s *ClusterHealthService) Pretty(pretty bool) *ClusterHealthService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
|
||||
// Build URL
|
||||
var err error
|
||||
var path string
|
||||
if len(s.indices) > 0 {
|
||||
path, err = uritemplates.Expand("/_cluster/health/{index}", map[string]string{
|
||||
"index": strings.Join(s.indices, ","),
|
||||
})
|
||||
} else {
|
||||
path = "/_cluster/health"
|
||||
}
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.pretty {
|
||||
params.Set("pretty", "1")
|
||||
}
|
||||
if s.level != "" {
|
||||
params.Set("level", s.level)
|
||||
}
|
||||
if s.local != nil {
|
||||
params.Set("local", fmt.Sprintf("%v", *s.local))
|
||||
}
|
||||
if s.masterTimeout != "" {
|
||||
params.Set("master_timeout", s.masterTimeout)
|
||||
}
|
||||
if s.timeout != "" {
|
||||
params.Set("timeout", s.timeout)
|
||||
}
|
||||
if s.waitForActiveShards != nil {
|
||||
params.Set("wait_for_active_shards", fmt.Sprintf("%v", s.waitForActiveShards))
|
||||
}
|
||||
if s.waitForNodes != "" {
|
||||
params.Set("wait_for_nodes", s.waitForNodes)
|
||||
}
|
||||
if s.waitForNoRelocatingShards != nil {
|
||||
params.Set("wait_for_no_relocating_shards", fmt.Sprintf("%v", *s.waitForNoRelocatingShards))
|
||||
}
|
||||
if s.waitForStatus != "" {
|
||||
params.Set("wait_for_status", s.waitForStatus)
|
||||
}
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *ClusterHealthService) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do executes the operation.
|
||||
func (s *ClusterHealthService) Do(ctx context.Context) (*ClusterHealthResponse, error) {
|
||||
// Check pre-conditions
|
||||
if err := s.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get URL for request
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get HTTP response
|
||||
res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return operation response
|
||||
ret := new(ClusterHealthResponse)
|
||||
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// ClusterHealthResponse is the response of ClusterHealthService.Do.
|
||||
type ClusterHealthResponse struct {
|
||||
ClusterName string `json:"cluster_name"`
|
||||
Status string `json:"status"`
|
||||
TimedOut bool `json:"timed_out"`
|
||||
NumberOfNodes int `json:"number_of_nodes"`
|
||||
NumberOfDataNodes int `json:"number_of_data_nodes"`
|
||||
ActivePrimaryShards int `json:"active_primary_shards"`
|
||||
ActiveShards int `json:"active_shards"`
|
||||
RelocatingShards int `json:"relocating_shards"`
|
||||
InitializingShards int `json:"initializing_shards"`
|
||||
UnassignedShards int `json:"unassigned_shards"`
|
||||
DelayedUnassignedShards int `json:"delayed_unassigned_shards"`
|
||||
NumberOfPendingTasks int `json:"number_of_pending_tasks"`
|
||||
NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"`
|
||||
TaskMaxWaitTimeInQueueInMillis int `json:"task_max_waiting_in_queue_millis"`
|
||||
ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"`
|
||||
|
||||
// Validation failures -> index name -> array of validation failures
|
||||
ValidationFailures []map[string][]string `json:"validation_failures"`
|
||||
|
||||
// Index name -> index health
|
||||
Indices map[string]*ClusterIndexHealth `json:"indices"`
|
||||
}
|
||||
|
||||
// ClusterIndexHealth will be returned as part of ClusterHealthResponse.
|
||||
type ClusterIndexHealth struct {
|
||||
Status string `json:"status"`
|
||||
NumberOfShards int `json:"number_of_shards"`
|
||||
NumberOfReplicas int `json:"number_of_replicas"`
|
||||
ActivePrimaryShards int `json:"active_primary_shards"`
|
||||
ActiveShards int `json:"active_shards"`
|
||||
RelocatingShards int `json:"relocating_shards"`
|
||||
InitializingShards int `json:"initializing_shards"`
|
||||
UnassignedShards int `json:"unassigned_shards"`
|
||||
// Validation failures
|
||||
ValidationFailures []string `json:"validation_failures"`
|
||||
// Shards by id, e.g. "0" or "1"
|
||||
Shards map[string]*ClusterShardHealth `json:"shards"`
|
||||
}
|
||||
|
||||
// ClusterShardHealth will be returned as part of ClusterHealthResponse.
|
||||
type ClusterShardHealth struct {
|
||||
Status string `json:"status"`
|
||||
PrimaryActive bool `json:"primary_active"`
|
||||
ActiveShards int `json:"active_shards"`
|
||||
RelocatingShards int `json:"relocating_shards"`
|
||||
InitializingShards int `json:"initializing_shards"`
|
||||
UnassignedShards int `json:"unassigned_shards"`
|
||||
}
|
||||
119
vendor/gopkg.in/olivere/elastic.v5/cluster_health_test.go
generated
vendored
Normal file
119
vendor/gopkg.in/olivere/elastic.v5/cluster_health_test.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestClusterHealth(t *testing.T) {
|
||||
client := setupTestClientAndCreateIndex(t)
|
||||
|
||||
// Get cluster health
|
||||
res, err := client.ClusterHealth().Index(testIndexName).Level("shards").Pretty(true).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res == nil {
|
||||
t.Fatalf("expected res to be != nil; got: %v", res)
|
||||
}
|
||||
if res.Status != "green" && res.Status != "red" && res.Status != "yellow" {
|
||||
t.Fatalf("expected status \"green\", \"red\", or \"yellow\"; got: %q", res.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClusterHealthURLs(t *testing.T) {
|
||||
tests := []struct {
|
||||
Service *ClusterHealthService
|
||||
ExpectedPath string
|
||||
ExpectedParams url.Values
|
||||
}{
|
||||
{
|
||||
Service: &ClusterHealthService{
|
||||
indices: []string{},
|
||||
},
|
||||
ExpectedPath: "/_cluster/health",
|
||||
},
|
||||
{
|
||||
Service: &ClusterHealthService{
|
||||
indices: []string{"twitter"},
|
||||
},
|
||||
ExpectedPath: "/_cluster/health/twitter",
|
||||
},
|
||||
{
|
||||
Service: &ClusterHealthService{
|
||||
indices: []string{"twitter", "gplus"},
|
||||
},
|
||||
ExpectedPath: "/_cluster/health/twitter%2Cgplus",
|
||||
},
|
||||
{
|
||||
Service: &ClusterHealthService{
|
||||
indices: []string{"twitter"},
|
||||
waitForStatus: "yellow",
|
||||
},
|
||||
ExpectedPath: "/_cluster/health/twitter",
|
||||
ExpectedParams: url.Values{"wait_for_status": []string{"yellow"}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
gotPath, gotParams, err := test.Service.buildURL()
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error; got: %v", err)
|
||||
}
|
||||
if gotPath != test.ExpectedPath {
|
||||
t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
|
||||
}
|
||||
if gotParams.Encode() != test.ExpectedParams.Encode() {
|
||||
t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestClusterHealthWaitForStatus(t *testing.T) {
|
||||
client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
|
||||
|
||||
// Ensure preconditions are met: A green cluster.
|
||||
health, err := client.ClusterHealth().Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got, want := health.Status, "green"; got != want {
|
||||
t.Skipf("precondition failed: expected cluster to be %q, not %q", want, got)
|
||||
}
|
||||
|
||||
// Cluster health on an index that does not exist should never get to yellow
|
||||
health, err = client.ClusterHealth().Index("no-such-index").WaitForStatus("yellow").Timeout("1s").Do(context.TODO())
|
||||
if err == nil {
|
||||
t.Fatalf("expected timeout error; got: %v", err)
|
||||
}
|
||||
if !IsTimeout(err) {
|
||||
t.Fatalf("expected timeout error; got: %v", err)
|
||||
}
|
||||
if health != nil {
|
||||
t.Fatalf("expected no response; got: %v", health)
|
||||
}
|
||||
|
||||
// Cluster wide health
|
||||
health, err = client.ClusterHealth().WaitForGreenStatus().Timeout("10s").Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error; got: %v", err)
|
||||
}
|
||||
if health.TimedOut != false {
|
||||
t.Fatalf("expected no timeout; got: %v "+
|
||||
"(does your local cluster contain unassigned shards?)", health.TimedOut)
|
||||
}
|
||||
if health.Status != "green" {
|
||||
t.Fatalf("expected health = %q; got: %q", "green", health.Status)
|
||||
}
|
||||
|
||||
// Cluster wide health via shortcut on client
|
||||
err = client.WaitForGreenStatus("10s")
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error; got: %v", err)
|
||||
}
|
||||
}
|
||||
284
vendor/gopkg.in/olivere/elastic.v5/cluster_state.go
generated
vendored
Normal file
284
vendor/gopkg.in/olivere/elastic.v5/cluster_state.go
generated
vendored
Normal file
@@ -0,0 +1,284 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// ClusterStateService allows to get a comprehensive state information of the whole cluster.
|
||||
//
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-state.html
|
||||
// for details.
|
||||
type ClusterStateService struct {
|
||||
client *Client
|
||||
pretty bool
|
||||
indices []string
|
||||
metrics []string
|
||||
allowNoIndices *bool
|
||||
expandWildcards string
|
||||
flatSettings *bool
|
||||
ignoreUnavailable *bool
|
||||
local *bool
|
||||
masterTimeout string
|
||||
}
|
||||
|
||||
// NewClusterStateService creates a new ClusterStateService.
|
||||
func NewClusterStateService(client *Client) *ClusterStateService {
|
||||
return &ClusterStateService{
|
||||
client: client,
|
||||
indices: make([]string, 0),
|
||||
metrics: make([]string, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Index is a list of index names. Use _all or an empty string to
|
||||
// perform the operation on all indices.
|
||||
func (s *ClusterStateService) Index(indices ...string) *ClusterStateService {
|
||||
s.indices = append(s.indices, indices...)
|
||||
return s
|
||||
}
|
||||
|
||||
// Metric limits the information returned to the specified metric.
|
||||
// It can be one of: version, master_node, nodes, routing_table, metadata,
|
||||
// blocks, or customs.
|
||||
func (s *ClusterStateService) Metric(metrics ...string) *ClusterStateService {
|
||||
s.metrics = append(s.metrics, metrics...)
|
||||
return s
|
||||
}
|
||||
|
||||
// AllowNoIndices indicates whether to ignore if a wildcard indices
|
||||
// expression resolves into no concrete indices.
|
||||
// (This includes `_all` string or when no indices have been specified).
|
||||
func (s *ClusterStateService) AllowNoIndices(allowNoIndices bool) *ClusterStateService {
|
||||
s.allowNoIndices = &allowNoIndices
|
||||
return s
|
||||
}
|
||||
|
||||
// ExpandWildcards indicates whether to expand wildcard expression to
|
||||
// concrete indices that are open, closed or both..
|
||||
func (s *ClusterStateService) ExpandWildcards(expandWildcards string) *ClusterStateService {
|
||||
s.expandWildcards = expandWildcards
|
||||
return s
|
||||
}
|
||||
|
||||
// FlatSettings, when set, returns settings in flat format (default: false).
|
||||
func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService {
|
||||
s.flatSettings = &flatSettings
|
||||
return s
|
||||
}
|
||||
|
||||
// IgnoreUnavailable indicates whether specified concrete indices should be
|
||||
// ignored when unavailable (missing or closed).
|
||||
func (s *ClusterStateService) IgnoreUnavailable(ignoreUnavailable bool) *ClusterStateService {
|
||||
s.ignoreUnavailable = &ignoreUnavailable
|
||||
return s
|
||||
}
|
||||
|
||||
// Local indicates whether to return local information. When set, it does not
|
||||
// retrieve the state from master node (default: false).
|
||||
func (s *ClusterStateService) Local(local bool) *ClusterStateService {
|
||||
s.local = &local
|
||||
return s
|
||||
}
|
||||
|
||||
// MasterTimeout specifies timeout for connection to master.
|
||||
func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService {
|
||||
s.masterTimeout = masterTimeout
|
||||
return s
|
||||
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (s *ClusterStateService) Pretty(pretty bool) *ClusterStateService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *ClusterStateService) buildURL() (string, url.Values, error) {
|
||||
// Build URL
|
||||
metrics := strings.Join(s.metrics, ",")
|
||||
if metrics == "" {
|
||||
metrics = "_all"
|
||||
}
|
||||
indices := strings.Join(s.indices, ",")
|
||||
if indices == "" {
|
||||
indices = "_all"
|
||||
}
|
||||
path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{
|
||||
"metrics": metrics,
|
||||
"indices": indices,
|
||||
})
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.pretty {
|
||||
params.Set("pretty", "1")
|
||||
}
|
||||
if s.allowNoIndices != nil {
|
||||
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
|
||||
}
|
||||
if s.expandWildcards != "" {
|
||||
params.Set("expand_wildcards", s.expandWildcards)
|
||||
}
|
||||
if s.flatSettings != nil {
|
||||
params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
|
||||
}
|
||||
if s.ignoreUnavailable != nil {
|
||||
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
|
||||
}
|
||||
if s.local != nil {
|
||||
params.Set("local", fmt.Sprintf("%v", *s.local))
|
||||
}
|
||||
if s.masterTimeout != "" {
|
||||
params.Set("master_timeout", s.masterTimeout)
|
||||
}
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *ClusterStateService) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do executes the operation.
|
||||
func (s *ClusterStateService) Do(ctx context.Context) (*ClusterStateResponse, error) {
|
||||
// Check pre-conditions
|
||||
if err := s.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get URL for request
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get HTTP response
|
||||
res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return operation response
|
||||
ret := new(ClusterStateResponse)
|
||||
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// ClusterStateResponse is the response of ClusterStateService.Do.
|
||||
type ClusterStateResponse struct {
|
||||
ClusterName string `json:"cluster_name"`
|
||||
Version int64 `json:"version"`
|
||||
StateUUID string `json:"state_uuid"`
|
||||
MasterNode string `json:"master_node"`
|
||||
Blocks map[string]*clusterBlocks `json:"blocks"`
|
||||
Nodes map[string]*discoveryNode `json:"nodes"`
|
||||
Metadata *clusterStateMetadata `json:"metadata"`
|
||||
RoutingTable map[string]*clusterStateRoutingTable `json:"routing_table"`
|
||||
RoutingNodes *clusterStateRoutingNode `json:"routing_nodes"`
|
||||
Customs map[string]interface{} `json:"customs"`
|
||||
}
|
||||
|
||||
type clusterBlocks struct {
|
||||
Global map[string]*clusterBlock `json:"global"` // id -> cluster block
|
||||
Indices map[string]*clusterBlock `json:"indices"` // index name -> cluster block
|
||||
}
|
||||
|
||||
type clusterBlock struct {
|
||||
Description string `json:"description"`
|
||||
Retryable bool `json:"retryable"`
|
||||
DisableStatePersistence bool `json:"disable_state_persistence"`
|
||||
Levels []string `json:"levels"`
|
||||
}
|
||||
|
||||
type clusterStateMetadata struct {
|
||||
ClusterUUID string `json:"cluster_uuid"`
|
||||
Templates map[string]*indexTemplateMetaData `json:"templates"` // template name -> index template metadata
|
||||
Indices map[string]*indexMetaData `json:"indices"` // index name _> meta data
|
||||
RoutingTable struct {
|
||||
Indices map[string]*indexRoutingTable `json:"indices"` // index name -> routing table
|
||||
} `json:"routing_table"`
|
||||
RoutingNodes struct {
|
||||
Unassigned []*shardRouting `json:"unassigned"`
|
||||
Nodes []*shardRouting `json:"nodes"`
|
||||
} `json:"routing_nodes"`
|
||||
Customs map[string]interface{} `json:"customs"`
|
||||
}
|
||||
|
||||
type discoveryNode struct {
|
||||
Name string `json:"name"` // server name, e.g. "es1"
|
||||
TransportAddress string `json:"transport_address"` // e.g. inet[/1.2.3.4:9300]
|
||||
Attributes map[string]interface{} `json:"attributes"` // e.g. { "data": true, "master": true }
|
||||
}
|
||||
|
||||
type clusterStateRoutingTable struct {
|
||||
Indices map[string]interface{} `json:"indices"`
|
||||
}
|
||||
|
||||
type clusterStateRoutingNode struct {
|
||||
Unassigned []*shardRouting `json:"unassigned"`
|
||||
// Node Id -> shardRouting
|
||||
Nodes map[string][]*shardRouting `json:"nodes"`
|
||||
}
|
||||
|
||||
type indexTemplateMetaData struct {
|
||||
Template string `json:"template"` // e.g. "store-*"
|
||||
Order int `json:"order"`
|
||||
Settings map[string]interface{} `json:"settings"` // index settings
|
||||
Mappings map[string]interface{} `json:"mappings"` // type name -> mapping
|
||||
}
|
||||
|
||||
type indexMetaData struct {
|
||||
State string `json:"state"`
|
||||
Settings map[string]interface{} `json:"settings"`
|
||||
Mappings map[string]interface{} `json:"mappings"`
|
||||
Aliases []string `json:"aliases"` // e.g. [ "alias1", "alias2" ]
|
||||
}
|
||||
|
||||
type indexRoutingTable struct {
|
||||
Shards map[string]*shardRouting `json:"shards"`
|
||||
}
|
||||
|
||||
type shardRouting struct {
|
||||
State string `json:"state"`
|
||||
Primary bool `json:"primary"`
|
||||
Node string `json:"node"`
|
||||
RelocatingNode string `json:"relocating_node"`
|
||||
Shard int `json:"shard"`
|
||||
Index string `json:"index"`
|
||||
Version int64 `json:"version"`
|
||||
RestoreSource *RestoreSource `json:"restore_source"`
|
||||
AllocationId *allocationId `json:"allocation_id"`
|
||||
UnassignedInfo *unassignedInfo `json:"unassigned_info"`
|
||||
}
|
||||
|
||||
type RestoreSource struct {
|
||||
Repository string `json:"repository"`
|
||||
Snapshot string `json:"snapshot"`
|
||||
Version string `json:"version"`
|
||||
Index string `json:"index"`
|
||||
}
|
||||
|
||||
type allocationId struct {
|
||||
Id string `json:"id"`
|
||||
RelocationId string `json:"relocation_id"`
|
||||
}
|
||||
|
||||
type unassignedInfo struct {
|
||||
Reason string `json:"reason"`
|
||||
At string `json:"at"`
|
||||
Details string `json:"details"`
|
||||
}
|
||||
93
vendor/gopkg.in/olivere/elastic.v5/cluster_state_test.go
generated
vendored
Normal file
93
vendor/gopkg.in/olivere/elastic.v5/cluster_state_test.go
generated
vendored
Normal file
@@ -0,0 +1,93 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestClusterState(t *testing.T) {
|
||||
client := setupTestClientAndCreateIndex(t)
|
||||
|
||||
// Get cluster state
|
||||
res, err := client.ClusterState().Index("_all").Metric("_all").Pretty(true).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res == nil {
|
||||
t.Fatalf("expected res to be != nil; got: %v", res)
|
||||
}
|
||||
if res.ClusterName == "" {
|
||||
t.Fatalf("expected a cluster name; got: %q", res.ClusterName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClusterStateURLs(t *testing.T) {
|
||||
tests := []struct {
|
||||
Service *ClusterStateService
|
||||
ExpectedPath string
|
||||
ExpectedParams url.Values
|
||||
}{
|
||||
{
|
||||
Service: &ClusterStateService{
|
||||
indices: []string{},
|
||||
metrics: []string{},
|
||||
},
|
||||
ExpectedPath: "/_cluster/state/_all/_all",
|
||||
},
|
||||
{
|
||||
Service: &ClusterStateService{
|
||||
indices: []string{"twitter"},
|
||||
metrics: []string{},
|
||||
},
|
||||
ExpectedPath: "/_cluster/state/_all/twitter",
|
||||
},
|
||||
{
|
||||
Service: &ClusterStateService{
|
||||
indices: []string{"twitter", "gplus"},
|
||||
metrics: []string{},
|
||||
},
|
||||
ExpectedPath: "/_cluster/state/_all/twitter%2Cgplus",
|
||||
},
|
||||
{
|
||||
Service: &ClusterStateService{
|
||||
indices: []string{},
|
||||
metrics: []string{"nodes"},
|
||||
},
|
||||
ExpectedPath: "/_cluster/state/nodes/_all",
|
||||
},
|
||||
{
|
||||
Service: &ClusterStateService{
|
||||
indices: []string{"twitter"},
|
||||
metrics: []string{"nodes"},
|
||||
},
|
||||
ExpectedPath: "/_cluster/state/nodes/twitter",
|
||||
},
|
||||
{
|
||||
Service: &ClusterStateService{
|
||||
indices: []string{"twitter"},
|
||||
metrics: []string{"nodes"},
|
||||
masterTimeout: "1s",
|
||||
},
|
||||
ExpectedPath: "/_cluster/state/nodes/twitter",
|
||||
ExpectedParams: url.Values{"master_timeout": []string{"1s"}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
gotPath, gotParams, err := test.Service.buildURL()
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error; got: %v", err)
|
||||
}
|
||||
if gotPath != test.ExpectedPath {
|
||||
t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
|
||||
}
|
||||
if gotParams.Encode() != test.ExpectedParams.Encode() {
|
||||
t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
|
||||
}
|
||||
}
|
||||
}
|
||||
350
vendor/gopkg.in/olivere/elastic.v5/cluster_stats.go
generated
vendored
Normal file
350
vendor/gopkg.in/olivere/elastic.v5/cluster_stats.go
generated
vendored
Normal file
@@ -0,0 +1,350 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// ClusterStatsService is documented at
|
||||
// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-stats.html.
|
||||
type ClusterStatsService struct {
|
||||
client *Client
|
||||
pretty bool
|
||||
nodeId []string
|
||||
flatSettings *bool
|
||||
human *bool
|
||||
}
|
||||
|
||||
// NewClusterStatsService creates a new ClusterStatsService.
|
||||
func NewClusterStatsService(client *Client) *ClusterStatsService {
|
||||
return &ClusterStatsService{
|
||||
client: client,
|
||||
nodeId: make([]string, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// NodeId is documented as: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes.
|
||||
func (s *ClusterStatsService) NodeId(nodeId []string) *ClusterStatsService {
|
||||
s.nodeId = nodeId
|
||||
return s
|
||||
}
|
||||
|
||||
// FlatSettings is documented as: Return settings in flat format (default: false).
|
||||
func (s *ClusterStatsService) FlatSettings(flatSettings bool) *ClusterStatsService {
|
||||
s.flatSettings = &flatSettings
|
||||
return s
|
||||
}
|
||||
|
||||
// Human is documented as: Whether to return time and byte values in human-readable format..
|
||||
func (s *ClusterStatsService) Human(human bool) *ClusterStatsService {
|
||||
s.human = &human
|
||||
return s
|
||||
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (s *ClusterStatsService) Pretty(pretty bool) *ClusterStatsService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *ClusterStatsService) buildURL() (string, url.Values, error) {
|
||||
// Build URL
|
||||
var err error
|
||||
var path string
|
||||
|
||||
if len(s.nodeId) > 0 {
|
||||
path, err = uritemplates.Expand("/_cluster/stats/nodes/{node_id}", map[string]string{
|
||||
"node_id": strings.Join(s.nodeId, ","),
|
||||
})
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
} else {
|
||||
path, err = uritemplates.Expand("/_cluster/stats", map[string]string{})
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.pretty {
|
||||
params.Set("pretty", "1")
|
||||
}
|
||||
if s.flatSettings != nil {
|
||||
params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
|
||||
}
|
||||
if s.human != nil {
|
||||
params.Set("human", fmt.Sprintf("%v", *s.human))
|
||||
}
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *ClusterStatsService) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do executes the operation.
|
||||
func (s *ClusterStatsService) Do(ctx context.Context) (*ClusterStatsResponse, error) {
|
||||
// Check pre-conditions
|
||||
if err := s.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get URL for request
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get HTTP response
|
||||
res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return operation response
|
||||
ret := new(ClusterStatsResponse)
|
||||
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// ClusterStatsResponse is the response of ClusterStatsService.Do.
|
||||
type ClusterStatsResponse struct {
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
ClusterName string `json:"cluster_name"`
|
||||
ClusterUUID string `json:"uuid"`
|
||||
Status string `json:"status"`
|
||||
Indices *ClusterStatsIndices `json:"indices"`
|
||||
Nodes *ClusterStatsNodes `json:"nodes"`
|
||||
}
|
||||
|
||||
type ClusterStatsIndices struct {
|
||||
Count int `json:"count"`
|
||||
Shards *ClusterStatsIndicesShards `json:"shards"`
|
||||
Docs *ClusterStatsIndicesDocs `json:"docs"`
|
||||
Store *ClusterStatsIndicesStore `json:"store"`
|
||||
FieldData *ClusterStatsIndicesFieldData `json:"fielddata"`
|
||||
FilterCache *ClusterStatsIndicesFilterCache `json:"filter_cache"`
|
||||
IdCache *ClusterStatsIndicesIdCache `json:"id_cache"`
|
||||
Completion *ClusterStatsIndicesCompletion `json:"completion"`
|
||||
Segments *ClusterStatsIndicesSegments `json:"segments"`
|
||||
Percolate *ClusterStatsIndicesPercolate `json:"percolate"`
|
||||
}
|
||||
|
||||
// ClusterStatsIndicesShards holds cluster-wide shard statistics.
type ClusterStatsIndicesShards struct {
	Total       int                             `json:"total"`
	Primaries   int                             `json:"primaries"`
	Replication float64                         `json:"replication"`
	Index       *ClusterStatsIndicesShardsIndex `json:"index"` // per-index min/max/avg breakdown
}

// ClusterStatsIndicesShardsIndex holds min/max/avg shard figures
// computed across the indices of the cluster.
type ClusterStatsIndicesShardsIndex struct {
	Shards      *ClusterStatsIndicesShardsIndexIntMinMax     `json:"shards"`
	Primaries   *ClusterStatsIndicesShardsIndexIntMinMax     `json:"primaries"`
	Replication *ClusterStatsIndicesShardsIndexFloat64MinMax `json:"replication"`
}

// ClusterStatsIndicesShardsIndexIntMinMax is an integer-valued
// min/max pair with a floating-point average.
type ClusterStatsIndicesShardsIndexIntMinMax struct {
	Min int     `json:"min"`
	Max int     `json:"max"`
	Avg float64 `json:"avg"`
}

// ClusterStatsIndicesShardsIndexFloat64MinMax is a float-valued
// min/max/avg triple.
type ClusterStatsIndicesShardsIndexFloat64MinMax struct {
	Min float64 `json:"min"`
	Max float64 `json:"max"`
	Avg float64 `json:"avg"`
}
|
||||
|
||||
// ClusterStatsIndicesDocs holds document counts across all indices.
type ClusterStatsIndicesDocs struct {
	Count   int `json:"count"`
	Deleted int `json:"deleted"`
}

// ClusterStatsIndicesStore holds on-disk store statistics.
// Sizes come in both a human-readable string and a byte count.
type ClusterStatsIndicesStore struct {
	Size                 string `json:"size"` // e.g. "5.3gb"
	SizeInBytes          int64  `json:"size_in_bytes"`
	ThrottleTime         string `json:"throttle_time"` // e.g. "0s"
	ThrottleTimeInMillis int64  `json:"throttle_time_in_millis"`
}

// ClusterStatsIndicesFieldData holds field-data cache statistics,
// optionally broken down per field.
type ClusterStatsIndicesFieldData struct {
	MemorySize        string `json:"memory_size"` // e.g. "61.3kb"
	MemorySizeInBytes int64  `json:"memory_size_in_bytes"`
	Evictions         int64  `json:"evictions"`
	// Fields maps a field name to its cache footprint; only present
	// when the response includes a per-field breakdown.
	Fields map[string]struct {
		MemorySize        string `json:"memory_size"` // e.g. "61.3kb"
		MemorySizeInBytes int64  `json:"memory_size_in_bytes"`
	} `json:"fields"`
}
|
||||
|
||||
// ClusterStatsIndicesFilterCache holds filter-cache statistics.
type ClusterStatsIndicesFilterCache struct {
	MemorySize        string `json:"memory_size"` // e.g. "61.3kb"
	MemorySizeInBytes int64  `json:"memory_size_in_bytes"`
	Evictions         int64  `json:"evictions"`
}

// ClusterStatsIndicesIdCache holds id-cache statistics.
type ClusterStatsIndicesIdCache struct {
	MemorySize        string `json:"memory_size"` // e.g. "61.3kb"
	MemorySizeInBytes int64  `json:"memory_size_in_bytes"`
}

// ClusterStatsIndicesCompletion holds completion-suggester statistics,
// optionally broken down per field.
type ClusterStatsIndicesCompletion struct {
	Size        string `json:"size"` // e.g. "61.3kb"
	SizeInBytes int64  `json:"size_in_bytes"`
	// Fields maps a field name to its completion footprint; only
	// present when the response includes a per-field breakdown.
	Fields map[string]struct {
		Size        string `json:"size"` // e.g. "61.3kb"
		SizeInBytes int64  `json:"size_in_bytes"`
	} `json:"fields"`
}
|
||||
|
||||
// ClusterStatsIndicesSegments holds Lucene segment statistics.
// Each size is reported both as a human-readable string and in bytes.
type ClusterStatsIndicesSegments struct {
	Count                       int64  `json:"count"`
	Memory                      string `json:"memory"` // e.g. "61.3kb"
	MemoryInBytes               int64  `json:"memory_in_bytes"`
	IndexWriterMemory           string `json:"index_writer_memory"` // e.g. "61.3kb"
	IndexWriterMemoryInBytes    int64  `json:"index_writer_memory_in_bytes"`
	IndexWriterMaxMemory        string `json:"index_writer_max_memory"` // e.g. "61.3kb"
	IndexWriterMaxMemoryInBytes int64  `json:"index_writer_max_memory_in_bytes"`
	VersionMapMemory            string `json:"version_map_memory"` // e.g. "61.3kb"
	VersionMapMemoryInBytes     int64  `json:"version_map_memory_in_bytes"`
	FixedBitSet                 string `json:"fixed_bit_set"` // e.g. "61.3kb"
	// NOTE(review): tag is "fixed_bit_set_memory_in_bytes" while the
	// string field above uses "fixed_bit_set" — looks intentional but
	// worth confirming against the ES response.
	FixedBitSetInBytes int64 `json:"fixed_bit_set_memory_in_bytes"`
}
|
||||
|
||||
type ClusterStatsIndicesPercolate struct {
|
||||
Total int64 `json:"total"`
|
||||
// TODO(oe) The JSON tag here is wrong as of ES 1.5.2 it seems
|
||||
Time string `json:"get_time"` // e.g. "1s"
|
||||
TimeInBytes int64 `json:"time_in_millis"`
|
||||
Current int64 `json:"current"`
|
||||
MemorySize string `json:"memory_size"` // e.g. "61.3kb"
|
||||
MemorySizeInBytes int64 `json:"memory_sitze_in_bytes"`
|
||||
Queries int64 `json:"queries"`
|
||||
}
|
||||
|
||||
// ---
|
||||
|
||||
// ClusterStatsNodes groups the node-level sections of the cluster
// stats response. Pointers are nil when the section is absent.
type ClusterStatsNodes struct {
	Count    *ClusterStatsNodesCount        `json:"count"`
	Versions []string                       `json:"versions"` // Elasticsearch versions running in the cluster
	OS       *ClusterStatsNodesOsStats      `json:"os"`
	Process  *ClusterStatsNodesProcessStats `json:"process"`
	JVM      *ClusterStatsNodesJvmStats     `json:"jvm"`
	FS       *ClusterStatsNodesFsStats      `json:"fs"`
	Plugins  []*ClusterStatsNodesPlugin     `json:"plugins"`
}

// ClusterStatsNodesCount breaks the node total down by role.
type ClusterStatsNodesCount struct {
	Total      int `json:"total"`
	MasterOnly int `json:"master_only"`
	DataOnly   int `json:"data_only"`
	MasterData int `json:"master_data"`
	Client     int `json:"client"`
}

// ClusterStatsNodesOsStats holds operating-system statistics
// aggregated over the nodes.
type ClusterStatsNodesOsStats struct {
	AvailableProcessors int                            `json:"available_processors"`
	Mem                 *ClusterStatsNodesOsStatsMem   `json:"mem"`
	CPU                 []*ClusterStatsNodesOsStatsCPU `json:"cpu"` // one entry per distinct CPU model, presumably — verify
}
|
||||
|
||||
// ClusterStatsNodesOsStatsMem holds aggregated physical memory.
type ClusterStatsNodesOsStatsMem struct {
	Total        string `json:"total"` // e.g. "16gb"
	TotalInBytes int64  `json:"total_in_bytes"`
}

// ClusterStatsNodesOsStatsCPU describes a CPU model found on the
// nodes, plus how many nodes report it (Count).
type ClusterStatsNodesOsStatsCPU struct {
	Vendor           string `json:"vendor"`
	Model            string `json:"model"`
	MHz              int    `json:"mhz"`
	TotalCores       int    `json:"total_cores"`
	TotalSockets     int    `json:"total_sockets"`
	CoresPerSocket   int    `json:"cores_per_socket"`
	CacheSize        string `json:"cache_size"` // e.g. "256b"
	CacheSizeInBytes int64  `json:"cache_size_in_bytes"`
	Count            int    `json:"count"`
}
|
||||
|
||||
// ClusterStatsNodesProcessStats holds process-level statistics
// aggregated over the nodes.
type ClusterStatsNodesProcessStats struct {
	CPU                 *ClusterStatsNodesProcessStatsCPU                 `json:"cpu"`
	OpenFileDescriptors *ClusterStatsNodesProcessStatsOpenFileDescriptors `json:"open_file_descriptors"`
}

// ClusterStatsNodesProcessStatsCPU holds the aggregated CPU usage.
type ClusterStatsNodesProcessStatsCPU struct {
	Percent float64 `json:"percent"`
}

// ClusterStatsNodesProcessStatsOpenFileDescriptors holds min/max/avg
// open file descriptor counts across the nodes.
type ClusterStatsNodesProcessStatsOpenFileDescriptors struct {
	Min int64 `json:"min"`
	Max int64 `json:"max"`
	Avg int64 `json:"avg"`
}
|
||||
|
||||
// ClusterStatsNodesJvmStats holds JVM statistics aggregated over the
// nodes of the cluster.
type ClusterStatsNodesJvmStats struct {
	MaxUptime         string                              `json:"max_uptime"` // e.g. "5h"
	MaxUptimeInMillis int64                               `json:"max_uptime_in_millis"`
	Versions          []*ClusterStatsNodesJvmStatsVersion `json:"versions"` // distinct JVM versions in use
	Mem               *ClusterStatsNodesJvmStatsMem       `json:"mem"`
	Threads           int64                               `json:"threads"`
}

// ClusterStatsNodesJvmStatsVersion describes one JVM version found
// in the cluster and how many nodes run it (Count).
type ClusterStatsNodesJvmStatsVersion struct {
	Version   string `json:"version"`    // e.g. "1.8.0_45"
	VMName    string `json:"vm_name"`    // e.g. "Java HotSpot(TM) 64-Bit Server VM"
	VMVersion string `json:"vm_version"` // e.g. "25.45-b02"
	VMVendor  string `json:"vm_vendor"`  // e.g. "Oracle Corporation"
	Count     int    `json:"count"`
}

// ClusterStatsNodesJvmStatsMem holds aggregated JVM heap usage.
type ClusterStatsNodesJvmStatsMem struct {
	HeapUsed        string `json:"heap_used"`
	HeapUsedInBytes int64  `json:"heap_used_in_bytes"`
	HeapMax         string `json:"heap_max"`
	HeapMaxInBytes  int64  `json:"heap_max_in_bytes"`
}
|
||||
|
||||
// ClusterStatsNodesFsStats holds filesystem statistics aggregated
// over the nodes. Sizes come both human-readable and in bytes.
type ClusterStatsNodesFsStats struct {
	Path                 string `json:"path"`
	Mount                string `json:"mount"`
	Dev                  string `json:"dev"`
	Total                string `json:"total"` // e.g. "930.7gb"
	TotalInBytes         int64  `json:"total_in_bytes"`
	Free                 string `json:"free"` // e.g. "930.7gb"
	FreeInBytes          int64  `json:"free_in_bytes"`
	Available            string `json:"available"` // e.g. "930.7gb"
	AvailableInBytes     int64  `json:"available_in_bytes"`
	DiskReads            int64  `json:"disk_reads"`
	DiskWrites           int64  `json:"disk_writes"`
	DiskIOOp             int64  `json:"disk_io_op"`
	DiskReadSize         string `json:"disk_read_size"` // e.g. "0b"
	DiskReadSizeInBytes  int64  `json:"disk_read_size_in_bytes"`
	DiskWriteSize        string `json:"disk_write_size"` // e.g. "0b"
	DiskWriteSizeInBytes int64  `json:"disk_write_size_in_bytes"`
	DiskIOSize           string `json:"disk_io_size"` // e.g. "0b"
	DiskIOSizeInBytes    int64  `json:"disk_io_size_in_bytes"`
	DiskQueue            string `json:"disk_queue"`
	DiskServiceTime      string `json:"disk_service_time"`
}
|
||||
|
||||
// ClusterStatsNodesPlugin describes a plugin installed on the nodes.
type ClusterStatsNodesPlugin struct {
	Name        string `json:"name"`
	Version     string `json:"version"`
	Description string `json:"description"`
	URL         string `json:"url"`
	JVM         bool   `json:"jvm"`  // true for JVM plugins — per tag name; confirm against ES docs
	Site        bool   `json:"site"` // true for site plugins — per tag name; confirm against ES docs
}
|
||||
92
vendor/gopkg.in/olivere/elastic.v5/cluster_stats_test.go
generated
vendored
Normal file
92
vendor/gopkg.in/olivere/elastic.v5/cluster_stats_test.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestClusterStats is an integration test: it requires a live
// Elasticsearch cluster (provided by setupTestClientAndCreateIndex)
// and only sanity-checks that the response carries the expected
// top-level sections.
func TestClusterStats(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	// Get cluster stats
	res, err := client.ClusterStats().Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if res == nil {
		t.Fatalf("expected res to be != nil; got: %v", res)
	}
	if res.ClusterName == "" {
		t.Fatalf("expected a cluster name; got: %q", res.ClusterName)
	}
	if res.Nodes == nil {
		t.Fatalf("expected nodes; got: %v", res.Nodes)
	}
	if res.Nodes.Count == nil {
		t.Fatalf("expected nodes count; got: %v", res.Nodes.Count)
	}
}
|
||||
|
||||
func TestClusterStatsURLs(t *testing.T) {
|
||||
fFlag := false
|
||||
tFlag := true
|
||||
|
||||
tests := []struct {
|
||||
Service *ClusterStatsService
|
||||
ExpectedPath string
|
||||
ExpectedParams url.Values
|
||||
}{
|
||||
{
|
||||
Service: &ClusterStatsService{
|
||||
nodeId: []string{},
|
||||
},
|
||||
ExpectedPath: "/_cluster/stats",
|
||||
},
|
||||
{
|
||||
Service: &ClusterStatsService{
|
||||
nodeId: []string{"node1"},
|
||||
},
|
||||
ExpectedPath: "/_cluster/stats/nodes/node1",
|
||||
},
|
||||
{
|
||||
Service: &ClusterStatsService{
|
||||
nodeId: []string{"node1", "node2"},
|
||||
},
|
||||
ExpectedPath: "/_cluster/stats/nodes/node1%2Cnode2",
|
||||
},
|
||||
{
|
||||
Service: &ClusterStatsService{
|
||||
nodeId: []string{},
|
||||
flatSettings: &tFlag,
|
||||
},
|
||||
ExpectedPath: "/_cluster/stats",
|
||||
ExpectedParams: url.Values{"flat_settings": []string{"true"}},
|
||||
},
|
||||
{
|
||||
Service: &ClusterStatsService{
|
||||
nodeId: []string{"node1"},
|
||||
flatSettings: &fFlag,
|
||||
},
|
||||
ExpectedPath: "/_cluster/stats/nodes/node1",
|
||||
ExpectedParams: url.Values{"flat_settings": []string{"false"}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
gotPath, gotParams, err := test.Service.buildURL()
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error; got: %v", err)
|
||||
}
|
||||
if gotPath != test.ExpectedPath {
|
||||
t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
|
||||
}
|
||||
if gotParams.Encode() != test.ExpectedParams.Encode() {
|
||||
t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
|
||||
}
|
||||
}
|
||||
}
|
||||
15
vendor/gopkg.in/olivere/elastic.v5/config/elasticsearch.yml
generated
vendored
Normal file
15
vendor/gopkg.in/olivere/elastic.v5/config/elasticsearch.yml
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# bootstrap.ignore_system_bootstrap_checks: true
|
||||
|
||||
discovery.zen.minimum_master_nodes: 1
|
||||
|
||||
network.host:
|
||||
- _local_
|
||||
- _site_
|
||||
|
||||
network.publish_host: _local_
|
||||
|
||||
|
||||
# Enable scripting as described here: https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html
|
||||
script.inline: true
|
||||
script.stored: true
|
||||
script.file: true
|
||||
100
vendor/gopkg.in/olivere/elastic.v5/config/jvm.options
generated
vendored
Normal file
100
vendor/gopkg.in/olivere/elastic.v5/config/jvm.options
generated
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
## JVM configuration
|
||||
|
||||
################################################################
|
||||
## IMPORTANT: JVM heap size
|
||||
################################################################
|
||||
##
|
||||
## You should always set the min and max JVM heap
|
||||
## size to the same value. For example, to set
|
||||
## the heap to 4 GB, set:
|
||||
##
|
||||
## -Xms4g
|
||||
## -Xmx4g
|
||||
##
|
||||
## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
|
||||
## for more information
|
||||
##
|
||||
################################################################
|
||||
|
||||
# Xms represents the initial size of total heap space
|
||||
# Xmx represents the maximum size of total heap space
|
||||
|
||||
-Xms2g
|
||||
-Xmx2g
|
||||
|
||||
################################################################
|
||||
## Expert settings
|
||||
################################################################
|
||||
##
|
||||
## All settings below this section are considered
|
||||
## expert settings. Don't tamper with them unless
|
||||
## you understand what you are doing
|
||||
##
|
||||
################################################################
|
||||
|
||||
## GC configuration
|
||||
-XX:+UseConcMarkSweepGC
|
||||
-XX:CMSInitiatingOccupancyFraction=75
|
||||
-XX:+UseCMSInitiatingOccupancyOnly
|
||||
|
||||
## optimizations
|
||||
|
||||
# disable calls to System#gc
|
||||
-XX:+DisableExplicitGC
|
||||
|
||||
# pre-touch memory pages used by the JVM during initialization
|
||||
-XX:+AlwaysPreTouch
|
||||
|
||||
## basic
|
||||
|
||||
# force the server VM
|
||||
-server
|
||||
|
||||
# set to headless, just in case
|
||||
-Djava.awt.headless=true
|
||||
|
||||
# ensure UTF-8 encoding by default (e.g. filenames)
|
||||
-Dfile.encoding=UTF-8
|
||||
|
||||
# use our provided JNA always versus the system one
|
||||
-Djna.nosys=true
|
||||
|
||||
# flags to keep Netty from being unsafe
|
||||
-Dio.netty.noUnsafe=true
|
||||
-Dio.netty.noKeySetOptimization=true
|
||||
|
||||
# log4j 2
|
||||
-Dlog4j.shutdownHookEnabled=false
|
||||
-Dlog4j2.disable.jmx=true
|
||||
-Dlog4j.skipJansi=true
|
||||
|
||||
## heap dumps
|
||||
|
||||
# generate a heap dump when an allocation from the Java heap fails
|
||||
# heap dumps are created in the working directory of the JVM
|
||||
-XX:+HeapDumpOnOutOfMemoryError
|
||||
|
||||
# specify an alternative path for heap dumps
|
||||
# ensure the directory exists and has sufficient space
|
||||
#-XX:HeapDumpPath=${heap.dump.path}
|
||||
|
||||
## GC logging
|
||||
|
||||
#-XX:+PrintGCDetails
|
||||
#-XX:+PrintGCTimeStamps
|
||||
#-XX:+PrintGCDateStamps
|
||||
#-XX:+PrintClassHistogram
|
||||
#-XX:+PrintTenuringDistribution
|
||||
#-XX:+PrintGCApplicationStoppedTime
|
||||
|
||||
# log GC status to a file with time stamps
|
||||
# ensure the directory exists
|
||||
#-Xloggc:${loggc}
|
||||
|
||||
# Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON.
|
||||
# If documents were already indexed with unquoted fields in a previous version
|
||||
# of Elasticsearch, some operations may throw errors.
|
||||
#
|
||||
# WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided
|
||||
# only for migration purposes.
|
||||
#-Delasticsearch.json.allow_unquoted_field_names=true
|
||||
74
vendor/gopkg.in/olivere/elastic.v5/config/log4j2.properties
generated
vendored
Normal file
74
vendor/gopkg.in/olivere/elastic.v5/config/log4j2.properties
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
status = error
|
||||
|
||||
# log action execution errors for easier debugging
|
||||
logger.action.name = org.elasticsearch.action
|
||||
logger.action.level = debug
|
||||
|
||||
appender.console.type = Console
|
||||
appender.console.name = console
|
||||
appender.console.layout.type = PatternLayout
|
||||
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
|
||||
|
||||
appender.rolling.type = RollingFile
|
||||
appender.rolling.name = rolling
|
||||
appender.rolling.fileName = ${sys:es.logs}.log
|
||||
appender.rolling.layout.type = PatternLayout
|
||||
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
|
||||
appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
|
||||
appender.rolling.policies.type = Policies
|
||||
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
|
||||
appender.rolling.policies.time.interval = 1
|
||||
appender.rolling.policies.time.modulate = true
|
||||
|
||||
rootLogger.level = info
|
||||
rootLogger.appenderRef.console.ref = console
|
||||
rootLogger.appenderRef.rolling.ref = rolling
|
||||
|
||||
appender.deprecation_rolling.type = RollingFile
|
||||
appender.deprecation_rolling.name = deprecation_rolling
|
||||
appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
|
||||
appender.deprecation_rolling.layout.type = PatternLayout
|
||||
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
|
||||
appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
|
||||
appender.deprecation_rolling.policies.type = Policies
|
||||
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
|
||||
appender.deprecation_rolling.policies.size.size = 1GB
|
||||
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
|
||||
appender.deprecation_rolling.strategy.max = 4
|
||||
|
||||
logger.deprecation.name = org.elasticsearch.deprecation
|
||||
logger.deprecation.level = warn
|
||||
logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
|
||||
logger.deprecation.additivity = false
|
||||
|
||||
appender.index_search_slowlog_rolling.type = RollingFile
|
||||
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
|
||||
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
|
||||
appender.index_search_slowlog_rolling.layout.type = PatternLayout
|
||||
appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
|
||||
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
|
||||
appender.index_search_slowlog_rolling.policies.type = Policies
|
||||
appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
|
||||
appender.index_search_slowlog_rolling.policies.time.interval = 1
|
||||
appender.index_search_slowlog_rolling.policies.time.modulate = true
|
||||
|
||||
logger.index_search_slowlog_rolling.name = index.search.slowlog
|
||||
logger.index_search_slowlog_rolling.level = trace
|
||||
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
|
||||
logger.index_search_slowlog_rolling.additivity = false
|
||||
|
||||
appender.index_indexing_slowlog_rolling.type = RollingFile
|
||||
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
|
||||
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
|
||||
appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
|
||||
appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
|
||||
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
|
||||
appender.index_indexing_slowlog_rolling.policies.type = Policies
|
||||
appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
|
||||
appender.index_indexing_slowlog_rolling.policies.time.interval = 1
|
||||
appender.index_indexing_slowlog_rolling.policies.time.modulate = true
|
||||
|
||||
logger.index_indexing_slowlog.name = index.indexing.slowlog.index
|
||||
logger.index_indexing_slowlog.level = trace
|
||||
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
|
||||
logger.index_indexing_slowlog.additivity = false
|
||||
0
vendor/gopkg.in/olivere/elastic.v5/config/scripts/.gitkeep
generated
vendored
Normal file
0
vendor/gopkg.in/olivere/elastic.v5/config/scripts/.gitkeep
generated
vendored
Normal file
90
vendor/gopkg.in/olivere/elastic.v5/connection.go
generated
vendored
Normal file
90
vendor/gopkg.in/olivere/elastic.v5/connection.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// conn represents a single connection to a node in a cluster.
// The embedded RWMutex guards all fields below; every method of conn
// takes the lock, so a conn is safe for concurrent use.
type conn struct {
	sync.RWMutex
	nodeID    string     // node ID
	url       string     // base URL of the node
	failures  int        // consecutive failures since last healthy request
	dead      bool       // true after MarkAsDead until revived
	deadSince *time.Time // UTC time the connection was first marked dead; nil while alive
}
|
||||
|
||||
// newConn creates a new connection to the given URL.
|
||||
func newConn(nodeID, url string) *conn {
|
||||
c := &conn{
|
||||
nodeID: nodeID,
|
||||
url: url,
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// String returns a representation of the connection status,
// including the URL, the dead flag, the failure count and the time
// the connection was first marked dead (nil while alive).
func (c *conn) String() string {
	c.RLock()
	defer c.RUnlock()
	return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince)
}
|
||||
|
||||
// NodeID returns the ID of the node of this connection.
// The read lock is taken for consistency with the other accessors.
func (c *conn) NodeID() string {
	c.RLock()
	defer c.RUnlock()
	return c.nodeID
}

// URL returns the URL of this connection.
func (c *conn) URL() string {
	c.RLock()
	defer c.RUnlock()
	return c.url
}
|
||||
|
||||
// IsDead returns true if this connection is marked as dead, i.e. a previous
// request to the URL has been unsuccessful. A dead connection is
// revived via MarkAsAlive or MarkAsHealthy.
func (c *conn) IsDead() bool {
	c.RLock()
	defer c.RUnlock()
	return c.dead
}
|
||||
|
||||
// MarkAsDead marks this connection as dead, increments the failures
|
||||
// counter and stores the current time in dead since.
|
||||
func (c *conn) MarkAsDead() {
|
||||
c.Lock()
|
||||
c.dead = true
|
||||
if c.deadSince == nil {
|
||||
utcNow := time.Now().UTC()
|
||||
c.deadSince = &utcNow
|
||||
}
|
||||
c.failures += 1
|
||||
c.Unlock()
|
||||
}
|
||||
|
||||
// MarkAsAlive marks this connection as eligible to be returned from the
|
||||
// pool of connections by the selector.
|
||||
func (c *conn) MarkAsAlive() {
|
||||
c.Lock()
|
||||
c.dead = false
|
||||
c.Unlock()
|
||||
}
|
||||
|
||||
// MarkAsHealthy marks this connection as healthy, i.e. a request has been
|
||||
// successfully performed with it.
|
||||
func (c *conn) MarkAsHealthy() {
|
||||
c.Lock()
|
||||
c.dead = false
|
||||
c.deadSince = nil
|
||||
c.failures = 0
|
||||
c.Unlock()
|
||||
}
|
||||
310
vendor/gopkg.in/olivere/elastic.v5/count.go
generated
vendored
Normal file
310
vendor/gopkg.in/olivere/elastic.v5/count.go
generated
vendored
Normal file
@@ -0,0 +1,310 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// CountService is a convenient service for determining the
// number of documents in an index. Use SearchService with
// a SearchType of count for counting with queries etc.
//
// All fields are set through the chainable setter methods below and
// translated into URL path segments, query parameters or the request
// body by buildURL and Do.
type CountService struct {
	client                 *Client  // client used to perform the request
	pretty                 bool     // pretty-print the JSON response
	index                  []string // indices to restrict the count to
	typ                    []string // types to restrict the count to
	allowNoIndices         *bool    // tri-state flags use *bool: nil means "not set"
	analyzeWildcard        *bool
	analyzer               string
	defaultOperator        string
	df                     string
	expandWildcards        string
	ignoreUnavailable      *bool
	lenient                *bool
	lowercaseExpandedTerms *bool
	minScore               interface{}
	preference             string
	q                      string // Lucene query string (alternative to query)
	query                  Query  // Query DSL query (alternative to q)
	routing                string
	bodyJson               interface{} // raw body (used only when query is nil)
	bodyString             string      // raw body string (used only when query and bodyJson are unset)
}
|
||||
|
||||
// NewCountService creates a new CountService.
|
||||
func NewCountService(client *Client) *CountService {
|
||||
return &CountService{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
// Index sets the names of the indices to restrict the results.
|
||||
func (s *CountService) Index(index ...string) *CountService {
|
||||
if s.index == nil {
|
||||
s.index = make([]string, 0)
|
||||
}
|
||||
s.index = append(s.index, index...)
|
||||
return s
|
||||
}
|
||||
|
||||
// Type sets the types to use to restrict the results.
|
||||
func (s *CountService) Type(typ ...string) *CountService {
|
||||
if s.typ == nil {
|
||||
s.typ = make([]string, 0)
|
||||
}
|
||||
s.typ = append(s.typ, typ...)
|
||||
return s
|
||||
}
|
||||
|
||||
// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices. (This includes "_all" string
// or when no indices have been specified). Returns s for chaining.
func (s *CountService) AllowNoIndices(allowNoIndices bool) *CountService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// AnalyzeWildcard specifies whether wildcard and prefix queries should be
// analyzed (default: false). Returns s for chaining.
func (s *CountService) AnalyzeWildcard(analyzeWildcard bool) *CountService {
	s.analyzeWildcard = &analyzeWildcard
	return s
}

// Analyzer specifies the analyzer to use for the query string.
// Returns s for chaining.
func (s *CountService) Analyzer(analyzer string) *CountService {
	s.analyzer = analyzer
	return s
}

// DefaultOperator specifies the default operator for query string query (AND or OR).
// Returns s for chaining.
func (s *CountService) DefaultOperator(defaultOperator string) *CountService {
	s.defaultOperator = defaultOperator
	return s
}

// Df specifies the field to use as default where no field prefix is given
// in the query string. Returns s for chaining.
func (s *CountService) Df(df string) *CountService {
	s.df = df
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both. Returns s for chaining.
func (s *CountService) ExpandWildcards(expandWildcards string) *CountService {
	s.expandWildcards = expandWildcards
	return s
}
|
||||
|
||||
// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed). Returns s for chaining.
func (s *CountService) IgnoreUnavailable(ignoreUnavailable bool) *CountService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// Lenient specifies whether format-based query failures (such as
// providing text to a numeric field) should be ignored. Returns s for chaining.
func (s *CountService) Lenient(lenient bool) *CountService {
	s.lenient = &lenient
	return s
}

// LowercaseExpandedTerms specifies whether query terms should be lowercased.
// Returns s for chaining.
func (s *CountService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *CountService {
	s.lowercaseExpandedTerms = &lowercaseExpandedTerms
	return s
}

// MinScore indicates to include only documents with a specific `_score`
// value in the result. Returns s for chaining.
func (s *CountService) MinScore(minScore interface{}) *CountService {
	s.minScore = minScore
	return s
}

// Preference specifies the node or shard the operation should be
// performed on (default: random). Returns s for chaining.
func (s *CountService) Preference(preference string) *CountService {
	s.preference = preference
	return s
}
|
||||
|
||||
// Q in the Lucene query string syntax. You can also use Query to pass
// a Query struct. Returns s for chaining.
func (s *CountService) Q(q string) *CountService {
	s.q = q
	return s
}

// Query specifies the query to pass. You can also pass a query string with Q.
// When both are set, Do sends this query in the request body (see Do).
func (s *CountService) Query(query Query) *CountService {
	s.query = query
	return s
}

// Routing specifies the routing value. Returns s for chaining.
func (s *CountService) Routing(routing string) *CountService {
	s.routing = routing
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
// Returns s for chaining.
func (s *CountService) Pretty(pretty bool) *CountService {
	s.pretty = pretty
	return s
}
|
||||
|
||||
// BodyJson specifies the query to restrict the results specified with the
// Query DSL (optional). The interface{} will be serialized to a JSON document,
// so use a map[string]interface{}. Ignored when Query is set (see Do).
func (s *CountService) BodyJson(body interface{}) *CountService {
	s.bodyJson = body
	return s
}

// BodyString specifies a query to restrict the results specified with
// the Query DSL (optional). Ignored when Query or BodyJson is set (see Do).
func (s *CountService) BodyString(body string) *CountService {
	s.bodyString = body
	return s
}
|
||||
|
||||
// buildURL builds the URL for the operation.
//
// The path depends on which of index/type are set:
// "/{index}/{type}/_count", "/{index}/_count", "/_all/{type}/_count",
// or "/_all/_count" when neither is given. Every optional flag that
// has been set is encoded as a query-string parameter; tri-state
// *bool fields are only emitted when non-nil.
func (s *CountService) buildURL() (string, url.Values, error) {
	var err error
	var path string

	if len(s.index) > 0 && len(s.typ) > 0 {
		path, err = uritemplates.Expand("/{index}/{type}/_count", map[string]string{
			"index": strings.Join(s.index, ","),
			"type":  strings.Join(s.typ, ","),
		})
	} else if len(s.index) > 0 {
		path, err = uritemplates.Expand("/{index}/_count", map[string]string{
			"index": strings.Join(s.index, ","),
		})
	} else if len(s.typ) > 0 {
		path, err = uritemplates.Expand("/_all/{type}/_count", map[string]string{
			"type": strings.Join(s.typ, ","),
		})
	} else {
		path = "/_all/_count"
	}
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.analyzeWildcard != nil {
		params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
	}
	if s.analyzer != "" {
		params.Set("analyzer", s.analyzer)
	}
	if s.defaultOperator != "" {
		params.Set("default_operator", s.defaultOperator)
	}
	if s.df != "" {
		params.Set("df", s.df)
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.lenient != nil {
		params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
	}
	if s.lowercaseExpandedTerms != nil {
		params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
	}
	if s.minScore != nil {
		params.Set("min_score", fmt.Sprintf("%v", s.minScore))
	}
	if s.preference != "" {
		params.Set("preference", s.preference)
	}
	if s.q != "" {
		params.Set("q", s.q)
	}
	if s.routing != "" {
		params.Set("routing", s.routing)
	}
	return path, params, nil
}
|
||||
|
||||
// Validate checks if the operation is valid.
// Count has no required parameters (indices and types are both
// optional), so validation always succeeds.
func (s *CountService) Validate() error {
	return nil
}
|
||||
|
||||
// Do executes the count operation and returns the number of matching
// documents. The request body is chosen in priority order: the Query
// DSL query, then bodyJson, then bodyString; if none is set the body
// is empty and all documents in scope are counted.
func (s *CountService) Do(ctx context.Context) (int64, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return 0, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return 0, err
	}

	// Setup HTTP request body
	var body interface{}
	if s.query != nil {
		src, err := s.query.Source()
		if err != nil {
			return 0, err
		}
		// Wrap the query source in a {"query": ...} envelope as the
		// Count API expects.
		query := make(map[string]interface{})
		query["query"] = src
		body = query
	} else if s.bodyJson != nil {
		body = s.bodyJson
	} else if s.bodyString != "" {
		body = s.bodyString
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
	if err != nil {
		return 0, err
	}

	// Return result
	ret := new(CountResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return 0, err
	}
	// NOTE(review): ret is never nil here (new never returns nil), so
	// this branch is always taken and the trailing return is dead code.
	if ret != nil {
		return ret.Count, nil
	}

	return int64(0), nil
}
|
||||
|
||||
// CountResponse is the response of using the Count API.
type CountResponse struct {
	// Count is the number of documents matching the query.
	Count int64 `json:"count"`
	// Shards reports how many shards the count ran on and whether any failed.
	Shards shardsInfo `json:"_shards,omitempty"`
}
|
||||
127
vendor/gopkg.in/olivere/elastic.v5/count_test.go
generated
vendored
Normal file
127
vendor/gopkg.in/olivere/elastic.v5/count_test.go
generated
vendored
Normal file
@@ -0,0 +1,127 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCountURL(t *testing.T) {
|
||||
client := setupTestClientAndCreateIndex(t)
|
||||
|
||||
tests := []struct {
|
||||
Indices []string
|
||||
Types []string
|
||||
Expected string
|
||||
}{
|
||||
{
|
||||
[]string{},
|
||||
[]string{},
|
||||
"/_all/_count",
|
||||
},
|
||||
{
|
||||
[]string{},
|
||||
[]string{"tweet"},
|
||||
"/_all/tweet/_count",
|
||||
},
|
||||
{
|
||||
[]string{"twitter-*"},
|
||||
[]string{"tweet", "follower"},
|
||||
"/twitter-%2A/tweet%2Cfollower/_count",
|
||||
},
|
||||
{
|
||||
[]string{"twitter-2014", "twitter-2015"},
|
||||
[]string{"tweet", "follower"},
|
||||
"/twitter-2014%2Ctwitter-2015/tweet%2Cfollower/_count",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
path, _, err := client.Count().Index(test.Indices...).Type(test.Types...).buildURL()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if path != test.Expected {
|
||||
t.Errorf("expected %q; got: %q", test.Expected, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestCount is an integration test: it indexes three tweets, flushes the
// index, and then verifies document counts with and without type filters
// and queries. The steps are order-dependent (documents must be indexed
// and flushed before counting), so the sequence must not be reordered.
func TestCount(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}

	// Add all documents
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	// Flush so the new documents become visible to count/search.
	_, err = client.Flush().Index(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	// Count all documents in the index: expect all three tweets.
	count, err := client.Count(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if count != 3 {
		t.Errorf("expected Count = %d; got %d", 3, count)
	}

	// Count documents of the existing type "tweet": still all three.
	count, err = client.Count(testIndexName).Type("tweet").Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if count != 3 {
		t.Errorf("expected Count = %d; got %d", 3, count)
	}

	// Count documents of a non-existing type: expect zero, not an error.
	count, err = client.Count(testIndexName).Type("gezwitscher").Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if count != 0 {
		t.Errorf("expected Count = %d; got %d", 0, count)
	}

	// Count with a term query: only the two tweets by "olivere" match.
	query := NewTermQuery("user", "olivere")
	count, err = client.Count(testIndexName).Query(query).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if count != 2 {
		t.Errorf("expected Count = %d; got %d", 2, count)
	}

	// Count with the same query restricted to type "tweet": same two hits.
	query = NewTermQuery("user", "olivere")
	count, err = client.Count(testIndexName).Type("tweet").Query(query).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if count != 2 {
		t.Errorf("expected Count = %d; got %d", 2, count)
	}
}
|
||||
26
vendor/gopkg.in/olivere/elastic.v5/decoder.go
generated
vendored
Normal file
26
vendor/gopkg.in/olivere/elastic.v5/decoder.go
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// Decoder is used to decode responses from Elasticsearch.
// Users of elastic can implement their own marshaler for advanced purposes
// and set them per Client (see SetDecoder). If none is specified,
// DefaultDecoder is used.
type Decoder interface {
	// Decode unmarshals the raw response body in data into v.
	Decode(data []byte, v interface{}) error
}
|
||||
|
||||
// DefaultDecoder implements Decoder by delegating to json.Unmarshal from
// the Go standard library.
type DefaultDecoder struct{}

// Decode unmarshals the given JSON data into v via json.Unmarshal.
func (d *DefaultDecoder) Decode(data []byte, v interface{}) error {
	return json.Unmarshal(data, v)
}
|
||||
50
vendor/gopkg.in/olivere/elastic.v5/decoder_test.go
generated
vendored
Normal file
50
vendor/gopkg.in/olivere/elastic.v5/decoder_test.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// decoder is a test Decoder that counts how often it is invoked and decodes
// JSON with UseNumber enabled (numbers stay json.Number instead of float64).
// The former `dec json.Decoder` field was unused (Decode always builds its
// own local json.Decoder) and has been removed.
type decoder struct {
	// N is the number of Decode calls; updated atomically so concurrent
	// client goroutines can share one decoder instance.
	N int64
}

// Decode counts the invocation, then decodes data into v.
func (d *decoder) Decode(data []byte, v interface{}) error {
	atomic.AddInt64(&d.N, 1)
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.UseNumber()
	return dec.Decode(v)
}
|
||||
|
||||
func TestDecoder(t *testing.T) {
|
||||
dec := &decoder{}
|
||||
client := setupTestClientAndCreateIndex(t, SetDecoder(dec), SetMaxRetries(0))
|
||||
|
||||
tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
|
||||
|
||||
// Add a document
|
||||
indexResult, err := client.Index().
|
||||
Index(testIndexName).
|
||||
Type("tweet").
|
||||
Id("1").
|
||||
BodyJson(&tweet).
|
||||
Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if indexResult == nil {
|
||||
t.Errorf("expected result to be != nil; got: %v", indexResult)
|
||||
}
|
||||
if dec.N == 0 {
|
||||
t.Errorf("expected at least 1 call of decoder; got: %d", dec.N)
|
||||
}
|
||||
}
|
||||
208
vendor/gopkg.in/olivere/elastic.v5/delete.go
generated
vendored
Normal file
208
vendor/gopkg.in/olivere/elastic.v5/delete.go
generated
vendored
Normal file
@@ -0,0 +1,208 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// DeleteService allows to delete a typed JSON document from a specified
// index based on its id.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-delete.html
// for details.
type DeleteService struct {
	client *Client
	// pretty indents the JSON response for readability.
	pretty bool
	// id, index and typ identify the document to delete; all three are
	// required (enforced in Validate).
	id    string
	index string
	typ   string
	// Optional request parameters; zero values are omitted from the URL.
	routing             string
	timeout             string
	version             interface{}
	versionType         string
	waitForActiveShards string
	parent              string
	refresh             string
}
|
||||
|
||||
// NewDeleteService creates a new DeleteService.
|
||||
func NewDeleteService(client *Client) *DeleteService {
|
||||
return &DeleteService{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
// Type is the type of the document.
|
||||
func (s *DeleteService) Type(typ string) *DeleteService {
|
||||
s.typ = typ
|
||||
return s
|
||||
}
|
||||
|
||||
// Id is the document ID.
|
||||
func (s *DeleteService) Id(id string) *DeleteService {
|
||||
s.id = id
|
||||
return s
|
||||
}
|
||||
|
||||
// Index is the name of the index.
|
||||
func (s *DeleteService) Index(index string) *DeleteService {
|
||||
s.index = index
|
||||
return s
|
||||
}
|
||||
|
||||
// Routing is a specific routing value.
|
||||
func (s *DeleteService) Routing(routing string) *DeleteService {
|
||||
s.routing = routing
|
||||
return s
|
||||
}
|
||||
|
||||
// Timeout is an explicit operation timeout.
|
||||
func (s *DeleteService) Timeout(timeout string) *DeleteService {
|
||||
s.timeout = timeout
|
||||
return s
|
||||
}
|
||||
|
||||
// Version is an explicit version number for concurrency control.
|
||||
func (s *DeleteService) Version(version interface{}) *DeleteService {
|
||||
s.version = version
|
||||
return s
|
||||
}
|
||||
|
||||
// VersionType is a specific version type.
|
||||
func (s *DeleteService) VersionType(versionType string) *DeleteService {
|
||||
s.versionType = versionType
|
||||
return s
|
||||
}
|
||||
|
||||
// WaitForActiveShards sets the number of shard copies that must be active
|
||||
// before proceeding with the delete operation. Defaults to 1, meaning the
|
||||
// primary shard only. Set to `all` for all shard copies, otherwise set to
|
||||
// any non-negative value less than or equal to the total number of copies
|
||||
// for the shard (number of replicas + 1).
|
||||
func (s *DeleteService) WaitForActiveShards(waitForActiveShards string) *DeleteService {
|
||||
s.waitForActiveShards = waitForActiveShards
|
||||
return s
|
||||
}
|
||||
|
||||
// Parent is the ID of parent document.
|
||||
func (s *DeleteService) Parent(parent string) *DeleteService {
|
||||
s.parent = parent
|
||||
return s
|
||||
}
|
||||
|
||||
// Refresh the index after performing the operation.
|
||||
func (s *DeleteService) Refresh(refresh string) *DeleteService {
|
||||
s.refresh = refresh
|
||||
return s
|
||||
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (s *DeleteService) Pretty(pretty bool) *DeleteService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *DeleteService) buildURL() (string, url.Values, error) {
|
||||
// Build URL
|
||||
path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
|
||||
"index": s.index,
|
||||
"type": s.typ,
|
||||
"id": s.id,
|
||||
})
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.pretty {
|
||||
params.Set("pretty", "1")
|
||||
}
|
||||
if s.refresh != "" {
|
||||
params.Set("refresh", s.refresh)
|
||||
}
|
||||
if s.routing != "" {
|
||||
params.Set("routing", s.routing)
|
||||
}
|
||||
if s.timeout != "" {
|
||||
params.Set("timeout", s.timeout)
|
||||
}
|
||||
if s.version != nil {
|
||||
params.Set("version", fmt.Sprintf("%v", s.version))
|
||||
}
|
||||
if s.versionType != "" {
|
||||
params.Set("version_type", s.versionType)
|
||||
}
|
||||
if s.waitForActiveShards != "" {
|
||||
params.Set("wait_for_active_shards", s.waitForActiveShards)
|
||||
}
|
||||
if s.parent != "" {
|
||||
params.Set("parent", s.parent)
|
||||
}
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *DeleteService) Validate() error {
|
||||
var invalid []string
|
||||
if s.typ == "" {
|
||||
invalid = append(invalid, "Type")
|
||||
}
|
||||
if s.id == "" {
|
||||
invalid = append(invalid, "Id")
|
||||
}
|
||||
if s.index == "" {
|
||||
invalid = append(invalid, "Index")
|
||||
}
|
||||
if len(invalid) > 0 {
|
||||
return fmt.Errorf("missing required fields: %v", invalid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do executes the delete operation.
//
// It validates the required fields (index, type, id), builds the request
// URL, issues an HTTP DELETE and decodes the response into a DeleteResponse.
func (s *DeleteService) Do(ctx context.Context) (*DeleteResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(DeleteResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}
|
||||
|
||||
// -- Result of a delete request.

// DeleteResponse is the outcome of running DeleteService.Do.
type DeleteResponse struct {
	// TODO _shards { total, failed, successful }
	// Found reports whether the document existed and was deleted.
	Found bool `json:"found"`
	// Index, Type and Id identify the deleted document.
	Index string `json:"_index"`
	Type  string `json:"_type"`
	Id    string `json:"_id"`
	// Version is the document version after the delete.
	Version int64 `json:"_version"`
}
|
||||
648
vendor/gopkg.in/olivere/elastic.v5/delete_by_query.go
generated
vendored
Normal file
648
vendor/gopkg.in/olivere/elastic.v5/delete_by_query.go
generated
vendored
Normal file
@@ -0,0 +1,648 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// DeleteByQueryService deletes documents that match a query.
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-delete-by-query.html.
type DeleteByQueryService struct {
	client *Client

	// Target of the operation.
	index []string
	typ   []string

	// The query can be supplied programmatically (query), as a raw request
	// body (body), or via the "q" query-string parameter below.
	query Query
	body  interface{}

	// Source filtering options.
	xSource        []string
	xSourceExclude []string
	xSourceInclude []string

	// Request options. Pointer fields distinguish "unset" from a genuine
	// zero value; only non-nil/non-empty options are sent to Elasticsearch.
	analyzer               string
	analyzeWildcard        *bool
	allowNoIndices         *bool
	conflicts              string
	defaultOperator        string
	df                     string
	docvalueFields         []string
	expandWildcards        string
	explain                *bool
	from                   *int
	ignoreUnavailable      *bool
	lenient                *bool
	lowercaseExpandedTerms *bool
	preference             string
	q                      string
	refresh                string
	requestCache           *bool
	requestsPerSecond      *int
	routing                []string
	scroll                 string
	scrollSize             *int
	searchTimeout          string
	searchType             string
	size                   *int
	sort                   []string
	stats                  []string
	storedFields           []string
	suggestField           string
	suggestMode            string
	suggestSize            *int
	suggestText            string
	terminateAfter         *int
	timeout                string
	trackScores            *bool
	version                *bool
	waitForActiveShards    string
	waitForCompletion      *bool
	pretty                 bool
}
|
||||
|
||||
// NewDeleteByQueryService creates a new DeleteByQueryService.
|
||||
// You typically use the client's DeleteByQuery to get a reference to
|
||||
// the service.
|
||||
func NewDeleteByQueryService(client *Client) *DeleteByQueryService {
|
||||
builder := &DeleteByQueryService{
|
||||
client: client,
|
||||
}
|
||||
return builder
|
||||
}
|
||||
|
||||
// Index sets the indices on which to perform the delete operation.
|
||||
func (s *DeleteByQueryService) Index(index ...string) *DeleteByQueryService {
|
||||
s.index = append(s.index, index...)
|
||||
return s
|
||||
}
|
||||
|
||||
// Type limits the delete operation to the given types.
|
||||
func (s *DeleteByQueryService) Type(typ ...string) *DeleteByQueryService {
|
||||
s.typ = append(s.typ, typ...)
|
||||
return s
|
||||
}
|
||||
|
||||
// XSource is true or false to return the _source field or not,
|
||||
// or a list of fields to return.
|
||||
func (s *DeleteByQueryService) XSource(xSource ...string) *DeleteByQueryService {
|
||||
s.xSource = append(s.xSource, xSource...)
|
||||
return s
|
||||
}
|
||||
|
||||
// XSourceExclude represents a list of fields to exclude from the returned _source field.
|
||||
func (s *DeleteByQueryService) XSourceExclude(xSourceExclude ...string) *DeleteByQueryService {
|
||||
s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
|
||||
return s
|
||||
}
|
||||
|
||||
// XSourceInclude represents a list of fields to extract and return from the _source field.
|
||||
func (s *DeleteByQueryService) XSourceInclude(xSourceInclude ...string) *DeleteByQueryService {
|
||||
s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
|
||||
return s
|
||||
}
|
||||
|
||||
// Analyzer to use for the query string.
|
||||
func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService {
|
||||
s.analyzer = analyzer
|
||||
return s
|
||||
}
|
||||
|
||||
// AnalyzeWildcard specifies whether wildcard and prefix queries should be
|
||||
// analyzed (default: false).
|
||||
func (s *DeleteByQueryService) AnalyzeWildcard(analyzeWildcard bool) *DeleteByQueryService {
|
||||
s.analyzeWildcard = &analyzeWildcard
|
||||
return s
|
||||
}
|
||||
|
||||
// AllowNoIndices indicates whether to ignore if a wildcard indices
|
||||
// expression resolves into no concrete indices (including the _all string
|
||||
// or when no indices have been specified).
|
||||
func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService {
|
||||
s.allowNoIndices = &allow
|
||||
return s
|
||||
}
|
||||
|
||||
// Conflicts indicates what to do when the process detects version conflicts.
|
||||
// Possible values are "proceed" and "abort".
|
||||
func (s *DeleteByQueryService) Conflicts(conflicts string) *DeleteByQueryService {
|
||||
s.conflicts = conflicts
|
||||
return s
|
||||
}
|
||||
|
||||
// AbortOnVersionConflict aborts the request on version conflicts.
|
||||
// It is an alias to setting Conflicts("abort").
|
||||
func (s *DeleteByQueryService) AbortOnVersionConflict() *DeleteByQueryService {
|
||||
s.conflicts = "abort"
|
||||
return s
|
||||
}
|
||||
|
||||
// ProceedOnVersionConflict aborts the request on version conflicts.
|
||||
// It is an alias to setting Conflicts("proceed").
|
||||
func (s *DeleteByQueryService) ProceedOnVersionConflict() *DeleteByQueryService {
|
||||
s.conflicts = "proceed"
|
||||
return s
|
||||
}
|
||||
|
||||
// DefaultOperator for query string query (AND or OR).
|
||||
func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService {
|
||||
s.defaultOperator = defaultOperator
|
||||
return s
|
||||
}
|
||||
|
||||
// DF is the field to use as default where no field prefix is given in the query string.
|
||||
func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService {
|
||||
s.df = defaultField
|
||||
return s
|
||||
}
|
||||
|
||||
// DefaultField is the field to use as default where no field prefix is given in the query string.
|
||||
// It is an alias to the DF func.
|
||||
func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService {
|
||||
s.df = defaultField
|
||||
return s
|
||||
}
|
||||
|
||||
// DocvalueFields specifies the list of fields to return as the docvalue representation of a field for each hit.
|
||||
func (s *DeleteByQueryService) DocvalueFields(docvalueFields ...string) *DeleteByQueryService {
|
||||
s.docvalueFields = docvalueFields
|
||||
return s
|
||||
}
|
||||
|
||||
// ExpandWildcards indicates whether to expand wildcard expression to
|
||||
// concrete indices that are open, closed or both. It can be "open" or "closed".
|
||||
func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService {
|
||||
s.expandWildcards = expand
|
||||
return s
|
||||
}
|
||||
|
||||
// Explain specifies whether to return detailed information about score
|
||||
// computation as part of a hit.
|
||||
func (s *DeleteByQueryService) Explain(explain bool) *DeleteByQueryService {
|
||||
s.explain = &explain
|
||||
return s
|
||||
}
|
||||
|
||||
// From is the starting offset (default: 0).
|
||||
func (s *DeleteByQueryService) From(from int) *DeleteByQueryService {
|
||||
s.from = &from
|
||||
return s
|
||||
}
|
||||
|
||||
// IgnoreUnavailable indicates whether specified concrete indices should be
|
||||
// ignored when unavailable (missing or closed).
|
||||
func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService {
|
||||
s.ignoreUnavailable = &ignore
|
||||
return s
|
||||
}
|
||||
|
||||
// Lenient specifies whether format-based query failures
|
||||
// (such as providing text to a numeric field) should be ignored.
|
||||
func (s *DeleteByQueryService) Lenient(lenient bool) *DeleteByQueryService {
|
||||
s.lenient = &lenient
|
||||
return s
|
||||
}
|
||||
|
||||
// LowercaseExpandedTerms specifies whether query terms should be lowercased.
|
||||
func (s *DeleteByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *DeleteByQueryService {
|
||||
s.lowercaseExpandedTerms = &lowercaseExpandedTerms
|
||||
return s
|
||||
}
|
||||
|
||||
// Preference specifies the node or shard the operation should be performed on
|
||||
// (default: random).
|
||||
func (s *DeleteByQueryService) Preference(preference string) *DeleteByQueryService {
|
||||
s.preference = preference
|
||||
return s
|
||||
}
|
||||
|
||||
// Q specifies the query in Lucene query string syntax. You can also use
|
||||
// Query to programmatically specify the query.
|
||||
func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService {
|
||||
s.q = query
|
||||
return s
|
||||
}
|
||||
|
||||
// QueryString is an alias to Q. Notice that you can also use Query to
|
||||
// programmatically set the query.
|
||||
func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService {
|
||||
s.q = query
|
||||
return s
|
||||
}
|
||||
|
||||
// Query sets the query programmatically.
|
||||
func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService {
|
||||
s.query = query
|
||||
return s
|
||||
}
|
||||
|
||||
// Refresh indicates whether the effected indexes should be refreshed.
|
||||
func (s *DeleteByQueryService) Refresh(refresh string) *DeleteByQueryService {
|
||||
s.refresh = refresh
|
||||
return s
|
||||
}
|
||||
|
||||
// RequestCache specifies if request cache should be used for this request
|
||||
// or not, defaults to index level setting.
|
||||
func (s *DeleteByQueryService) RequestCache(requestCache bool) *DeleteByQueryService {
|
||||
s.requestCache = &requestCache
|
||||
return s
|
||||
}
|
||||
|
||||
// RequestsPerSecond sets the throttle on this request in sub-requests per second.
|
||||
// -1 means set no throttle as does "unlimited" which is the only non-float this accepts.
|
||||
func (s *DeleteByQueryService) RequestsPerSecond(requestsPerSecond int) *DeleteByQueryService {
|
||||
s.requestsPerSecond = &requestsPerSecond
|
||||
return s
|
||||
}
|
||||
|
||||
// Routing is a list of specific routing values.
|
||||
func (s *DeleteByQueryService) Routing(routing ...string) *DeleteByQueryService {
|
||||
s.routing = append(s.routing, routing...)
|
||||
return s
|
||||
}
|
||||
|
||||
// Scroll specifies how long a consistent view of the index should be maintained
|
||||
// for scrolled search.
|
||||
func (s *DeleteByQueryService) Scroll(scroll string) *DeleteByQueryService {
|
||||
s.scroll = scroll
|
||||
return s
|
||||
}
|
||||
|
||||
// ScrollSize is the size on the scroll request powering the update_by_query.
|
||||
func (s *DeleteByQueryService) ScrollSize(scrollSize int) *DeleteByQueryService {
|
||||
s.scrollSize = &scrollSize
|
||||
return s
|
||||
}
|
||||
|
||||
// SearchTimeout defines an explicit timeout for each search request.
|
||||
// Defaults to no timeout.
|
||||
func (s *DeleteByQueryService) SearchTimeout(searchTimeout string) *DeleteByQueryService {
|
||||
s.searchTimeout = searchTimeout
|
||||
return s
|
||||
}
|
||||
|
||||
// SearchType is the search operation type. Possible values are
|
||||
// "query_then_fetch" and "dfs_query_then_fetch".
|
||||
func (s *DeleteByQueryService) SearchType(searchType string) *DeleteByQueryService {
|
||||
s.searchType = searchType
|
||||
return s
|
||||
}
|
||||
|
||||
// Size represents the number of hits to return (default: 10).
|
||||
func (s *DeleteByQueryService) Size(size int) *DeleteByQueryService {
|
||||
s.size = &size
|
||||
return s
|
||||
}
|
||||
|
||||
// Sort is a list of <field>:<direction> pairs.
|
||||
func (s *DeleteByQueryService) Sort(sort ...string) *DeleteByQueryService {
|
||||
s.sort = append(s.sort, sort...)
|
||||
return s
|
||||
}
|
||||
|
||||
// SortByField adds a sort order.
|
||||
func (s *DeleteByQueryService) SortByField(field string, ascending bool) *DeleteByQueryService {
|
||||
if ascending {
|
||||
s.sort = append(s.sort, fmt.Sprintf("%s:asc", field))
|
||||
} else {
|
||||
s.sort = append(s.sort, fmt.Sprintf("%s:desc", field))
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Stats specifies specific tag(s) of the request for logging and statistical purposes.
|
||||
func (s *DeleteByQueryService) Stats(stats ...string) *DeleteByQueryService {
|
||||
s.stats = append(s.stats, stats...)
|
||||
return s
|
||||
}
|
||||
|
||||
// StoredFields specifies the list of stored fields to return as part of a hit.
|
||||
func (s *DeleteByQueryService) StoredFields(storedFields ...string) *DeleteByQueryService {
|
||||
s.storedFields = storedFields
|
||||
return s
|
||||
}
|
||||
|
||||
// SuggestField specifies which field to use for suggestions.
|
||||
func (s *DeleteByQueryService) SuggestField(suggestField string) *DeleteByQueryService {
|
||||
s.suggestField = suggestField
|
||||
return s
|
||||
}
|
||||
|
||||
// SuggestMode specifies the suggest mode. Possible values are
|
||||
// "missing", "popular", and "always".
|
||||
func (s *DeleteByQueryService) SuggestMode(suggestMode string) *DeleteByQueryService {
|
||||
s.suggestMode = suggestMode
|
||||
return s
|
||||
}
|
||||
|
||||
// SuggestSize specifies how many suggestions to return in response.
|
||||
func (s *DeleteByQueryService) SuggestSize(suggestSize int) *DeleteByQueryService {
|
||||
s.suggestSize = &suggestSize
|
||||
return s
|
||||
}
|
||||
|
||||
// SuggestText specifies the source text for which the suggestions should be returned.
|
||||
func (s *DeleteByQueryService) SuggestText(suggestText string) *DeleteByQueryService {
|
||||
s.suggestText = suggestText
|
||||
return s
|
||||
}
|
||||
|
||||
// TerminateAfter indicates the maximum number of documents to collect
|
||||
// for each shard, upon reaching which the query execution will terminate early.
|
||||
func (s *DeleteByQueryService) TerminateAfter(terminateAfter int) *DeleteByQueryService {
|
||||
s.terminateAfter = &terminateAfter
|
||||
return s
|
||||
}
|
||||
|
||||
// Timeout is the time each individual bulk request should wait for shards
|
||||
// that are unavailable.
|
||||
func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService {
|
||||
s.timeout = timeout
|
||||
return s
|
||||
}
|
||||
|
||||
// TimeoutInMillis sets the timeout in milliseconds.
|
||||
func (s *DeleteByQueryService) TimeoutInMillis(timeoutInMillis int) *DeleteByQueryService {
|
||||
s.timeout = fmt.Sprintf("%dms", timeoutInMillis)
|
||||
return s
|
||||
}
|
||||
|
||||
// TrackScores indicates whether to calculate and return scores even if
|
||||
// they are not used for sorting.
|
||||
func (s *DeleteByQueryService) TrackScores(trackScores bool) *DeleteByQueryService {
|
||||
s.trackScores = &trackScores
|
||||
return s
|
||||
}
|
||||
|
||||
// Version specifies whether to return document version as part of a hit.
|
||||
func (s *DeleteByQueryService) Version(version bool) *DeleteByQueryService {
|
||||
s.version = &version
|
||||
return s
|
||||
}
|
||||
|
||||
// WaitForActiveShards sets the number of shard copies that must be active before proceeding
|
||||
// with the update by query operation. Defaults to 1, meaning the primary shard only.
|
||||
// Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal
|
||||
// to the total number of copies for the shard (number of replicas + 1).
|
||||
func (s *DeleteByQueryService) WaitForActiveShards(waitForActiveShards string) *DeleteByQueryService {
|
||||
s.waitForActiveShards = waitForActiveShards
|
||||
return s
|
||||
}
|
||||
|
||||
// WaitForCompletion indicates if the request should block until the reindex is complete.
|
||||
func (s *DeleteByQueryService) WaitForCompletion(waitForCompletion bool) *DeleteByQueryService {
|
||||
s.waitForCompletion = &waitForCompletion
|
||||
return s
|
||||
}
|
||||
|
||||
// Pretty indents the JSON output from Elasticsearch.
|
||||
func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// Body specifies the body of the request. It overrides data being specified via SearchService.
|
||||
func (s *DeleteByQueryService) Body(body string) *DeleteByQueryService {
|
||||
s.body = body
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *DeleteByQueryService) buildURL() (string, url.Values, error) {
|
||||
// Build URL
|
||||
var err error
|
||||
var path string
|
||||
if len(s.typ) > 0 {
|
||||
path, err = uritemplates.Expand("/{index}/{type}/_delete_by_query", map[string]string{
|
||||
"index": strings.Join(s.index, ","),
|
||||
"type": strings.Join(s.typ, ","),
|
||||
})
|
||||
} else {
|
||||
path, err = uritemplates.Expand("/{index}/_delete_by_query", map[string]string{
|
||||
"index": strings.Join(s.index, ","),
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if len(s.xSource) > 0 {
|
||||
params.Set("_source", strings.Join(s.xSource, ","))
|
||||
}
|
||||
if len(s.xSourceExclude) > 0 {
|
||||
params.Set("_source_exclude", strings.Join(s.xSourceExclude, ","))
|
||||
}
|
||||
if len(s.xSourceInclude) > 0 {
|
||||
params.Set("_source_include", strings.Join(s.xSourceInclude, ","))
|
||||
}
|
||||
if s.analyzer != "" {
|
||||
params.Set("analyzer", s.analyzer)
|
||||
}
|
||||
if s.analyzeWildcard != nil {
|
||||
params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
|
||||
}
|
||||
if s.defaultOperator != "" {
|
||||
params.Set("default_operator", s.defaultOperator)
|
||||
}
|
||||
if s.df != "" {
|
||||
params.Set("df", s.df)
|
||||
}
|
||||
if s.explain != nil {
|
||||
params.Set("explain", fmt.Sprintf("%v", *s.explain))
|
||||
}
|
||||
if len(s.storedFields) > 0 {
|
||||
params.Set("stored_fields", strings.Join(s.storedFields, ","))
|
||||
}
|
||||
if len(s.docvalueFields) > 0 {
|
||||
params.Set("docvalue_fields", strings.Join(s.docvalueFields, ","))
|
||||
}
|
||||
if s.from != nil {
|
||||
params.Set("from", fmt.Sprintf("%d", *s.from))
|
||||
}
|
||||
if s.ignoreUnavailable != nil {
|
||||
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
|
||||
}
|
||||
if s.allowNoIndices != nil {
|
||||
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
|
||||
}
|
||||
if s.conflicts != "" {
|
||||
params.Set("conflicts", s.conflicts)
|
||||
}
|
||||
if s.expandWildcards != "" {
|
||||
params.Set("expand_wildcards", s.expandWildcards)
|
||||
}
|
||||
if s.lenient != nil {
|
||||
params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
|
||||
}
|
||||
if s.lowercaseExpandedTerms != nil {
|
||||
params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
|
||||
}
|
||||
if s.preference != "" {
|
||||
params.Set("preference", s.preference)
|
||||
}
|
||||
if s.q != "" {
|
||||
params.Set("q", s.q)
|
||||
}
|
||||
if len(s.routing) > 0 {
|
||||
params.Set("routing", strings.Join(s.routing, ","))
|
||||
}
|
||||
if s.scroll != "" {
|
||||
params.Set("scroll", s.scroll)
|
||||
}
|
||||
if s.searchType != "" {
|
||||
params.Set("search_type", s.searchType)
|
||||
}
|
||||
if s.searchTimeout != "" {
|
||||
params.Set("search_timeout", s.searchTimeout)
|
||||
}
|
||||
if s.size != nil {
|
||||
params.Set("size", fmt.Sprintf("%d", *s.size))
|
||||
}
|
||||
if len(s.sort) > 0 {
|
||||
params.Set("sort", strings.Join(s.sort, ","))
|
||||
}
|
||||
if s.terminateAfter != nil {
|
||||
params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter))
|
||||
}
|
||||
if len(s.stats) > 0 {
|
||||
params.Set("stats", strings.Join(s.stats, ","))
|
||||
}
|
||||
if s.suggestField != "" {
|
||||
params.Set("suggest_field", s.suggestField)
|
||||
}
|
||||
if s.suggestMode != "" {
|
||||
params.Set("suggest_mode", s.suggestMode)
|
||||
}
|
||||
if s.suggestSize != nil {
|
||||
params.Set("suggest_size", fmt.Sprintf("%v", *s.suggestSize))
|
||||
}
|
||||
if s.suggestText != "" {
|
||||
params.Set("suggest_text", s.suggestText)
|
||||
}
|
||||
if s.timeout != "" {
|
||||
params.Set("timeout", s.timeout)
|
||||
}
|
||||
if s.trackScores != nil {
|
||||
params.Set("track_scores", fmt.Sprintf("%v", *s.trackScores))
|
||||
}
|
||||
if s.version != nil {
|
||||
params.Set("version", fmt.Sprintf("%v", *s.version))
|
||||
}
|
||||
if s.requestCache != nil {
|
||||
params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache))
|
||||
}
|
||||
if s.refresh != "" {
|
||||
params.Set("refresh", s.refresh)
|
||||
}
|
||||
if s.waitForActiveShards != "" {
|
||||
params.Set("wait_for_active_shards", s.waitForActiveShards)
|
||||
}
|
||||
if s.scrollSize != nil {
|
||||
params.Set("scroll_size", fmt.Sprintf("%d", *s.scrollSize))
|
||||
}
|
||||
if s.waitForCompletion != nil {
|
||||
params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion))
|
||||
}
|
||||
if s.requestsPerSecond != nil {
|
||||
params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond))
|
||||
}
|
||||
if s.pretty {
|
||||
params.Set("pretty", fmt.Sprintf("%v", s.pretty))
|
||||
}
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *DeleteByQueryService) Validate() error {
|
||||
var invalid []string
|
||||
if len(s.index) == 0 {
|
||||
invalid = append(invalid, "Index")
|
||||
}
|
||||
if len(invalid) > 0 {
|
||||
return fmt.Errorf("missing required fields: %v", invalid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do executes the delete-by-query operation.
|
||||
func (s *DeleteByQueryService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) {
|
||||
// Check pre-conditions
|
||||
if err := s.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get URL for request
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set body if there is a query set
|
||||
var body interface{}
|
||||
if s.body != nil {
|
||||
body = s.body
|
||||
} else if s.query != nil {
|
||||
src, err := s.query.Source()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
body = map[string]interface{}{
|
||||
"query": src,
|
||||
}
|
||||
}
|
||||
|
||||
// Get response
|
||||
res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return result
|
||||
ret := new(BulkIndexByScrollResponse)
|
||||
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// BulkIndexByScrollResponse is the outcome of executing Do with
|
||||
// DeleteByQueryService and UpdateByQueryService.
|
||||
type BulkIndexByScrollResponse struct {
|
||||
Took int64 `json:"took"`
|
||||
TimedOut bool `json:"timed_out"`
|
||||
Total int64 `json:"total"`
|
||||
Updated int64 `json:"updated"`
|
||||
Created int64 `json:"created"`
|
||||
Deleted int64 `json:"deleted"`
|
||||
Batches int64 `json:"batches"`
|
||||
VersionConflicts int64 `json:"version_conflicts"`
|
||||
Noops int64 `json:"noops"`
|
||||
Retries struct {
|
||||
Bulk int64 `json:"bulk"`
|
||||
Search int64 `json:"search"`
|
||||
} `json:"retries"`
|
||||
Throttled string `json:"throttled"`
|
||||
ThrottledMillis int64 `json:"throttled_millis"`
|
||||
RequestsPerSecond float64 `json:"requests_per_second"`
|
||||
Canceled string `json:"canceled"`
|
||||
ThrottledUntil string `json:"throttled_until"`
|
||||
ThrottledUntilMillis int64 `json:"throttled_until_millis"`
|
||||
Failures []bulkIndexByScrollResponseFailure `json:"failures"`
|
||||
}
|
||||
|
||||
type bulkIndexByScrollResponseFailure struct {
|
||||
Index string `json:"index,omitempty"`
|
||||
Type string `json:"type,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
Status int `json:"status,omitempty"`
|
||||
Shard int `json:"shard,omitempty"`
|
||||
Node int `json:"node,omitempty"`
|
||||
// TOOD "cause" contains exception details
|
||||
// TOOD "reason" contains exception details
|
||||
}
|
||||
146
vendor/gopkg.in/olivere/elastic.v5/delete_by_query_test.go
generated
vendored
Normal file
146
vendor/gopkg.in/olivere/elastic.v5/delete_by_query_test.go
generated
vendored
Normal file
@@ -0,0 +1,146 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDeleteByQueryBuildURL(t *testing.T) {
|
||||
client := setupTestClient(t)
|
||||
|
||||
tests := []struct {
|
||||
Indices []string
|
||||
Types []string
|
||||
Expected string
|
||||
ExpectErr bool
|
||||
}{
|
||||
{
|
||||
[]string{},
|
||||
[]string{},
|
||||
"",
|
||||
true,
|
||||
},
|
||||
{
|
||||
[]string{"index1"},
|
||||
[]string{},
|
||||
"/index1/_delete_by_query",
|
||||
false,
|
||||
},
|
||||
{
|
||||
[]string{"index1", "index2"},
|
||||
[]string{},
|
||||
"/index1%2Cindex2/_delete_by_query",
|
||||
false,
|
||||
},
|
||||
{
|
||||
[]string{},
|
||||
[]string{"type1"},
|
||||
"",
|
||||
true,
|
||||
},
|
||||
{
|
||||
[]string{"index1"},
|
||||
[]string{"type1"},
|
||||
"/index1/type1/_delete_by_query",
|
||||
false,
|
||||
},
|
||||
{
|
||||
[]string{"index1", "index2"},
|
||||
[]string{"type1", "type2"},
|
||||
"/index1%2Cindex2/type1%2Ctype2/_delete_by_query",
|
||||
false,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
builder := client.DeleteByQuery().Index(test.Indices...).Type(test.Types...)
|
||||
err := builder.Validate()
|
||||
if err != nil {
|
||||
if !test.ExpectErr {
|
||||
t.Errorf("case #%d: %v", i+1, err)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
// err == nil
|
||||
if test.ExpectErr {
|
||||
t.Errorf("case #%d: expected error", i+1)
|
||||
continue
|
||||
}
|
||||
path, _, _ := builder.buildURL()
|
||||
if path != test.Expected {
|
||||
t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteByQuery(t *testing.T) {
|
||||
// client := setupTestClientAndCreateIndex(t, SetTraceLog(log.New(os.Stdout, "", 0)))
|
||||
client := setupTestClientAndCreateIndex(t)
|
||||
|
||||
tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
|
||||
tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
|
||||
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
|
||||
|
||||
// Add all documents
|
||||
_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = client.Flush().Index(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Count documents
|
||||
count, err := client.Count(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if count != 3 {
|
||||
t.Fatalf("expected count = %d; got: %d", 3, count)
|
||||
}
|
||||
|
||||
// Delete all documents by sandrae
|
||||
q := NewTermQuery("user", "sandrae")
|
||||
res, err := client.DeleteByQuery().
|
||||
Index(testIndexName).
|
||||
Type("tweet").
|
||||
Query(q).
|
||||
Pretty(true).
|
||||
Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res == nil {
|
||||
t.Fatalf("expected response != nil; got: %v", res)
|
||||
}
|
||||
|
||||
// Flush and check count
|
||||
_, err = client.Flush().Index(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
count, err = client.Count(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if count != 2 {
|
||||
t.Fatalf("expected Count = %d; got: %d", 2, count)
|
||||
}
|
||||
}
|
||||
109
vendor/gopkg.in/olivere/elastic.v5/delete_template.go
generated
vendored
Normal file
109
vendor/gopkg.in/olivere/elastic.v5/delete_template.go
generated
vendored
Normal file
@@ -0,0 +1,109 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// DeleteTemplateService deletes a search template. More information can
|
||||
// be found at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-template.html.
|
||||
type DeleteTemplateService struct {
|
||||
client *Client
|
||||
pretty bool
|
||||
id string
|
||||
version *int
|
||||
versionType string
|
||||
}
|
||||
|
||||
// NewDeleteTemplateService creates a new DeleteTemplateService.
|
||||
func NewDeleteTemplateService(client *Client) *DeleteTemplateService {
|
||||
return &DeleteTemplateService{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
// Id is the template ID.
|
||||
func (s *DeleteTemplateService) Id(id string) *DeleteTemplateService {
|
||||
s.id = id
|
||||
return s
|
||||
}
|
||||
|
||||
// Version an explicit version number for concurrency control.
|
||||
func (s *DeleteTemplateService) Version(version int) *DeleteTemplateService {
|
||||
s.version = &version
|
||||
return s
|
||||
}
|
||||
|
||||
// VersionType specifies a version type.
|
||||
func (s *DeleteTemplateService) VersionType(versionType string) *DeleteTemplateService {
|
||||
s.versionType = versionType
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *DeleteTemplateService) buildURL() (string, url.Values, error) {
|
||||
// Build URL
|
||||
path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
|
||||
"id": s.id,
|
||||
})
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.version != nil {
|
||||
params.Set("version", fmt.Sprintf("%d", *s.version))
|
||||
}
|
||||
if s.versionType != "" {
|
||||
params.Set("version_type", s.versionType)
|
||||
}
|
||||
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *DeleteTemplateService) Validate() error {
|
||||
var invalid []string
|
||||
if s.id == "" {
|
||||
invalid = append(invalid, "Id")
|
||||
}
|
||||
if len(invalid) > 0 {
|
||||
return fmt.Errorf("missing required fields: %v", invalid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do executes the operation.
|
||||
func (s *DeleteTemplateService) Do(ctx context.Context) (*AcknowledgedResponse, error) {
|
||||
// Check pre-conditions
|
||||
if err := s.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get URL for request
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get HTTP response
|
||||
res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return operation response
|
||||
ret := new(AcknowledgedResponse)
|
||||
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
23
vendor/gopkg.in/olivere/elastic.v5/delete_template_test.go
generated
vendored
Normal file
23
vendor/gopkg.in/olivere/elastic.v5/delete_template_test.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDeleteTemplateValidate(t *testing.T) {
|
||||
client := setupTestClient(t)
|
||||
|
||||
// No template id -> fail with error
|
||||
res, err := NewDeleteTemplateService(client).Do(context.TODO())
|
||||
if err == nil {
|
||||
t.Fatalf("expected Delete to fail without index name")
|
||||
}
|
||||
if res != nil {
|
||||
t.Fatalf("expected result to be == nil; got: %v", res)
|
||||
}
|
||||
}
|
||||
119
vendor/gopkg.in/olivere/elastic.v5/delete_test.go
generated
vendored
Normal file
119
vendor/gopkg.in/olivere/elastic.v5/delete_test.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
client := setupTestClientAndCreateIndex(t)
|
||||
|
||||
tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
|
||||
tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
|
||||
tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
|
||||
|
||||
// Add all documents
|
||||
_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = client.Flush().Index(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Count documents
|
||||
count, err := client.Count(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if count != 3 {
|
||||
t.Errorf("expected Count = %d; got %d", 3, count)
|
||||
}
|
||||
|
||||
// Delete document 1
|
||||
res, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res.Found != true {
|
||||
t.Errorf("expected Found = true; got %v", res.Found)
|
||||
}
|
||||
_, err = client.Flush().Index(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
count, err = client.Count(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if count != 2 {
|
||||
t.Errorf("expected Count = %d; got %d", 2, count)
|
||||
}
|
||||
|
||||
// Delete non existent document 99
|
||||
res, err = client.Delete().Index(testIndexName).Type("tweet").Id("99").Refresh("true").Do(context.TODO())
|
||||
if err == nil {
|
||||
t.Fatalf("expected error; got: %v", err)
|
||||
}
|
||||
if !IsNotFound(err) {
|
||||
t.Errorf("expected NotFound error; got %v", err)
|
||||
}
|
||||
if res != nil {
|
||||
t.Fatalf("expected no response; got: %v", res)
|
||||
}
|
||||
|
||||
count, err = client.Count(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if count != 2 {
|
||||
t.Errorf("expected Count = %d; got %d", 2, count)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteValidate(t *testing.T) {
|
||||
client := setupTestClientAndCreateIndexAndAddDocs(t)
|
||||
|
||||
// No index name -> fail with error
|
||||
res, err := NewDeleteService(client).Type("tweet").Id("1").Do(context.TODO())
|
||||
if err == nil {
|
||||
t.Fatalf("expected Delete to fail without index name")
|
||||
}
|
||||
if res != nil {
|
||||
t.Fatalf("expected result to be == nil; got: %v", res)
|
||||
}
|
||||
|
||||
// No type -> fail with error
|
||||
res, err = NewDeleteService(client).Index(testIndexName).Id("1").Do(context.TODO())
|
||||
if err == nil {
|
||||
t.Fatalf("expected Delete to fail without type")
|
||||
}
|
||||
if res != nil {
|
||||
t.Fatalf("expected result to be == nil; got: %v", res)
|
||||
}
|
||||
|
||||
// No id -> fail with error
|
||||
res, err = NewDeleteService(client).Index(testIndexName).Type("tweet").Do(context.TODO())
|
||||
if err == nil {
|
||||
t.Fatalf("expected Delete to fail without id")
|
||||
}
|
||||
if res != nil {
|
||||
t.Fatalf("expected result to be == nil; got: %v", res)
|
||||
}
|
||||
}
|
||||
51
vendor/gopkg.in/olivere/elastic.v5/doc.go
generated
vendored
Normal file
51
vendor/gopkg.in/olivere/elastic.v5/doc.go
generated
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
/*
|
||||
Package elastic provides an interface to the Elasticsearch server
|
||||
(https://www.elastic.co/products/elasticsearch).
|
||||
|
||||
The first thing you do is to create a Client. If you have Elasticsearch
|
||||
installed and running with its default settings
|
||||
(i.e. available at http://127.0.0.1:9200), all you need to do is:
|
||||
|
||||
client, err := elastic.NewClient()
|
||||
if err != nil {
|
||||
// Handle error
|
||||
}
|
||||
|
||||
If your Elasticsearch server is running on a different IP and/or port,
|
||||
just provide a URL to NewClient:
|
||||
|
||||
// Create a client and connect to http://192.168.2.10:9201
|
||||
client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201"))
|
||||
if err != nil {
|
||||
// Handle error
|
||||
}
|
||||
|
||||
You can pass many more configuration parameters to NewClient. Review the
|
||||
documentation of NewClient for more information.
|
||||
|
||||
If no Elasticsearch server is available, services will fail when creating
|
||||
a new request and will return ErrNoClient.
|
||||
|
||||
A Client provides services. The services usually come with a variety of
|
||||
methods to prepare the query and a Do function to execute it against the
|
||||
Elasticsearch REST interface and return a response. Here is an example
|
||||
of the IndexExists service that checks if a given index already exists.
|
||||
|
||||
exists, err := client.IndexExists("twitter").Do(context.Background())
|
||||
if err != nil {
|
||||
// Handle error
|
||||
}
|
||||
if !exists {
|
||||
// Index does not exist yet.
|
||||
}
|
||||
|
||||
Look up the documentation for Client to get an idea of the services provided
|
||||
and what kinds of responses you get when executing the Do function of a service.
|
||||
Also see the wiki on Github for more details.
|
||||
|
||||
*/
|
||||
package elastic
|
||||
141
vendor/gopkg.in/olivere/elastic.v5/errors.go
generated
vendored
Normal file
141
vendor/gopkg.in/olivere/elastic.v5/errors.go
generated
vendored
Normal file
@@ -0,0 +1,141 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// checkResponse will return an error if the request/response indicates
|
||||
// an error returned from Elasticsearch.
|
||||
//
|
||||
// HTTP status codes between in the range [200..299] are considered successful.
|
||||
// All other errors are considered errors except they are specified in
|
||||
// ignoreErrors. This is necessary because for some services, HTTP status 404
|
||||
// is a valid response from Elasticsearch (e.g. the Exists service).
|
||||
//
|
||||
// The func tries to parse error details as returned from Elasticsearch
|
||||
// and encapsulates them in type elastic.Error.
|
||||
func checkResponse(req *http.Request, res *http.Response, ignoreErrors ...int) error {
|
||||
// 200-299 are valid status codes
|
||||
if res.StatusCode >= 200 && res.StatusCode <= 299 {
|
||||
return nil
|
||||
}
|
||||
// Ignore certain errors?
|
||||
for _, code := range ignoreErrors {
|
||||
if code == res.StatusCode {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return createResponseError(res)
|
||||
}
|
||||
|
||||
// createResponseError creates an Error structure from the HTTP response,
|
||||
// its status code and the error information sent by Elasticsearch.
|
||||
func createResponseError(res *http.Response) error {
|
||||
if res.Body == nil {
|
||||
return &Error{Status: res.StatusCode}
|
||||
}
|
||||
data, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return &Error{Status: res.StatusCode}
|
||||
}
|
||||
errReply := new(Error)
|
||||
err = json.Unmarshal(data, errReply)
|
||||
if err != nil {
|
||||
return &Error{Status: res.StatusCode}
|
||||
}
|
||||
if errReply != nil {
|
||||
if errReply.Status == 0 {
|
||||
errReply.Status = res.StatusCode
|
||||
}
|
||||
return errReply
|
||||
}
|
||||
return &Error{Status: res.StatusCode}
|
||||
}
|
||||
|
||||
// Error encapsulates error details as returned from Elasticsearch.
|
||||
type Error struct {
|
||||
Status int `json:"status"`
|
||||
Details *ErrorDetails `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// ErrorDetails encapsulate error details from Elasticsearch.
|
||||
// It is used in e.g. elastic.Error and elastic.BulkResponseItem.
|
||||
type ErrorDetails struct {
|
||||
Type string `json:"type"`
|
||||
Reason string `json:"reason"`
|
||||
ResourceType string `json:"resource.type,omitempty"`
|
||||
ResourceId string `json:"resource.id,omitempty"`
|
||||
Index string `json:"index,omitempty"`
|
||||
Phase string `json:"phase,omitempty"`
|
||||
Grouped bool `json:"grouped,omitempty"`
|
||||
CausedBy map[string]interface{} `json:"caused_by,omitempty"`
|
||||
RootCause []*ErrorDetails `json:"root_cause,omitempty"`
|
||||
FailedShards []map[string]interface{} `json:"failed_shards,omitempty"`
|
||||
}
|
||||
|
||||
// Error returns a string representation of the error.
|
||||
func (e *Error) Error() string {
|
||||
if e.Details != nil && e.Details.Reason != "" {
|
||||
return fmt.Sprintf("elastic: Error %d (%s): %s [type=%s]", e.Status, http.StatusText(e.Status), e.Details.Reason, e.Details.Type)
|
||||
} else {
|
||||
return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status))
|
||||
}
|
||||
}
|
||||
|
||||
// IsNotFound returns true if the given error indicates that Elasticsearch
|
||||
// returned HTTP status 404. The err parameter can be of type *elastic.Error,
|
||||
// elastic.Error, *http.Response or int (indicating the HTTP status code).
|
||||
func IsNotFound(err interface{}) bool {
|
||||
switch e := err.(type) {
|
||||
case *http.Response:
|
||||
return e.StatusCode == http.StatusNotFound
|
||||
case *Error:
|
||||
return e.Status == http.StatusNotFound
|
||||
case Error:
|
||||
return e.Status == http.StatusNotFound
|
||||
case int:
|
||||
return e == http.StatusNotFound
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsTimeout returns true if the given error indicates that Elasticsearch
|
||||
// returned HTTP status 408. The err parameter can be of type *elastic.Error,
|
||||
// elastic.Error, *http.Response or int (indicating the HTTP status code).
|
||||
func IsTimeout(err interface{}) bool {
|
||||
switch e := err.(type) {
|
||||
case *http.Response:
|
||||
return e.StatusCode == http.StatusRequestTimeout
|
||||
case *Error:
|
||||
return e.Status == http.StatusRequestTimeout
|
||||
case Error:
|
||||
return e.Status == http.StatusRequestTimeout
|
||||
case int:
|
||||
return e == http.StatusRequestTimeout
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// -- General errors --
|
||||
|
||||
// shardsInfo represents information from a shard.
|
||||
type shardsInfo struct {
|
||||
Total int `json:"total"`
|
||||
Successful int `json:"successful"`
|
||||
Failed int `json:"failed"`
|
||||
}
|
||||
|
||||
// shardOperationFailure represents a shard failure.
|
||||
type shardOperationFailure struct {
|
||||
Shard int `json:"shard"`
|
||||
Index string `json:"index"`
|
||||
Status string `json:"status"`
|
||||
// "reason"
|
||||
}
|
||||
202
vendor/gopkg.in/olivere/elastic.v5/errors_test.go
generated
vendored
Normal file
202
vendor/gopkg.in/olivere/elastic.v5/errors_test.go
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestResponseError(t *testing.T) {
|
||||
raw := "HTTP/1.1 404 Not Found\r\n" +
|
||||
"\r\n" +
|
||||
`{"error":{"root_cause":[{"type":"index_missing_exception","reason":"no such index","index":"elastic-test"}],"type":"index_missing_exception","reason":"no such index","index":"elastic-test"},"status":404}` + "\r\n"
|
||||
r := bufio.NewReader(strings.NewReader(raw))
|
||||
|
||||
req, err := http.NewRequest("GET", "/", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := http.ReadResponse(r, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = checkResponse(req, resp)
|
||||
if err == nil {
|
||||
t.Fatalf("expected error; got: %v", err)
|
||||
}
|
||||
|
||||
// Check for correct error message
|
||||
expected := fmt.Sprintf("elastic: Error %d (%s): no such index [type=index_missing_exception]", resp.StatusCode, http.StatusText(resp.StatusCode))
|
||||
got := err.Error()
|
||||
if got != expected {
|
||||
t.Fatalf("expected %q; got: %q", expected, got)
|
||||
}
|
||||
|
||||
// Check that error is of type *elastic.Error, which contains additional information
|
||||
e, ok := err.(*Error)
|
||||
if !ok {
|
||||
t.Fatal("expected error to be of type *elastic.Error")
|
||||
}
|
||||
if e.Status != resp.StatusCode {
|
||||
t.Fatalf("expected status code %d; got: %d", resp.StatusCode, e.Status)
|
||||
}
|
||||
if e.Details == nil {
|
||||
t.Fatalf("expected error details; got: %v", e.Details)
|
||||
}
|
||||
if got, want := e.Details.Index, "elastic-test"; got != want {
|
||||
t.Fatalf("expected error details index %q; got: %q", want, got)
|
||||
}
|
||||
if got, want := e.Details.Type, "index_missing_exception"; got != want {
|
||||
t.Fatalf("expected error details type %q; got: %q", want, got)
|
||||
}
|
||||
if got, want := e.Details.Reason, "no such index"; got != want {
|
||||
t.Fatalf("expected error details reason %q; got: %q", want, got)
|
||||
}
|
||||
if got, want := len(e.Details.RootCause), 1; got != want {
|
||||
t.Fatalf("expected %d error details root causes; got: %d", want, got)
|
||||
}
|
||||
|
||||
if got, want := e.Details.RootCause[0].Index, "elastic-test"; got != want {
|
||||
t.Fatalf("expected root cause index %q; got: %q", want, got)
|
||||
}
|
||||
if got, want := e.Details.RootCause[0].Type, "index_missing_exception"; got != want {
|
||||
t.Fatalf("expected root cause type %q; got: %q", want, got)
|
||||
}
|
||||
if got, want := e.Details.RootCause[0].Reason, "no such index"; got != want {
|
||||
t.Fatalf("expected root cause reason %q; got: %q", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResponseErrorHTML(t *testing.T) {
|
||||
raw := "HTTP/1.1 413 Request Entity Too Large\r\n" +
|
||||
"\r\n" +
|
||||
`<html>
|
||||
<head><title>413 Request Entity Too Large</title></head>
|
||||
<body bgcolor="white">
|
||||
<center><h1>413 Request Entity Too Large</h1></center>
|
||||
<hr><center>nginx/1.6.2</center>
|
||||
</body>
|
||||
</html>` + "\r\n"
|
||||
r := bufio.NewReader(strings.NewReader(raw))
|
||||
|
||||
req, err := http.NewRequest("GET", "/", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := http.ReadResponse(r, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = checkResponse(req, resp)
|
||||
if err == nil {
|
||||
t.Fatalf("expected error; got: %v", err)
|
||||
}
|
||||
|
||||
// Check for correct error message
|
||||
expected := fmt.Sprintf("elastic: Error %d (%s)", http.StatusRequestEntityTooLarge, http.StatusText(http.StatusRequestEntityTooLarge))
|
||||
got := err.Error()
|
||||
if got != expected {
|
||||
t.Fatalf("expected %q; got: %q", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResponseErrorWithIgnore(t *testing.T) {
|
||||
raw := "HTTP/1.1 404 Not Found\r\n" +
|
||||
"\r\n" +
|
||||
`{"some":"response"}` + "\r\n"
|
||||
r := bufio.NewReader(strings.NewReader(raw))
|
||||
|
||||
req, err := http.NewRequest("HEAD", "/", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resp, err := http.ReadResponse(r, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = checkResponse(req, resp)
|
||||
if err == nil {
|
||||
t.Fatalf("expected error; got: %v", err)
|
||||
}
|
||||
err = checkResponse(req, resp, 404) // ignore 404 errors
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error; got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsNotFound(t *testing.T) {
|
||||
if got, want := IsNotFound(nil), false; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
if got, want := IsNotFound(""), false; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
if got, want := IsNotFound(200), false; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
if got, want := IsNotFound(404), true; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
|
||||
if got, want := IsNotFound(&Error{Status: 404}), true; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
if got, want := IsNotFound(&Error{Status: 200}), false; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
|
||||
if got, want := IsNotFound(Error{Status: 404}), true; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
if got, want := IsNotFound(Error{Status: 200}), false; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
|
||||
if got, want := IsNotFound(&http.Response{StatusCode: 404}), true; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
if got, want := IsNotFound(&http.Response{StatusCode: 200}), false; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsTimeout(t *testing.T) {
|
||||
if got, want := IsTimeout(nil), false; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
if got, want := IsTimeout(""), false; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
if got, want := IsTimeout(200), false; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
if got, want := IsTimeout(408), true; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
|
||||
if got, want := IsTimeout(&Error{Status: 408}), true; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
if got, want := IsTimeout(&Error{Status: 200}), false; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
|
||||
if got, want := IsTimeout(Error{Status: 408}), true; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
if got, want := IsTimeout(Error{Status: 200}), false; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
|
||||
if got, want := IsTimeout(&http.Response{StatusCode: 408}), true; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
if got, want := IsTimeout(&http.Response{StatusCode: 200}), false; got != want {
|
||||
t.Errorf("expected %v; got: %v", want, got)
|
||||
}
|
||||
}
|
||||
587
vendor/gopkg.in/olivere/elastic.v5/example_test.go
generated
vendored
Normal file
587
vendor/gopkg.in/olivere/elastic.v5/example_test.go
generated
vendored
Normal file
@@ -0,0 +1,587 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
elastic "gopkg.in/olivere/elastic.v5"
|
||||
)
|
||||
|
||||
type Tweet struct {
|
||||
User string `json:"user"`
|
||||
Message string `json:"message"`
|
||||
Retweets int `json:"retweets"`
|
||||
Image string `json:"image,omitempty"`
|
||||
Created time.Time `json:"created,omitempty"`
|
||||
Tags []string `json:"tags,omitempty"`
|
||||
Location string `json:"location,omitempty"`
|
||||
Suggest *elastic.SuggestField `json:"suggest_field,omitempty"`
|
||||
}
|
||||
|
||||
func Example() {
|
||||
errorlog := log.New(os.Stdout, "APP ", log.LstdFlags)
|
||||
|
||||
// Obtain a client. You can also provide your own HTTP client here.
|
||||
client, err := elastic.NewClient(elastic.SetErrorLog(errorlog))
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Trace request and response details like this
|
||||
//client.SetTracer(log.New(os.Stdout, "", 0))
|
||||
|
||||
// Ping the Elasticsearch server to get e.g. the version number
|
||||
info, code, err := client.Ping("http://127.0.0.1:9200").Do(context.Background())
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
fmt.Printf("Elasticsearch returned with code %d and version %s\n", code, info.Version.Number)
|
||||
|
||||
// Getting the ES version number is quite common, so there's a shortcut
|
||||
esversion, err := client.ElasticsearchVersion("http://127.0.0.1:9200")
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
fmt.Printf("Elasticsearch version %s\n", esversion)
|
||||
|
||||
// Use the IndexExists service to check if a specified index exists.
|
||||
exists, err := client.IndexExists("twitter").Do(context.Background())
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
if !exists {
|
||||
// Create a new index.
|
||||
mapping := `
|
||||
{
|
||||
"settings":{
|
||||
"number_of_shards":1,
|
||||
"number_of_replicas":0
|
||||
},
|
||||
"mappings":{
|
||||
"_default_": {
|
||||
"_all": {
|
||||
"enabled": true
|
||||
}
|
||||
},
|
||||
"tweet":{
|
||||
"properties":{
|
||||
"user":{
|
||||
"type":"keyword"
|
||||
},
|
||||
"message":{
|
||||
"type":"text",
|
||||
"store": true,
|
||||
"fielddata": true
|
||||
},
|
||||
"retweets":{
|
||||
"type":"long"
|
||||
},
|
||||
"tags":{
|
||||
"type":"keyword"
|
||||
},
|
||||
"location":{
|
||||
"type":"geo_point"
|
||||
},
|
||||
"suggest_field":{
|
||||
"type":"completion"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
||||
createIndex, err := client.CreateIndex("twitter").Body(mapping).Do(context.Background())
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
if !createIndex.Acknowledged {
|
||||
// Not acknowledged
|
||||
}
|
||||
}
|
||||
|
||||
// Index a tweet (using JSON serialization)
|
||||
tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
|
||||
put1, err := client.Index().
|
||||
Index("twitter").
|
||||
Type("tweet").
|
||||
Id("1").
|
||||
BodyJson(tweet1).
|
||||
Do(context.Background())
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
fmt.Printf("Indexed tweet %s to index %s, type %s\n", put1.Id, put1.Index, put1.Type)
|
||||
|
||||
// Index a second tweet (by string)
|
||||
tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
|
||||
put2, err := client.Index().
|
||||
Index("twitter").
|
||||
Type("tweet").
|
||||
Id("2").
|
||||
BodyString(tweet2).
|
||||
Do(context.Background())
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
fmt.Printf("Indexed tweet %s to index %s, type %s\n", put2.Id, put2.Index, put2.Type)
|
||||
|
||||
// Get tweet with specified ID
|
||||
get1, err := client.Get().
|
||||
Index("twitter").
|
||||
Type("tweet").
|
||||
Id("1").
|
||||
Do(context.Background())
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
if get1.Found {
|
||||
fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)
|
||||
}
|
||||
|
||||
// Flush to make sure the documents got written.
|
||||
_, err = client.Flush().Index("twitter").Do(context.Background())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Search with a term query
|
||||
termQuery := elastic.NewTermQuery("user", "olivere")
|
||||
searchResult, err := client.Search().
|
||||
Index("twitter"). // search in index "twitter"
|
||||
Query(termQuery). // specify the query
|
||||
Sort("user", true). // sort by "user" field, ascending
|
||||
From(0).Size(10). // take documents 0-9
|
||||
Pretty(true). // pretty print request and response JSON
|
||||
Do(context.Background()) // execute
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// searchResult is of type SearchResult and returns hits, suggestions,
|
||||
// and all kinds of other information from Elasticsearch.
|
||||
fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
|
||||
|
||||
// Each is a convenience function that iterates over hits in a search result.
|
||||
// It makes sure you don't need to check for nil values in the response.
|
||||
// However, it ignores errors in serialization. If you want full control
|
||||
// over iterating the hits, see below.
|
||||
var ttyp Tweet
|
||||
for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
|
||||
t := item.(Tweet)
|
||||
fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
|
||||
}
|
||||
// TotalHits is another convenience function that works even when something goes wrong.
|
||||
fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
|
||||
|
||||
// Here's how you iterate through results with full control over each step.
|
||||
if searchResult.Hits.TotalHits > 0 {
|
||||
fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
|
||||
|
||||
// Iterate through results
|
||||
for _, hit := range searchResult.Hits.Hits {
|
||||
// hit.Index contains the name of the index
|
||||
|
||||
// Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
|
||||
var t Tweet
|
||||
err := json.Unmarshal(*hit.Source, &t)
|
||||
if err != nil {
|
||||
// Deserialization failed
|
||||
}
|
||||
|
||||
// Work with tweet
|
||||
fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
|
||||
}
|
||||
} else {
|
||||
// No hits
|
||||
fmt.Print("Found no tweets\n")
|
||||
}
|
||||
|
||||
// Update a tweet by the update API of Elasticsearch.
|
||||
// We just increment the number of retweets.
|
||||
script := elastic.NewScript("ctx._source.retweets += params.num").Param("num", 1)
|
||||
update, err := client.Update().Index("twitter").Type("tweet").Id("1").
|
||||
Script(script).
|
||||
Upsert(map[string]interface{}{"retweets": 0}).
|
||||
Do(context.Background())
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
fmt.Printf("New version of tweet %q is now %d", update.Id, update.Version)
|
||||
|
||||
// ...
|
||||
|
||||
// Delete an index.
|
||||
deleteIndex, err := client.DeleteIndex("twitter").Do(context.Background())
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
if !deleteIndex.Acknowledged {
|
||||
// Not acknowledged
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleClient_NewClient_default() {
|
||||
// Obtain a client to the Elasticsearch instance on http://127.0.0.1:9200.
|
||||
client, err := elastic.NewClient()
|
||||
if err != nil {
|
||||
// Handle error
|
||||
fmt.Printf("connection failed: %v\n", err)
|
||||
} else {
|
||||
fmt.Println("connected")
|
||||
}
|
||||
_ = client
|
||||
// Output:
|
||||
// connected
|
||||
}
|
||||
|
||||
func ExampleClient_NewClient_cluster() {
|
||||
// Obtain a client for an Elasticsearch cluster of two nodes,
|
||||
// running on 10.0.1.1 and 10.0.1.2.
|
||||
client, err := elastic.NewClient(elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"))
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
_ = client
|
||||
}
|
||||
|
||||
func ExampleClient_NewClient_manyOptions() {
|
||||
// Obtain a client for an Elasticsearch cluster of two nodes,
|
||||
// running on 10.0.1.1 and 10.0.1.2. Do not run the sniffer.
|
||||
// Set the healthcheck interval to 10s. When requests fail,
|
||||
// retry 5 times. Print error messages to os.Stderr and informational
|
||||
// messages to os.Stdout.
|
||||
client, err := elastic.NewClient(
|
||||
elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"),
|
||||
elastic.SetSniff(false),
|
||||
elastic.SetHealthcheckInterval(10*time.Second),
|
||||
elastic.SetMaxRetries(5),
|
||||
elastic.SetErrorLog(log.New(os.Stderr, "ELASTIC ", log.LstdFlags)),
|
||||
elastic.SetInfoLog(log.New(os.Stdout, "", log.LstdFlags)))
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
_ = client
|
||||
}
|
||||
|
||||
func ExampleIndexExistsService() {
|
||||
// Get a client to the local Elasticsearch instance.
|
||||
client, err := elastic.NewClient()
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
// Use the IndexExists service to check if the index "twitter" exists.
|
||||
exists, err := client.IndexExists("twitter").Do(context.Background())
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
if exists {
|
||||
// ...
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleCreateIndexService() {
|
||||
// Get a client to the local Elasticsearch instance.
|
||||
client, err := elastic.NewClient()
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
// Create a new index.
|
||||
createIndex, err := client.CreateIndex("twitter").Do(context.Background())
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
if !createIndex.Acknowledged {
|
||||
// Not acknowledged
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleDeleteIndexService() {
|
||||
// Get a client to the local Elasticsearch instance.
|
||||
client, err := elastic.NewClient()
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
// Delete an index.
|
||||
deleteIndex, err := client.DeleteIndex("twitter").Do(context.Background())
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
if !deleteIndex.Acknowledged {
|
||||
// Not acknowledged
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleSearchService() {
|
||||
// Get a client to the local Elasticsearch instance.
|
||||
client, err := elastic.NewClient()
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Search with a term query
|
||||
termQuery := elastic.NewTermQuery("user", "olivere")
|
||||
searchResult, err := client.Search().
|
||||
Index("twitter"). // search in index "twitter"
|
||||
Query(termQuery). // specify the query
|
||||
Sort("user", true). // sort by "user" field, ascending
|
||||
From(0).Size(10). // take documents 0-9
|
||||
Pretty(true). // pretty print request and response JSON
|
||||
Do(context.Background()) // execute
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// searchResult is of type SearchResult and returns hits, suggestions,
|
||||
// and all kinds of other information from Elasticsearch.
|
||||
fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
|
||||
|
||||
// Number of hits
|
||||
if searchResult.Hits.TotalHits > 0 {
|
||||
fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
|
||||
|
||||
// Iterate through results
|
||||
for _, hit := range searchResult.Hits.Hits {
|
||||
// hit.Index contains the name of the index
|
||||
|
||||
// Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
|
||||
var t Tweet
|
||||
err := json.Unmarshal(*hit.Source, &t)
|
||||
if err != nil {
|
||||
// Deserialization failed
|
||||
}
|
||||
|
||||
// Work with tweet
|
||||
fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
|
||||
}
|
||||
} else {
|
||||
// No hits
|
||||
fmt.Print("Found no tweets\n")
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleAggregations() {
|
||||
// Get a client to the local Elasticsearch instance.
|
||||
client, err := elastic.NewClient()
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Create an aggregation for users and a sub-aggregation for a date histogram of tweets (per year).
|
||||
timeline := elastic.NewTermsAggregation().Field("user").Size(10).OrderByCountDesc()
|
||||
histogram := elastic.NewDateHistogramAggregation().Field("created").Interval("year")
|
||||
timeline = timeline.SubAggregation("history", histogram)
|
||||
|
||||
// Search with a term query
|
||||
searchResult, err := client.Search().
|
||||
Index("twitter"). // search in index "twitter"
|
||||
Query(elastic.NewMatchAllQuery()). // return all results, but ...
|
||||
SearchType("count"). // ... do not return hits, just the count
|
||||
Aggregation("timeline", timeline). // add our aggregation to the query
|
||||
Pretty(true). // pretty print request and response JSON
|
||||
Do(context.Background()) // execute
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Access "timeline" aggregate in search result.
|
||||
agg, found := searchResult.Aggregations.Terms("timeline")
|
||||
if !found {
|
||||
log.Fatalf("we should have a terms aggregation called %q", "timeline")
|
||||
}
|
||||
for _, userBucket := range agg.Buckets {
|
||||
// Every bucket should have the user field as key.
|
||||
user := userBucket.Key
|
||||
|
||||
// The sub-aggregation history should have the number of tweets per year.
|
||||
histogram, found := userBucket.DateHistogram("history")
|
||||
if found {
|
||||
for _, year := range histogram.Buckets {
|
||||
fmt.Printf("user %q has %d tweets in %q\n", user, year.DocCount, year.KeyAsString)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleSearchResult() {
|
||||
client, err := elastic.NewClient()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Do a search
|
||||
searchResult, err := client.Search().Index("twitter").Query(elastic.NewMatchAllQuery()).Do(context.Background())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// searchResult is of type SearchResult and returns hits, suggestions,
|
||||
// and all kinds of other information from Elasticsearch.
|
||||
fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
|
||||
|
||||
// Each is a utility function that iterates over hits in a search result.
|
||||
// It makes sure you don't need to check for nil values in the response.
|
||||
// However, it ignores errors in serialization. If you want full control
|
||||
// over iterating the hits, see below.
|
||||
var ttyp Tweet
|
||||
for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
|
||||
t := item.(Tweet)
|
||||
fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
|
||||
}
|
||||
fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
|
||||
|
||||
// Here's how you iterate hits with full control.
|
||||
if searchResult.Hits.TotalHits > 0 {
|
||||
fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
|
||||
|
||||
// Iterate through results
|
||||
for _, hit := range searchResult.Hits.Hits {
|
||||
// hit.Index contains the name of the index
|
||||
|
||||
// Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
|
||||
var t Tweet
|
||||
err := json.Unmarshal(*hit.Source, &t)
|
||||
if err != nil {
|
||||
// Deserialization failed
|
||||
}
|
||||
|
||||
// Work with tweet
|
||||
fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
|
||||
}
|
||||
} else {
|
||||
// No hits
|
||||
fmt.Print("Found no tweets\n")
|
||||
}
|
||||
}
|
||||
|
||||
func ExamplePutTemplateService() {
|
||||
client, err := elastic.NewClient()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Create search template
|
||||
tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}`
|
||||
|
||||
// Create template
|
||||
resp, err := client.PutTemplate().
|
||||
Id("my-search-template"). // Name of the template
|
||||
BodyString(tmpl). // Search template itself
|
||||
Do(context.Background()) // Execute
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if resp.Acknowledged {
|
||||
fmt.Println("search template creation acknowledged")
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleGetTemplateService() {
|
||||
client, err := elastic.NewClient()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Get template stored under "my-search-template"
|
||||
resp, err := client.GetTemplate().Id("my-search-template").Do(context.Background())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Printf("search template is: %q\n", resp.Template)
|
||||
}
|
||||
|
||||
func ExampleDeleteTemplateService() {
|
||||
client, err := elastic.NewClient()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Delete template
|
||||
resp, err := client.DeleteTemplate().Id("my-search-template").Do(context.Background())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if resp != nil && resp.Acknowledged {
|
||||
fmt.Println("template deleted")
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleClusterHealthService() {
|
||||
client, err := elastic.NewClient()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Get cluster health
|
||||
res, err := client.ClusterHealth().Index("twitter").Do(context.Background())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if res == nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Printf("Cluster status is %q\n", res.Status)
|
||||
}
|
||||
|
||||
func ExampleClusterHealthService_WaitForGreen() {
|
||||
client, err := elastic.NewClient()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Wait for status green
|
||||
res, err := client.ClusterHealth().WaitForStatus("green").Timeout("15s").Do(context.Background())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if res.TimedOut {
|
||||
fmt.Printf("time out waiting for cluster status %q\n", "green")
|
||||
} else {
|
||||
fmt.Printf("cluster status is %q\n", res.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleClusterStateService() {
|
||||
client, err := elastic.NewClient()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Get cluster state
|
||||
res, err := client.ClusterState().Metric("version").Do(context.Background())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Printf("Cluster %q has version %d", res.ClusterName, res.Version)
|
||||
}
|
||||
176
vendor/gopkg.in/olivere/elastic.v5/exists.go
generated
vendored
Normal file
176
vendor/gopkg.in/olivere/elastic.v5/exists.go
generated
vendored
Normal file
@@ -0,0 +1,176 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// ExistsService checks for the existence of a document using HEAD.
|
||||
//
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-get.html
|
||||
// for details.
|
||||
type ExistsService struct {
|
||||
client *Client
|
||||
pretty bool
|
||||
id string
|
||||
index string
|
||||
typ string
|
||||
preference string
|
||||
realtime *bool
|
||||
refresh string
|
||||
routing string
|
||||
parent string
|
||||
}
|
||||
|
||||
// NewExistsService creates a new ExistsService.
|
||||
func NewExistsService(client *Client) *ExistsService {
|
||||
return &ExistsService{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
// Id is the document ID.
|
||||
func (s *ExistsService) Id(id string) *ExistsService {
|
||||
s.id = id
|
||||
return s
|
||||
}
|
||||
|
||||
// Index is the name of the index.
|
||||
func (s *ExistsService) Index(index string) *ExistsService {
|
||||
s.index = index
|
||||
return s
|
||||
}
|
||||
|
||||
// Type is the type of the document (use `_all` to fetch the first document
|
||||
// matching the ID across all types).
|
||||
func (s *ExistsService) Type(typ string) *ExistsService {
|
||||
s.typ = typ
|
||||
return s
|
||||
}
|
||||
|
||||
// Preference specifies the node or shard the operation should be performed on (default: random).
|
||||
func (s *ExistsService) Preference(preference string) *ExistsService {
|
||||
s.preference = preference
|
||||
return s
|
||||
}
|
||||
|
||||
// Realtime specifies whether to perform the operation in realtime or search mode.
|
||||
func (s *ExistsService) Realtime(realtime bool) *ExistsService {
|
||||
s.realtime = &realtime
|
||||
return s
|
||||
}
|
||||
|
||||
// Refresh the shard containing the document before performing the operation.
|
||||
func (s *ExistsService) Refresh(refresh string) *ExistsService {
|
||||
s.refresh = refresh
|
||||
return s
|
||||
}
|
||||
|
||||
// Routing is a specific routing value.
|
||||
func (s *ExistsService) Routing(routing string) *ExistsService {
|
||||
s.routing = routing
|
||||
return s
|
||||
}
|
||||
|
||||
// Parent is the ID of the parent document.
|
||||
func (s *ExistsService) Parent(parent string) *ExistsService {
|
||||
s.parent = parent
|
||||
return s
|
||||
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (s *ExistsService) Pretty(pretty bool) *ExistsService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *ExistsService) buildURL() (string, url.Values, error) {
|
||||
// Build URL
|
||||
path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
|
||||
"id": s.id,
|
||||
"index": s.index,
|
||||
"type": s.typ,
|
||||
})
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.pretty {
|
||||
params.Set("pretty", "1")
|
||||
}
|
||||
if s.realtime != nil {
|
||||
params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
|
||||
}
|
||||
if s.refresh != "" {
|
||||
params.Set("refresh", s.refresh)
|
||||
}
|
||||
if s.routing != "" {
|
||||
params.Set("routing", s.routing)
|
||||
}
|
||||
if s.parent != "" {
|
||||
params.Set("parent", s.parent)
|
||||
}
|
||||
if s.preference != "" {
|
||||
params.Set("preference", s.preference)
|
||||
}
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *ExistsService) Validate() error {
|
||||
var invalid []string
|
||||
if s.id == "" {
|
||||
invalid = append(invalid, "Id")
|
||||
}
|
||||
if s.index == "" {
|
||||
invalid = append(invalid, "Index")
|
||||
}
|
||||
if s.typ == "" {
|
||||
invalid = append(invalid, "Type")
|
||||
}
|
||||
if len(invalid) > 0 {
|
||||
return fmt.Errorf("missing required fields: %v", invalid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do executes the operation.
|
||||
func (s *ExistsService) Do(ctx context.Context) (bool, error) {
|
||||
// Check pre-conditions
|
||||
if err := s.Validate(); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Get URL for request
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Get HTTP response
|
||||
res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Return operation response
|
||||
switch res.StatusCode {
|
||||
case http.StatusOK:
|
||||
return true, nil
|
||||
case http.StatusNotFound:
|
||||
return false, nil
|
||||
default:
|
||||
return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
|
||||
}
|
||||
}
|
||||
53
vendor/gopkg.in/olivere/elastic.v5/exists_test.go
generated
vendored
Normal file
53
vendor/gopkg.in/olivere/elastic.v5/exists_test.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestExists(t *testing.T) {
|
||||
client := setupTestClientAndCreateIndexAndAddDocs(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
|
||||
|
||||
exists, err := client.Exists().Index(testIndexName).Type("comment").Id("1").Parent("tweet").Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !exists {
|
||||
t.Fatal("expected document to exist")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExistsValidate(t *testing.T) {
|
||||
client := setupTestClient(t)
|
||||
|
||||
// No index -> fail with error
|
||||
res, err := NewExistsService(client).Type("tweet").Id("1").Do(context.TODO())
|
||||
if err == nil {
|
||||
t.Fatalf("expected Delete to fail without index name")
|
||||
}
|
||||
if res != false {
|
||||
t.Fatalf("expected result to be false; got: %v", res)
|
||||
}
|
||||
|
||||
// No type -> fail with error
|
||||
res, err = NewExistsService(client).Index(testIndexName).Id("1").Do(context.TODO())
|
||||
if err == nil {
|
||||
t.Fatalf("expected Delete to fail without index name")
|
||||
}
|
||||
if res != false {
|
||||
t.Fatalf("expected result to be false; got: %v", res)
|
||||
}
|
||||
|
||||
// No id -> fail with error
|
||||
res, err = NewExistsService(client).Index(testIndexName).Type("tweet").Do(context.TODO())
|
||||
if err == nil {
|
||||
t.Fatalf("expected Delete to fail without index name")
|
||||
}
|
||||
if res != false {
|
||||
t.Fatalf("expected result to be false; got: %v", res)
|
||||
}
|
||||
}
|
||||
321
vendor/gopkg.in/olivere/elastic.v5/explain.go
generated
vendored
Normal file
321
vendor/gopkg.in/olivere/elastic.v5/explain.go
generated
vendored
Normal file
@@ -0,0 +1,321 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// ExplainService computes a score explanation for a query and
|
||||
// a specific document.
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-explain.html.
|
||||
type ExplainService struct {
|
||||
client *Client
|
||||
pretty bool
|
||||
id string
|
||||
index string
|
||||
typ string
|
||||
q string
|
||||
routing string
|
||||
lenient *bool
|
||||
analyzer string
|
||||
df string
|
||||
fields []string
|
||||
lowercaseExpandedTerms *bool
|
||||
xSourceInclude []string
|
||||
analyzeWildcard *bool
|
||||
parent string
|
||||
preference string
|
||||
xSource []string
|
||||
defaultOperator string
|
||||
xSourceExclude []string
|
||||
source string
|
||||
bodyJson interface{}
|
||||
bodyString string
|
||||
}
|
||||
|
||||
// NewExplainService creates a new ExplainService.
|
||||
func NewExplainService(client *Client) *ExplainService {
|
||||
return &ExplainService{
|
||||
client: client,
|
||||
xSource: make([]string, 0),
|
||||
xSourceExclude: make([]string, 0),
|
||||
fields: make([]string, 0),
|
||||
xSourceInclude: make([]string, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Id is the document ID.
|
||||
func (s *ExplainService) Id(id string) *ExplainService {
|
||||
s.id = id
|
||||
return s
|
||||
}
|
||||
|
||||
// Index is the name of the index.
|
||||
func (s *ExplainService) Index(index string) *ExplainService {
|
||||
s.index = index
|
||||
return s
|
||||
}
|
||||
|
||||
// Type is the type of the document.
|
||||
func (s *ExplainService) Type(typ string) *ExplainService {
|
||||
s.typ = typ
|
||||
return s
|
||||
}
|
||||
|
||||
// Source is the URL-encoded query definition (instead of using the request body).
|
||||
func (s *ExplainService) Source(source string) *ExplainService {
|
||||
s.source = source
|
||||
return s
|
||||
}
|
||||
|
||||
// XSourceExclude is a list of fields to exclude from the returned _source field.
|
||||
func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService {
|
||||
s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
|
||||
return s
|
||||
}
|
||||
|
||||
// Lenient specifies whether format-based query failures
|
||||
// (such as providing text to a numeric field) should be ignored.
|
||||
func (s *ExplainService) Lenient(lenient bool) *ExplainService {
|
||||
s.lenient = &lenient
|
||||
return s
|
||||
}
|
||||
|
||||
// Query in the Lucene query string syntax.
|
||||
func (s *ExplainService) Q(q string) *ExplainService {
|
||||
s.q = q
|
||||
return s
|
||||
}
|
||||
|
||||
// Routing sets a specific routing value.
|
||||
func (s *ExplainService) Routing(routing string) *ExplainService {
|
||||
s.routing = routing
|
||||
return s
|
||||
}
|
||||
|
||||
// AnalyzeWildcard specifies whether wildcards and prefix queries
|
||||
// in the query string query should be analyzed (default: false).
|
||||
func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService {
|
||||
s.analyzeWildcard = &analyzeWildcard
|
||||
return s
|
||||
}
|
||||
|
||||
// Analyzer is the analyzer for the query string query.
|
||||
func (s *ExplainService) Analyzer(analyzer string) *ExplainService {
|
||||
s.analyzer = analyzer
|
||||
return s
|
||||
}
|
||||
|
||||
// Df is the default field for query string query (default: _all).
|
||||
func (s *ExplainService) Df(df string) *ExplainService {
|
||||
s.df = df
|
||||
return s
|
||||
}
|
||||
|
||||
// Fields is a list of fields to return in the response.
|
||||
func (s *ExplainService) Fields(fields ...string) *ExplainService {
|
||||
s.fields = append(s.fields, fields...)
|
||||
return s
|
||||
}
|
||||
|
||||
// LowercaseExpandedTerms specifies whether query terms should be lowercased.
|
||||
func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService {
|
||||
s.lowercaseExpandedTerms = &lowercaseExpandedTerms
|
||||
return s
|
||||
}
|
||||
|
||||
// XSourceInclude is a list of fields to extract and return from the _source field.
|
||||
func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService {
|
||||
s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
|
||||
return s
|
||||
}
|
||||
|
||||
// DefaultOperator is the default operator for query string query (AND or OR).
|
||||
func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService {
|
||||
s.defaultOperator = defaultOperator
|
||||
return s
|
||||
}
|
||||
|
||||
// Parent is the ID of the parent document.
|
||||
func (s *ExplainService) Parent(parent string) *ExplainService {
|
||||
s.parent = parent
|
||||
return s
|
||||
}
|
||||
|
||||
// Preference specifies the node or shard the operation should be performed on (default: random).
|
||||
func (s *ExplainService) Preference(preference string) *ExplainService {
|
||||
s.preference = preference
|
||||
return s
|
||||
}
|
||||
|
||||
// XSource is true or false to return the _source field or not, or a list of fields to return.
|
||||
func (s *ExplainService) XSource(xSource ...string) *ExplainService {
|
||||
s.xSource = append(s.xSource, xSource...)
|
||||
return s
|
||||
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (s *ExplainService) Pretty(pretty bool) *ExplainService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// Query sets a query definition using the Query DSL.
|
||||
func (s *ExplainService) Query(query Query) *ExplainService {
|
||||
src, err := query.Source()
|
||||
if err != nil {
|
||||
// Do nothing in case of an error
|
||||
return s
|
||||
}
|
||||
body := make(map[string]interface{})
|
||||
body["query"] = src
|
||||
s.bodyJson = body
|
||||
return s
|
||||
}
|
||||
|
||||
// BodyJson sets the query definition using the Query DSL.
|
||||
func (s *ExplainService) BodyJson(body interface{}) *ExplainService {
|
||||
s.bodyJson = body
|
||||
return s
|
||||
}
|
||||
|
||||
// BodyString sets the query definition using the Query DSL as a string.
|
||||
func (s *ExplainService) BodyString(body string) *ExplainService {
|
||||
s.bodyString = body
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *ExplainService) buildURL() (string, url.Values, error) {
|
||||
// Build URL
|
||||
path, err := uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{
|
||||
"id": s.id,
|
||||
"index": s.index,
|
||||
"type": s.typ,
|
||||
})
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.pretty {
|
||||
params.Set("pretty", "1")
|
||||
}
|
||||
if len(s.xSource) > 0 {
|
||||
params.Set("_source", strings.Join(s.xSource, ","))
|
||||
}
|
||||
if s.defaultOperator != "" {
|
||||
params.Set("default_operator", s.defaultOperator)
|
||||
}
|
||||
if s.parent != "" {
|
||||
params.Set("parent", s.parent)
|
||||
}
|
||||
if s.preference != "" {
|
||||
params.Set("preference", s.preference)
|
||||
}
|
||||
if s.source != "" {
|
||||
params.Set("source", s.source)
|
||||
}
|
||||
if len(s.xSourceExclude) > 0 {
|
||||
params.Set("_source_exclude", strings.Join(s.xSourceExclude, ","))
|
||||
}
|
||||
if s.lenient != nil {
|
||||
params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
|
||||
}
|
||||
if s.q != "" {
|
||||
params.Set("q", s.q)
|
||||
}
|
||||
if s.routing != "" {
|
||||
params.Set("routing", s.routing)
|
||||
}
|
||||
if len(s.fields) > 0 {
|
||||
params.Set("fields", strings.Join(s.fields, ","))
|
||||
}
|
||||
if s.lowercaseExpandedTerms != nil {
|
||||
params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
|
||||
}
|
||||
if len(s.xSourceInclude) > 0 {
|
||||
params.Set("_source_include", strings.Join(s.xSourceInclude, ","))
|
||||
}
|
||||
if s.analyzeWildcard != nil {
|
||||
params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
|
||||
}
|
||||
if s.analyzer != "" {
|
||||
params.Set("analyzer", s.analyzer)
|
||||
}
|
||||
if s.df != "" {
|
||||
params.Set("df", s.df)
|
||||
}
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *ExplainService) Validate() error {
|
||||
var invalid []string
|
||||
if s.index == "" {
|
||||
invalid = append(invalid, "Index")
|
||||
}
|
||||
if s.typ == "" {
|
||||
invalid = append(invalid, "Type")
|
||||
}
|
||||
if s.id == "" {
|
||||
invalid = append(invalid, "Id")
|
||||
}
|
||||
if len(invalid) > 0 {
|
||||
return fmt.Errorf("missing required fields: %v", invalid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do executes the operation.
|
||||
func (s *ExplainService) Do(ctx context.Context) (*ExplainResponse, error) {
|
||||
// Check pre-conditions
|
||||
if err := s.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get URL for request
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Setup HTTP request body
|
||||
var body interface{}
|
||||
if s.bodyJson != nil {
|
||||
body = s.bodyJson
|
||||
} else {
|
||||
body = s.bodyString
|
||||
}
|
||||
|
||||
// Get HTTP response
|
||||
res, err := s.client.PerformRequest(ctx, "GET", path, params, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return operation response
|
||||
ret := new(ExplainResponse)
|
||||
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// ExplainResponse is the response of ExplainService.Do.
|
||||
type ExplainResponse struct {
|
||||
Index string `json:"_index"`
|
||||
Type string `json:"_type"`
|
||||
Id string `json:"_id"`
|
||||
Matched bool `json:"matched"`
|
||||
Explanation map[string]interface{} `json:"explanation"`
|
||||
}
|
||||
44
vendor/gopkg.in/olivere/elastic.v5/explain_test.go
generated
vendored
Normal file
44
vendor/gopkg.in/olivere/elastic.v5/explain_test.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestExplain(t *testing.T) {
|
||||
client := setupTestClientAndCreateIndex(t)
|
||||
|
||||
tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
|
||||
|
||||
// Add a document
|
||||
indexResult, err := client.Index().
|
||||
Index(testIndexName).
|
||||
Type("tweet").
|
||||
Id("1").
|
||||
BodyJson(&tweet1).
|
||||
Refresh("true").
|
||||
Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if indexResult == nil {
|
||||
t.Errorf("expected result to be != nil; got: %v", indexResult)
|
||||
}
|
||||
|
||||
// Explain
|
||||
query := NewTermQuery("user", "olivere")
|
||||
expl, err := client.Explain(testIndexName, "tweet", "1").Query(query).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if expl == nil {
|
||||
t.Fatal("expected to return an explanation")
|
||||
}
|
||||
if !expl.Matched {
|
||||
t.Errorf("expected matched to be %v; got: %v", true, expl.Matched)
|
||||
}
|
||||
}
|
||||
74
vendor/gopkg.in/olivere/elastic.v5/fetch_source_context.go
generated
vendored
Normal file
74
vendor/gopkg.in/olivere/elastic.v5/fetch_source_context.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type FetchSourceContext struct {
|
||||
fetchSource bool
|
||||
transformSource bool
|
||||
includes []string
|
||||
excludes []string
|
||||
}
|
||||
|
||||
func NewFetchSourceContext(fetchSource bool) *FetchSourceContext {
|
||||
return &FetchSourceContext{
|
||||
fetchSource: fetchSource,
|
||||
includes: make([]string, 0),
|
||||
excludes: make([]string, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (fsc *FetchSourceContext) FetchSource() bool {
|
||||
return fsc.fetchSource
|
||||
}
|
||||
|
||||
func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) {
|
||||
fsc.fetchSource = fetchSource
|
||||
}
|
||||
|
||||
func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext {
|
||||
fsc.includes = append(fsc.includes, includes...)
|
||||
return fsc
|
||||
}
|
||||
|
||||
func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext {
|
||||
fsc.excludes = append(fsc.excludes, excludes...)
|
||||
return fsc
|
||||
}
|
||||
|
||||
func (fsc *FetchSourceContext) TransformSource(transformSource bool) *FetchSourceContext {
|
||||
fsc.transformSource = transformSource
|
||||
return fsc
|
||||
}
|
||||
|
||||
func (fsc *FetchSourceContext) Source() (interface{}, error) {
|
||||
if !fsc.fetchSource {
|
||||
return false, nil
|
||||
}
|
||||
return map[string]interface{}{
|
||||
"includes": fsc.includes,
|
||||
"excludes": fsc.excludes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Query returns the parameters in a form suitable for a URL query string.
|
||||
func (fsc *FetchSourceContext) Query() url.Values {
|
||||
params := url.Values{}
|
||||
if !fsc.fetchSource {
|
||||
params.Add("_source", "false")
|
||||
return params
|
||||
}
|
||||
if len(fsc.includes) > 0 {
|
||||
params.Add("_source_include", strings.Join(fsc.includes, ","))
|
||||
}
|
||||
if len(fsc.excludes) > 0 {
|
||||
params.Add("_source_exclude", strings.Join(fsc.excludes, ","))
|
||||
}
|
||||
return params
|
||||
}
|
||||
125
vendor/gopkg.in/olivere/elastic.v5/fetch_source_context_test.go
generated
vendored
Normal file
125
vendor/gopkg.in/olivere/elastic.v5/fetch_source_context_test.go
generated
vendored
Normal file
@@ -0,0 +1,125 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFetchSourceContextNoFetchSource(t *testing.T) {
|
||||
builder := NewFetchSourceContext(false)
|
||||
src, err := builder.Source()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
data, err := json.Marshal(src)
|
||||
if err != nil {
|
||||
t.Fatalf("marshaling to JSON failed: %v", err)
|
||||
}
|
||||
got := string(data)
|
||||
expected := `false`
|
||||
if got != expected {
|
||||
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchSourceContextNoFetchSourceIgnoreIncludesAndExcludes(t *testing.T) {
|
||||
builder := NewFetchSourceContext(false).Include("a", "b").Exclude("c")
|
||||
src, err := builder.Source()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
data, err := json.Marshal(src)
|
||||
if err != nil {
|
||||
t.Fatalf("marshaling to JSON failed: %v", err)
|
||||
}
|
||||
got := string(data)
|
||||
expected := `false`
|
||||
if got != expected {
|
||||
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchSourceContextFetchSource(t *testing.T) {
|
||||
builder := NewFetchSourceContext(true)
|
||||
src, err := builder.Source()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
data, err := json.Marshal(src)
|
||||
if err != nil {
|
||||
t.Fatalf("marshaling to JSON failed: %v", err)
|
||||
}
|
||||
got := string(data)
|
||||
expected := `{"excludes":[],"includes":[]}`
|
||||
if got != expected {
|
||||
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchSourceContextFetchSourceWithIncludesOnly(t *testing.T) {
|
||||
builder := NewFetchSourceContext(true).Include("a", "b")
|
||||
src, err := builder.Source()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
data, err := json.Marshal(src)
|
||||
if err != nil {
|
||||
t.Fatalf("marshaling to JSON failed: %v", err)
|
||||
}
|
||||
got := string(data)
|
||||
expected := `{"excludes":[],"includes":["a","b"]}`
|
||||
if got != expected {
|
||||
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchSourceContextFetchSourceWithIncludesAndExcludes(t *testing.T) {
|
||||
builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c")
|
||||
src, err := builder.Source()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
data, err := json.Marshal(src)
|
||||
if err != nil {
|
||||
t.Fatalf("marshaling to JSON failed: %v", err)
|
||||
}
|
||||
got := string(data)
|
||||
expected := `{"excludes":["c"],"includes":["a","b"]}`
|
||||
if got != expected {
|
||||
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchSourceContextQueryDefaults(t *testing.T) {
|
||||
builder := NewFetchSourceContext(true)
|
||||
values := builder.Query()
|
||||
got := values.Encode()
|
||||
expected := ""
|
||||
if got != expected {
|
||||
t.Errorf("expected %q; got: %q", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchSourceContextQueryNoFetchSource(t *testing.T) {
|
||||
builder := NewFetchSourceContext(false)
|
||||
values := builder.Query()
|
||||
got := values.Encode()
|
||||
expected := "_source=false"
|
||||
if got != expected {
|
||||
t.Errorf("expected %q; got: %q", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFetchSourceContextQueryFetchSourceWithIncludesAndExcludes(t *testing.T) {
|
||||
builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c")
|
||||
values := builder.Query()
|
||||
got := values.Encode()
|
||||
expected := "_source_exclude=c&_source_include=a%2Cb"
|
||||
if got != expected {
|
||||
t.Errorf("expected %q; got: %q", expected, got)
|
||||
}
|
||||
}
|
||||
259
vendor/gopkg.in/olivere/elastic.v5/field_stats.go
generated
vendored
Normal file
259
vendor/gopkg.in/olivere/elastic.v5/field_stats.go
generated
vendored
Normal file
@@ -0,0 +1,259 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
const (
|
||||
FieldStatsClusterLevel = "cluster"
|
||||
FieldStatsIndicesLevel = "indices"
|
||||
)
|
||||
|
||||
// FieldStatsService allows finding statistical properties of a field without executing a search,
|
||||
// but looking up measurements that are natively available in the Lucene index.
|
||||
//
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-field-stats.html
|
||||
// for details
|
||||
type FieldStatsService struct {
|
||||
client *Client
|
||||
pretty bool
|
||||
level string
|
||||
index []string
|
||||
allowNoIndices *bool
|
||||
expandWildcards string
|
||||
fields []string
|
||||
ignoreUnavailable *bool
|
||||
bodyJson interface{}
|
||||
bodyString string
|
||||
}
|
||||
|
||||
// NewFieldStatsService creates a new FieldStatsService
|
||||
func NewFieldStatsService(client *Client) *FieldStatsService {
|
||||
return &FieldStatsService{
|
||||
client: client,
|
||||
index: make([]string, 0),
|
||||
fields: make([]string, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Index is a list of index names; use `_all` or empty string to perform
|
||||
// the operation on all indices.
|
||||
func (s *FieldStatsService) Index(index ...string) *FieldStatsService {
|
||||
s.index = append(s.index, index...)
|
||||
return s
|
||||
}
|
||||
|
||||
// AllowNoIndices indicates whether to ignore if a wildcard indices expression
|
||||
// resolves into no concrete indices.
|
||||
// (This includes `_all` string or when no indices have been specified).
|
||||
func (s *FieldStatsService) AllowNoIndices(allowNoIndices bool) *FieldStatsService {
|
||||
s.allowNoIndices = &allowNoIndices
|
||||
return s
|
||||
}
|
||||
|
||||
// ExpandWildcards indicates whether to expand wildcard expression to
|
||||
// concrete indices that are open, closed or both.
|
||||
func (s *FieldStatsService) ExpandWildcards(expandWildcards string) *FieldStatsService {
|
||||
s.expandWildcards = expandWildcards
|
||||
return s
|
||||
}
|
||||
|
||||
// Fields is a list of fields for to get field statistics
|
||||
// for (min value, max value, and more).
|
||||
func (s *FieldStatsService) Fields(fields ...string) *FieldStatsService {
|
||||
s.fields = append(s.fields, fields...)
|
||||
return s
|
||||
}
|
||||
|
||||
// IgnoreUnavailable is documented as: Whether specified concrete indices should be ignored when unavailable (missing or closed).
|
||||
func (s *FieldStatsService) IgnoreUnavailable(ignoreUnavailable bool) *FieldStatsService {
|
||||
s.ignoreUnavailable = &ignoreUnavailable
|
||||
return s
|
||||
}
|
||||
|
||||
// Level sets if stats should be returned on a per index level or on a cluster wide level;
|
||||
// should be one of 'cluster' or 'indices'; defaults to former
|
||||
func (s *FieldStatsService) Level(level string) *FieldStatsService {
|
||||
s.level = level
|
||||
return s
|
||||
}
|
||||
|
||||
// ClusterLevel is a helper that sets Level to "cluster".
|
||||
func (s *FieldStatsService) ClusterLevel() *FieldStatsService {
|
||||
s.level = FieldStatsClusterLevel
|
||||
return s
|
||||
}
|
||||
|
||||
// IndicesLevel is a helper that sets Level to "indices".
|
||||
func (s *FieldStatsService) IndicesLevel() *FieldStatsService {
|
||||
s.level = FieldStatsIndicesLevel
|
||||
return s
|
||||
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (s *FieldStatsService) Pretty(pretty bool) *FieldStatsService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// BodyJson is documented as: Field json objects containing the name and optionally a range to filter out indices result, that have results outside the defined bounds.
|
||||
func (s *FieldStatsService) BodyJson(body interface{}) *FieldStatsService {
|
||||
s.bodyJson = body
|
||||
return s
|
||||
}
|
||||
|
||||
// BodyString is documented as: Field json objects containing the name and optionally a range to filter out indices result, that have results outside the defined bounds.
|
||||
func (s *FieldStatsService) BodyString(body string) *FieldStatsService {
|
||||
s.bodyString = body
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *FieldStatsService) buildURL() (string, url.Values, error) {
|
||||
// Build URL
|
||||
var err error
|
||||
var path string
|
||||
if len(s.index) > 0 {
|
||||
path, err = uritemplates.Expand("/{index}/_field_stats", map[string]string{
|
||||
"index": strings.Join(s.index, ","),
|
||||
})
|
||||
} else {
|
||||
path = "/_field_stats"
|
||||
}
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.allowNoIndices != nil {
|
||||
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
|
||||
}
|
||||
if s.expandWildcards != "" {
|
||||
params.Set("expand_wildcards", s.expandWildcards)
|
||||
}
|
||||
if len(s.fields) > 0 {
|
||||
params.Set("fields", strings.Join(s.fields, ","))
|
||||
}
|
||||
if s.ignoreUnavailable != nil {
|
||||
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
|
||||
}
|
||||
if s.level != "" {
|
||||
params.Set("level", s.level)
|
||||
}
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *FieldStatsService) Validate() error {
|
||||
var invalid []string
|
||||
if s.level != "" && (s.level != FieldStatsIndicesLevel && s.level != FieldStatsClusterLevel) {
|
||||
invalid = append(invalid, "Level")
|
||||
}
|
||||
if len(invalid) != 0 {
|
||||
return fmt.Errorf("missing or invalid required fields: %v", invalid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do executes the operation.
|
||||
func (s *FieldStatsService) Do(ctx context.Context) (*FieldStatsResponse, error) {
|
||||
// Check pre-conditions
|
||||
if err := s.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get URL for request
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Setup HTTP request body
|
||||
var body interface{}
|
||||
if s.bodyJson != nil {
|
||||
body = s.bodyJson
|
||||
} else {
|
||||
body = s.bodyString
|
||||
}
|
||||
|
||||
// Get HTTP response
|
||||
res, err := s.client.PerformRequest(ctx, "POST", path, params, body, http.StatusNotFound)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO(oe): Is 404 really a valid response here?
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
return &FieldStatsResponse{make(map[string]IndexFieldStats)}, nil
|
||||
}
|
||||
|
||||
// Return operation response
|
||||
ret := new(FieldStatsResponse)
|
||||
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// -- Request --
|
||||
|
||||
// FieldStatsRequest can be used to set up the body to be used in the
|
||||
// Field Stats API.
|
||||
type FieldStatsRequest struct {
|
||||
Fields []string `json:"fields"`
|
||||
IndexConstraints map[string]*FieldStatsConstraints `json:"index_constraints,omitempty"`
|
||||
}
|
||||
|
||||
// FieldStatsConstraints is a constraint on a field.
|
||||
type FieldStatsConstraints struct {
|
||||
Min *FieldStatsComparison `json:"min_value,omitempty"`
|
||||
Max *FieldStatsComparison `json:"max_value,omitempty"`
|
||||
}
|
||||
|
||||
// FieldStatsComparison contain all comparison operations that can be used
|
||||
// in FieldStatsConstraints.
|
||||
type FieldStatsComparison struct {
|
||||
Lte interface{} `json:"lte,omitempty"`
|
||||
Lt interface{} `json:"lt,omitempty"`
|
||||
Gte interface{} `json:"gte,omitempty"`
|
||||
Gt interface{} `json:"gt,omitempty"`
|
||||
}
|
||||
|
||||
// -- Response --
|
||||
|
||||
// FieldStatsResponse is the response body content
|
||||
type FieldStatsResponse struct {
|
||||
Indices map[string]IndexFieldStats `json:"indices,omitempty"`
|
||||
}
|
||||
|
||||
// IndexFieldStats contains field stats for an index
|
||||
type IndexFieldStats struct {
|
||||
Fields map[string]FieldStats `json:"fields,omitempty"`
|
||||
}
|
||||
|
||||
// FieldStats contains stats of an individual field
|
||||
type FieldStats struct {
|
||||
Type string `json:"type"`
|
||||
MaxDoc int64 `json:"max_doc"`
|
||||
DocCount int64 `json:"doc_count"`
|
||||
Density int64 `json:"density"`
|
||||
SumDocFrequeny int64 `json:"sum_doc_freq"`
|
||||
SumTotalTermFrequency int64 `json:"sum_total_term_freq"`
|
||||
Searchable bool `json:"searchable"`
|
||||
Aggregatable bool `json:"aggregatable"`
|
||||
MinValue interface{} `json:"min_value"`
|
||||
MinValueAsString string `json:"min_value_as_string"`
|
||||
MaxValue interface{} `json:"max_value"`
|
||||
MaxValueAsString string `json:"max_value_as_string"`
|
||||
}
|
||||
282
vendor/gopkg.in/olivere/elastic.v5/field_stats_test.go
generated
vendored
Normal file
282
vendor/gopkg.in/olivere/elastic.v5/field_stats_test.go
generated
vendored
Normal file
@@ -0,0 +1,282 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFieldStatsURLs(t *testing.T) {
|
||||
tests := []struct {
|
||||
Service *FieldStatsService
|
||||
ExpectedPath string
|
||||
ExpectedParams url.Values
|
||||
}{
|
||||
{
|
||||
Service: &FieldStatsService{},
|
||||
ExpectedPath: "/_field_stats",
|
||||
ExpectedParams: url.Values{},
|
||||
},
|
||||
{
|
||||
Service: &FieldStatsService{
|
||||
level: FieldStatsClusterLevel,
|
||||
},
|
||||
ExpectedPath: "/_field_stats",
|
||||
ExpectedParams: url.Values{"level": []string{FieldStatsClusterLevel}},
|
||||
},
|
||||
{
|
||||
Service: &FieldStatsService{
|
||||
level: FieldStatsIndicesLevel,
|
||||
},
|
||||
ExpectedPath: "/_field_stats",
|
||||
ExpectedParams: url.Values{"level": []string{FieldStatsIndicesLevel}},
|
||||
},
|
||||
{
|
||||
Service: &FieldStatsService{
|
||||
level: FieldStatsClusterLevel,
|
||||
index: []string{"index1"},
|
||||
},
|
||||
ExpectedPath: "/index1/_field_stats",
|
||||
ExpectedParams: url.Values{"level": []string{FieldStatsClusterLevel}},
|
||||
},
|
||||
{
|
||||
Service: &FieldStatsService{
|
||||
level: FieldStatsIndicesLevel,
|
||||
index: []string{"index1", "index2"},
|
||||
},
|
||||
ExpectedPath: "/index1%2Cindex2/_field_stats",
|
||||
ExpectedParams: url.Values{"level": []string{FieldStatsIndicesLevel}},
|
||||
},
|
||||
{
|
||||
Service: &FieldStatsService{
|
||||
level: FieldStatsIndicesLevel,
|
||||
index: []string{"index_*"},
|
||||
},
|
||||
ExpectedPath: "/index_%2A/_field_stats",
|
||||
ExpectedParams: url.Values{"level": []string{FieldStatsIndicesLevel}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
gotPath, gotParams, err := test.Service.buildURL()
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error; got: %v", err)
|
||||
}
|
||||
if gotPath != test.ExpectedPath {
|
||||
t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
|
||||
}
|
||||
if gotParams.Encode() != test.ExpectedParams.Encode() {
|
||||
t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldStatsValidate(t *testing.T) {
|
||||
tests := []struct {
|
||||
Service *FieldStatsService
|
||||
Valid bool
|
||||
}{
|
||||
{
|
||||
Service: &FieldStatsService{},
|
||||
Valid: true,
|
||||
},
|
||||
{
|
||||
Service: &FieldStatsService{
|
||||
fields: []string{"field"},
|
||||
},
|
||||
Valid: true,
|
||||
},
|
||||
{
|
||||
Service: &FieldStatsService{
|
||||
bodyJson: &FieldStatsRequest{
|
||||
Fields: []string{"field"},
|
||||
},
|
||||
},
|
||||
Valid: true,
|
||||
},
|
||||
{
|
||||
Service: &FieldStatsService{
|
||||
level: FieldStatsClusterLevel,
|
||||
bodyJson: &FieldStatsRequest{
|
||||
Fields: []string{"field"},
|
||||
},
|
||||
},
|
||||
Valid: true,
|
||||
},
|
||||
{
|
||||
Service: &FieldStatsService{
|
||||
level: FieldStatsIndicesLevel,
|
||||
bodyJson: &FieldStatsRequest{
|
||||
Fields: []string{"field"},
|
||||
},
|
||||
},
|
||||
Valid: true,
|
||||
},
|
||||
{
|
||||
Service: &FieldStatsService{
|
||||
level: "random",
|
||||
},
|
||||
Valid: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
err := test.Service.Validate()
|
||||
isValid := err == nil
|
||||
if isValid != test.Valid {
|
||||
t.Errorf("expected validity to be %v, got %v", test.Valid, isValid)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldStatsRequestSerialize(t *testing.T) {
|
||||
req := &FieldStatsRequest{
|
||||
Fields: []string{"creation_date", "answer_count"},
|
||||
IndexConstraints: map[string]*FieldStatsConstraints{
|
||||
"creation_date": &FieldStatsConstraints{
|
||||
Min: &FieldStatsComparison{Gte: "2014-01-01T00:00:00.000Z"},
|
||||
Max: &FieldStatsComparison{Lt: "2015-01-01T10:00:00.000Z"},
|
||||
},
|
||||
},
|
||||
}
|
||||
data, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
t.Fatalf("marshaling to JSON failed: %v", err)
|
||||
}
|
||||
got := string(data)
|
||||
expected := `{"fields":["creation_date","answer_count"],"index_constraints":{"creation_date":{"min_value":{"gte":"2014-01-01T00:00:00.000Z"},"max_value":{"lt":"2015-01-01T10:00:00.000Z"}}}}`
|
||||
if got != expected {
|
||||
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldStatsRequestDeserialize(t *testing.T) {
|
||||
body := `{
|
||||
"fields" : ["creation_date", "answer_count"],
|
||||
"index_constraints" : {
|
||||
"creation_date" : {
|
||||
"min_value" : {
|
||||
"gte" : "2014-01-01T00:00:00.000Z"
|
||||
},
|
||||
"max_value" : {
|
||||
"lt" : "2015-01-01T10:00:00.000Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
var request FieldStatsRequest
|
||||
if err := json.Unmarshal([]byte(body), &request); err != nil {
|
||||
t.Errorf("unexpected error during unmarshalling: %v", err)
|
||||
}
|
||||
|
||||
sort.Sort(lexicographically{request.Fields})
|
||||
|
||||
expectedFields := []string{"answer_count", "creation_date"}
|
||||
if !reflect.DeepEqual(request.Fields, expectedFields) {
|
||||
t.Errorf("expected fields to be %v, got %v", expectedFields, request.Fields)
|
||||
}
|
||||
|
||||
constraints, ok := request.IndexConstraints["creation_date"]
|
||||
if !ok {
|
||||
t.Errorf("expected field creation_date, didn't find it!")
|
||||
}
|
||||
if constraints.Min.Lt != nil {
|
||||
t.Errorf("expected min value less than constraint to be empty, got %v", constraints.Min.Lt)
|
||||
}
|
||||
if constraints.Min.Gte != "2014-01-01T00:00:00.000Z" {
|
||||
t.Errorf("expected min value >= %v, found %v", "2014-01-01T00:00:00.000Z", constraints.Min.Gte)
|
||||
}
|
||||
if constraints.Max.Lt != "2015-01-01T10:00:00.000Z" {
|
||||
t.Errorf("expected max value < %v, found %v", "2015-01-01T10:00:00.000Z", constraints.Max.Lt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldStatsResponseUnmarshalling(t *testing.T) {
|
||||
clusterStats := `{
|
||||
"_shards": {
|
||||
"total": 1,
|
||||
"successful": 1,
|
||||
"failed": 0
|
||||
},
|
||||
"indices": {
|
||||
"_all": {
|
||||
"fields": {
|
||||
"creation_date": {
|
||||
"type": "date",
|
||||
"max_doc": 1326564,
|
||||
"doc_count": 564633,
|
||||
"density": 42,
|
||||
"sum_doc_freq": 2258532,
|
||||
"sum_total_term_freq": -1,
|
||||
"searchable": true,
|
||||
"aggregatable": true,
|
||||
"min_value":1483016404000,
|
||||
"min_value_as_string": "2016-12-29T13:00:04.000Z",
|
||||
"max_value":1484152326000,
|
||||
"max_value_as_string": "2017-01-11T16:32:06.000Z"
|
||||
},
|
||||
"answer_count": {
|
||||
"max_doc": 1326564,
|
||||
"doc_count": 139885,
|
||||
"density": 10,
|
||||
"sum_doc_freq": 559540,
|
||||
"sum_total_term_freq": -1,
|
||||
"searchable": true,
|
||||
"aggregatable": true,
|
||||
"min_value":1483016404000,
|
||||
"min_value_as_string": "2016-12-29T13:00:04.000Z",
|
||||
"max_value":1484152326000,
|
||||
"max_value_as_string": "2017-01-11T16:32:06.000Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
var response FieldStatsResponse
|
||||
if err := json.Unmarshal([]byte(clusterStats), &response); err != nil {
|
||||
t.Errorf("unexpected error during unmarshalling: %v", err)
|
||||
}
|
||||
|
||||
stats, ok := response.Indices["_all"]
|
||||
if !ok {
|
||||
t.Errorf("expected _all to be in the indices map, didn't find it")
|
||||
}
|
||||
|
||||
fieldStats, ok := stats.Fields["creation_date"]
|
||||
if !ok {
|
||||
t.Errorf("expected creation_date to be in the fields map, didn't find it")
|
||||
}
|
||||
if want, have := true, fieldStats.Searchable; want != have {
|
||||
t.Errorf("expected creation_date searchable to be %v, got %v", want, have)
|
||||
}
|
||||
if want, have := true, fieldStats.Aggregatable; want != have {
|
||||
t.Errorf("expected creation_date aggregatable to be %v, got %v", want, have)
|
||||
}
|
||||
if want, have := "2016-12-29T13:00:04.000Z", fieldStats.MinValueAsString; want != have {
|
||||
t.Errorf("expected creation_date min value string to be %q, got %q", want, have)
|
||||
}
|
||||
}
|
||||
|
||||
// lexicographically adapts a string slice to sort.Interface so it can be
// ordered ascending by the usual string comparison.
type lexicographically struct {
	strings []string
}

// Len reports the number of strings to sort.
func (s lexicographically) Len() int { return len(s.strings) }

// Less orders two strings by the built-in < comparison.
func (s lexicographically) Less(i, j int) bool { return s.strings[i] < s.strings[j] }

// Swap exchanges the strings at positions i and j.
func (s lexicographically) Swap(i, j int) {
	s.strings[j], s.strings[i] = s.strings[i], s.strings[j]
}
|
||||
48
vendor/gopkg.in/olivere/elastic.v5/geo_point.go
generated
vendored
Normal file
48
vendor/gopkg.in/olivere/elastic.v5/geo_point.go
generated
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// GeoPoint is a geographic position described via latitude and longitude.
type GeoPoint struct {
	Lat float64 `json:"lat"`
	Lon float64 `json:"lon"`
}

// Source returns the object to be serialized in Elasticsearch DSL.
func (pt *GeoPoint) Source() map[string]float64 {
	return map[string]float64{
		"lat": pt.Lat,
		"lon": pt.Lon,
	}
}

// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude.
func GeoPointFromLatLon(lat, lon float64) *GeoPoint {
	return &GeoPoint{Lat: lat, Lon: lon}
}

// GeoPointFromString initializes a new GeoPoint by a string that is
// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091".
// Whitespace around either coordinate is tolerated, e.g. "40.1, -70.1".
// It returns an error if the string is not of that form or a coordinate
// cannot be parsed as a float.
func GeoPointFromString(latLon string) (*GeoPoint, error) {
	latlon := strings.SplitN(latLon, ",", 2)
	if len(latlon) != 2 {
		return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon)
	}
	// strconv.ParseFloat does not trim whitespace itself, so trim each
	// coordinate to accept common inputs like "40.1, -70.1".
	lat, err := strconv.ParseFloat(strings.TrimSpace(latlon[0]), 64)
	if err != nil {
		return nil, err
	}
	lon, err := strconv.ParseFloat(strings.TrimSpace(latlon[1]), 64)
	if err != nil {
		return nil, err
	}
	return &GeoPoint{Lat: lat, Lon: lon}, nil
}
|
||||
24
vendor/gopkg.in/olivere/elastic.v5/geo_point_test.go
generated
vendored
Normal file
24
vendor/gopkg.in/olivere/elastic.v5/geo_point_test.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGeoPointSource(t *testing.T) {
|
||||
pt := GeoPoint{Lat: 40, Lon: -70}
|
||||
|
||||
data, err := json.Marshal(pt.Source())
|
||||
if err != nil {
|
||||
t.Fatalf("marshaling to JSON failed: %v", err)
|
||||
}
|
||||
got := string(data)
|
||||
expected := `{"lat":40,"lon":-70}`
|
||||
if got != expected {
|
||||
t.Errorf("expected\n%s\n,got:\n%s", expected, got)
|
||||
}
|
||||
}
|
||||
256
vendor/gopkg.in/olivere/elastic.v5/get.go
generated
vendored
Normal file
256
vendor/gopkg.in/olivere/elastic.v5/get.go
generated
vendored
Normal file
@@ -0,0 +1,256 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// GetService allows to get a typed JSON document from the index based
// on its id.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-get.html
// for details.
type GetService struct {
	client                        *Client              // HTTP client used to perform the request
	pretty                        bool                 // indent the JSON response
	index                         string               // index name (required)
	typ                           string               // document type (required; defaults to "_all")
	id                            string               // document ID (required)
	routing                       string               // specific routing value
	preference                    string               // node/shard preference
	storedFields                  []string             // stored fields to return
	refresh                       string               // refresh the shard before the get
	realtime                      *bool                // realtime vs. search mode (nil = server default)
	fsc                           *FetchSourceContext  // source filtering options
	version                       interface{}          // explicit version for concurrency control
	versionType                   string               // version type (e.g. "external")
	parent                        string               // parent document ID
	ignoreErrorsOnGeneratedFields *bool                // ignore errors on fields generated from the translog
}

// NewGetService creates a new GetService.
func NewGetService(client *Client) *GetService {
	return &GetService{
		client: client,
		// "_all" fetches the first document matching the ID across all types.
		typ: "_all",
	}
}
|
||||
|
||||
// Index is the name of the index.
func (s *GetService) Index(index string) *GetService {
	s.index = index
	return s
}

// Type is the type of the document (use `_all` to fetch the first document
// matching the ID across all types).
func (s *GetService) Type(typ string) *GetService {
	s.typ = typ
	return s
}

// Id is the document ID.
func (s *GetService) Id(id string) *GetService {
	s.id = id
	return s
}

// Parent is the ID of the parent document.
func (s *GetService) Parent(parent string) *GetService {
	s.parent = parent
	return s
}

// Routing is the specific routing value.
func (s *GetService) Routing(routing string) *GetService {
	s.routing = routing
	return s
}

// Preference specifies the node or shard the operation should be performed on (default: random).
func (s *GetService) Preference(preference string) *GetService {
	s.preference = preference
	return s
}

// StoredFields is a list of fields to return in the response.
func (s *GetService) StoredFields(storedFields ...string) *GetService {
	s.storedFields = append(s.storedFields, storedFields...)
	return s
}

// FetchSource indicates whether to return the _source of the document.
// It reuses an already-configured FetchSourceContext if one is present.
func (s *GetService) FetchSource(fetchSource bool) *GetService {
	if s.fsc == nil {
		s.fsc = NewFetchSourceContext(fetchSource)
	} else {
		s.fsc.SetFetchSource(fetchSource)
	}
	return s
}

// FetchSourceContext sets detailed source filtering options
// (e.g. includes/excludes) for the returned document, replacing any
// previously configured context.
func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService {
	s.fsc = fetchSourceContext
	return s
}

// Refresh the shard containing the document before performing the operation.
func (s *GetService) Refresh(refresh string) *GetService {
	s.refresh = refresh
	return s
}

// Realtime specifies whether to perform the operation in realtime or search mode.
func (s *GetService) Realtime(realtime bool) *GetService {
	s.realtime = &realtime
	return s
}

// VersionType is the specific version type.
func (s *GetService) VersionType(versionType string) *GetService {
	s.versionType = versionType
	return s
}

// Version is an explicit version number for concurrency control.
func (s *GetService) Version(version interface{}) *GetService {
	s.version = version
	return s
}

// IgnoreErrorsOnGeneratedFields indicates whether to ignore fields that
// are generated if the transaction log is accessed.
func (s *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService {
	s.ignoreErrorsOnGeneratedFields = &ignore
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *GetService) Pretty(pretty bool) *GetService {
	s.pretty = pretty
	return s
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *GetService) Validate() error {
|
||||
var invalid []string
|
||||
if s.id == "" {
|
||||
invalid = append(invalid, "Id")
|
||||
}
|
||||
if s.index == "" {
|
||||
invalid = append(invalid, "Index")
|
||||
}
|
||||
if s.typ == "" {
|
||||
invalid = append(invalid, "Type")
|
||||
}
|
||||
if len(invalid) > 0 {
|
||||
return fmt.Errorf("missing required fields: %v", invalid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
// It returns the expanded path "/{index}/{type}/{id}" plus all query
// string parameters derived from the configured options.
func (s *GetService) buildURL() (string, url.Values, error) {
	// Build URL
	path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
		"id":    s.id,
		"index": s.index,
		"type":  s.typ,
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters; options left at their zero value are omitted.
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.routing != "" {
		params.Set("routing", s.routing)
	}
	if s.parent != "" {
		params.Set("parent", s.parent)
	}
	if s.preference != "" {
		params.Set("preference", s.preference)
	}
	if len(s.storedFields) > 0 {
		params.Set("stored_fields", strings.Join(s.storedFields, ","))
	}
	if s.refresh != "" {
		params.Set("refresh", s.refresh)
	}
	if s.version != nil {
		params.Set("version", fmt.Sprintf("%v", s.version))
	}
	if s.versionType != "" {
		params.Set("version_type", s.versionType)
	}
	if s.realtime != nil {
		params.Set("realtime", fmt.Sprintf("%v", *s.realtime))
	}
	if s.ignoreErrorsOnGeneratedFields != nil {
		params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *s.ignoreErrorsOnGeneratedFields))
	}
	if s.fsc != nil {
		// Source filtering is encoded as its own set of query parameters.
		for k, values := range s.fsc.Query() {
			params.Add(k, strings.Join(values, ","))
		}
	}
	return path, params, nil
}
|
||||
|
||||
// Do executes the operation.
// It validates the configured options, performs the HTTP GET request and
// decodes the response body into a GetResult.
func (s *GetService) Do(ctx context.Context) (*GetResult, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(GetResult)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}
|
||||
|
||||
// -- Result of a get request.

// GetResult is the outcome of GetService.Do.
type GetResult struct {
	Index   string                 `json:"_index"`   // index meta field
	Type    string                 `json:"_type"`    // type meta field
	Id      string                 `json:"_id"`      // id meta field
	Uid     string                 `json:"_uid"`     // uid meta field (see MapperService.java for all meta fields)
	Routing string                 `json:"_routing"` // routing meta field
	Parent  string                 `json:"_parent"`  // parent meta field
	Version *int64                 `json:"_version"` // version number, when Version is set to true in SearchService
	Source  *json.RawMessage       `json:"_source,omitempty"`
	Found   bool                   `json:"found,omitempty"`
	Fields  map[string]interface{} `json:"fields,omitempty"`
	//Error   string                 `json:"error,omitempty"` // used only in MultiGet
	// TODO double-check that MultiGet now returns details error information
	Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet
}
|
||||
113
vendor/gopkg.in/olivere/elastic.v5/get_template.go
generated
vendored
Normal file
113
vendor/gopkg.in/olivere/elastic.v5/get_template.go
generated
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// GetTemplateService reads a search template.
// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-template.html.
type GetTemplateService struct {
	client      *Client     // HTTP client used to perform the request
	pretty      bool        // indent the JSON response
	id          string      // template ID (required)
	version     interface{} // explicit version for concurrency control
	versionType string      // version type
}

// NewGetTemplateService creates a new GetTemplateService.
func NewGetTemplateService(client *Client) *GetTemplateService {
	return &GetTemplateService{
		client: client,
	}
}

// Id is the template ID.
func (s *GetTemplateService) Id(id string) *GetTemplateService {
	s.id = id
	return s
}

// Version is an explicit version number for concurrency control.
func (s *GetTemplateService) Version(version interface{}) *GetTemplateService {
	s.version = version
	return s
}

// VersionType is a specific version type.
func (s *GetTemplateService) VersionType(versionType string) *GetTemplateService {
	s.versionType = versionType
	return s
}

// buildURL builds the URL for the operation.
func (s *GetTemplateService) buildURL() (string, url.Values, error) {
	// Build URL
	path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
		"id": s.id,
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters; zero-valued options are omitted.
	params := url.Values{}
	if s.version != nil {
		params.Set("version", fmt.Sprintf("%v", s.version))
	}
	if s.versionType != "" {
		params.Set("version_type", s.versionType)
	}

	return path, params, nil
}

// Validate checks if the operation is valid. Only the Id is required.
func (s *GetTemplateService) Validate() error {
	var invalid []string
	if s.id == "" {
		invalid = append(invalid, "Id")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation and returns the template.
func (s *GetTemplateService) Do(ctx context.Context) (*GetTemplateResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return result
	ret := new(GetTemplateResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// GetTemplateResponse is the response returned by GetTemplateService.Do.
type GetTemplateResponse struct {
	Template string `json:"template"`
}
|
||||
52
vendor/gopkg.in/olivere/elastic.v5/get_template_test.go
generated
vendored
Normal file
52
vendor/gopkg.in/olivere/elastic.v5/get_template_test.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestGetPutDeleteTemplate exercises the put/get/delete lifecycle of a
// stored search template against a live cluster.
func TestGetPutDeleteTemplate(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	// This is a search template, not an index template!
	tmpl := `{
	"template": {
		"query" : { "term" : { "{{my_field}}" : "{{my_value}}" } },
		"size" : "{{my_size}}"
	},
	"params":{
		"my_field" : "user",
		"my_value" : "olivere",
		"my_size" : 5
	}
}`
	putres, err := client.PutTemplate().Id("elastic-template").BodyString(tmpl).Do(context.TODO())
	if err != nil {
		t.Fatalf("expected no error; got: %v", err)
	}
	if putres == nil {
		t.Fatalf("expected response; got: %v", putres)
	}
	if !putres.Acknowledged {
		t.Fatalf("expected template creation to be acknowledged; got: %v", putres.Acknowledged)
	}

	// Always delete template
	defer client.DeleteTemplate().Id("elastic-template").Do(context.TODO())

	// Get template
	getres, err := client.GetTemplate().Id("elastic-template").Do(context.TODO())
	if err != nil {
		t.Fatalf("expected no error; got: %v", err)
	}
	if getres == nil {
		t.Fatalf("expected response; got: %v", getres)
	}
	if getres.Template == "" {
		t.Errorf("expected template %q; got: %q", tmpl, getres.Template)
	}
}
|
||||
166
vendor/gopkg.in/olivere/elastic.v5/get_test.go
generated
vendored
Normal file
166
vendor/gopkg.in/olivere/elastic.v5/get_test.go
generated
vendored
Normal file
@@ -0,0 +1,166 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestGet indexes a document and verifies it can be retrieved by ID,
// and that fetching a non-existent ID yields a NotFound error.
func TestGet(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	// Get document 1
	res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if res.Found != true {
		t.Errorf("expected Found = true; got %v", res.Found)
	}
	if res.Source == nil {
		t.Errorf("expected Source != nil; got %v", res.Source)
	}

	// Get non existent document 99
	res, err = client.Get().Index(testIndexName).Type("tweet").Id("99").Do(context.TODO())
	if err == nil {
		t.Fatalf("expected error; got: %v", err)
	}
	if !IsNotFound(err) {
		t.Errorf("expected NotFound error; got: %v", err)
	}
	if res != nil {
		t.Errorf("expected no response; got: %v", res)
	}
}
|
||||
|
||||
// TestGetWithSourceFiltering verifies the FetchSource and
// FetchSourceContext options of the Get API: disabling _source entirely,
// and excluding a single field from the returned _source.
func TestGetWithSourceFiltering(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	// Get document 1, without source
	res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSource(false).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if res.Found != true {
		t.Errorf("expected Found = true; got %v", res.Found)
	}
	if res.Source != nil {
		t.Errorf("expected Source == nil; got %v", res.Source)
	}

	// Get document 1, exclude Message field
	fsc := NewFetchSourceContext(true).Exclude("message")
	res, err = client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSourceContext(fsc).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if res.Found != true {
		t.Errorf("expected Found = true; got %v", res.Found)
	}
	if res.Source == nil {
		t.Errorf("expected Source != nil; got %v", res.Source)
	}
	// The excluded field must come back as its zero value after decoding.
	var tw tweet
	err = json.Unmarshal(*res.Source, &tw)
	if err != nil {
		t.Fatal(err)
	}
	if tw.User != "olivere" {
		t.Errorf("expected user %q; got: %q", "olivere", tw.User)
	}
	if tw.Message != "" {
		t.Errorf("expected message %q; got: %q", "", tw.Message)
	}
}
|
||||
|
||||
// TestGetWithFields verifies that StoredFields limits the response to the
// requested stored fields only.
func TestGetWithFields(t *testing.T) {
	client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	// Get document 1, specifying fields
	res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").StoredFields("message").Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if res.Found != true {
		t.Errorf("expected Found = true; got: %v", res.Found)
	}

	// We must NOT have the "user" field
	_, ok := res.Fields["user"]
	if ok {
		t.Fatalf("expected no field %q in document", "user")
	}

	// We must have the "message" field
	messageField, ok := res.Fields["message"]
	if !ok {
		t.Fatalf("expected field %q in document", "message")
	}

	// Depending on the version of elasticsearch the message field will be returned
	// as a string or a slice of strings. This test works in both cases.

	messageString, ok := messageField.(string)
	if !ok {
		messageArray, ok := messageField.([]interface{})
		if !ok {
			t.Fatalf("expected field %q to be a string or a slice of strings; got: %T", "message", messageField)
		} else {
			messageString, ok = messageArray[0].(string)
			if !ok {
				t.Fatalf("expected field %q to be a string or a slice of strings; got: %T", "message", messageField)
			}
		}
	}

	if messageString != tweet1.Message {
		t.Errorf("expected message %q; got: %q", tweet1.Message, messageString)
	}
}
|
||||
|
||||
func TestGetValidate(t *testing.T) {
|
||||
// Mitigate against http://stackoverflow.com/questions/27491738/elasticsearch-go-index-failures-no-feature-for-name
|
||||
client := setupTestClientAndCreateIndex(t)
|
||||
|
||||
if _, err := client.Get().Do(context.TODO()); err == nil {
|
||||
t.Fatal("expected Get to fail")
|
||||
}
|
||||
if _, err := client.Get().Index(testIndexName).Do(context.TODO()); err == nil {
|
||||
t.Fatal("expected Get to fail")
|
||||
}
|
||||
if _, err := client.Get().Type("tweet").Do(context.TODO()); err == nil {
|
||||
t.Fatal("expected Get to fail")
|
||||
}
|
||||
if _, err := client.Get().Id("1").Do(context.TODO()); err == nil {
|
||||
t.Fatal("expected Get to fail")
|
||||
}
|
||||
if _, err := client.Get().Index(testIndexName).Type("tweet").Do(context.TODO()); err == nil {
|
||||
t.Fatal("expected Get to fail")
|
||||
}
|
||||
if _, err := client.Get().Type("tweet").Id("1").Do(context.TODO()); err == nil {
|
||||
t.Fatal("expected Get to fail")
|
||||
}
|
||||
}
|
||||
451
vendor/gopkg.in/olivere/elastic.v5/highlight.go
generated
vendored
Normal file
451
vendor/gopkg.in/olivere/elastic.v5/highlight.go
generated
vendored
Normal file
@@ -0,0 +1,451 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
// Highlight allows highlighting search results on one or more fields.
// For details, see:
// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-highlighting.html
type Highlight struct {
	fields                []*HighlighterField    // per-field highlighting settings
	tagsSchema            *string                // predefined tag schema (e.g. "styled")
	highlightFilter       *bool                  // whether to filter highlighting
	fragmentSize          *int                   // size of highlighted fragments in characters
	numOfFragments        *int                   // maximum number of fragments to return
	preTags               []string               // tags inserted before highlighted text
	postTags              []string               // tags inserted after highlighted text
	order                 *string                // fragment ordering (e.g. "score")
	encoder               *string                // encoder for highlighted text (e.g. "html")
	requireFieldMatch     *bool                  // only highlight fields the query matched
	boundaryMaxScan       *int                   // how far to scan for boundary characters
	boundaryChars         *string                // boundary characters for fragmenting
	highlighterType       *string                // highlighter implementation (e.g. "fvh", "plain")
	fragmenter            *string                // fragmenter for the plain highlighter
	highlightQuery        Query                  // query used for highlighting instead of the search query
	noMatchSize           *int                   // snippet length to return when there is no match
	phraseLimit           *int                   // maximum number of phrases to consider
	options               map[string]interface{} // implementation-specific options
	forceSource           *bool                  // highlight from _source even if fields are stored
	useExplicitFieldOrder bool                   // serialize fields as an ordered list instead of a map
}

// NewHighlight creates a new Highlight with an empty options map.
func NewHighlight() *Highlight {
	hl := &Highlight{
		options: make(map[string]interface{}),
	}
	return hl
}
|
||||
|
||||
// Fields adds per-field highlighting settings.
func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight {
	hl.fields = append(hl.fields, fields...)
	return hl
}

// Field adds a field to highlight with default settings.
func (hl *Highlight) Field(name string) *Highlight {
	field := NewHighlighterField(name)
	hl.fields = append(hl.fields, field)
	return hl
}

// TagsSchema sets a predefined tag schema, e.g. "styled".
func (hl *Highlight) TagsSchema(schemaName string) *Highlight {
	hl.tagsSchema = &schemaName
	return hl
}

// HighlightFilter sets whether to filter highlighting.
func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight {
	hl.highlightFilter = &highlightFilter
	return hl
}

// FragmentSize sets the size of highlighted fragments in characters.
func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight {
	hl.fragmentSize = &fragmentSize
	return hl
}

// NumOfFragments sets the maximum number of fragments to return.
func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight {
	hl.numOfFragments = &numOfFragments
	return hl
}

// Encoder sets the encoder for highlighted text, e.g. "html".
func (hl *Highlight) Encoder(encoder string) *Highlight {
	hl.encoder = &encoder
	return hl
}

// PreTags adds tags to insert before highlighted text.
func (hl *Highlight) PreTags(preTags ...string) *Highlight {
	hl.preTags = append(hl.preTags, preTags...)
	return hl
}

// PostTags adds tags to insert after highlighted text.
func (hl *Highlight) PostTags(postTags ...string) *Highlight {
	hl.postTags = append(hl.postTags, postTags...)
	return hl
}

// Order sets the ordering of highlighted fragments, e.g. "score".
func (hl *Highlight) Order(order string) *Highlight {
	hl.order = &order
	return hl
}

// RequireFieldMatch sets whether only fields matched by the query are highlighted.
func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight {
	hl.requireFieldMatch = &requireFieldMatch
	return hl
}

// BoundaryMaxScan sets how far to scan for boundary characters.
func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight {
	hl.boundaryMaxScan = &boundaryMaxScan
	return hl
}

// BoundaryChars sets the boundary characters used when fragmenting.
func (hl *Highlight) BoundaryChars(boundaryChars string) *Highlight {
	hl.boundaryChars = &boundaryChars
	return hl
}

// HighlighterType sets the highlighter implementation to use, e.g. "fvh" or "plain".
func (hl *Highlight) HighlighterType(highlighterType string) *Highlight {
	hl.highlighterType = &highlighterType
	return hl
}

// Fragmenter sets the fragmenter for the plain highlighter.
func (hl *Highlight) Fragmenter(fragmenter string) *Highlight {
	hl.fragmenter = &fragmenter
	return hl
}

// HighlighQuery sets a query to use for highlighting instead of the search query.
// NOTE(review): the method name is missing a "t" ("Highligh[t]Query"); it is
// kept as-is because renaming would break the public API.
func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight {
	hl.highlightQuery = highlightQuery
	return hl
}

// NoMatchSize sets the length of the snippet returned when nothing matched.
func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight {
	hl.noMatchSize = &noMatchSize
	return hl
}

// Options replaces the implementation-specific options map.
func (hl *Highlight) Options(options map[string]interface{}) *Highlight {
	hl.options = options
	return hl
}

// ForceSource sets whether to highlight from _source even if fields are stored.
func (hl *Highlight) ForceSource(forceSource bool) *Highlight {
	hl.forceSource = &forceSource
	return hl
}

// UseExplicitFieldOrder sets whether fields are serialized as an ordered
// list instead of a map, preserving the order in which they were added.
func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight {
	hl.useExplicitFieldOrder = useExplicitFieldOrder
	return hl
}
|
||||
|
||||
// Creates the query source for the bool query.
|
||||
func (hl *Highlight) Source() (interface{}, error) {
|
||||
// Returns the map inside of "highlight":
|
||||
// "highlight":{
|
||||
// ... this ...
|
||||
// }
|
||||
source := make(map[string]interface{})
|
||||
if hl.tagsSchema != nil {
|
||||
source["tags_schema"] = *hl.tagsSchema
|
||||
}
|
||||
if hl.preTags != nil && len(hl.preTags) > 0 {
|
||||
source["pre_tags"] = hl.preTags
|
||||
}
|
||||
if hl.postTags != nil && len(hl.postTags) > 0 {
|
||||
source["post_tags"] = hl.postTags
|
||||
}
|
||||
if hl.order != nil {
|
||||
source["order"] = *hl.order
|
||||
}
|
||||
if hl.highlightFilter != nil {
|
||||
source["highlight_filter"] = *hl.highlightFilter
|
||||
}
|
||||
if hl.fragmentSize != nil {
|
||||
source["fragment_size"] = *hl.fragmentSize
|
||||
}
|
||||
if hl.numOfFragments != nil {
|
||||
source["number_of_fragments"] = *hl.numOfFragments
|
||||
}
|
||||
if hl.encoder != nil {
|
||||
source["encoder"] = *hl.encoder
|
||||
}
|
||||
if hl.requireFieldMatch != nil {
|
||||
source["require_field_match"] = *hl.requireFieldMatch
|
||||
}
|
||||
if hl.boundaryMaxScan != nil {
|
||||
source["boundary_max_scan"] = *hl.boundaryMaxScan
|
||||
}
|
||||
if hl.boundaryChars != nil {
|
||||
source["boundary_chars"] = *hl.boundaryChars
|
||||
}
|
||||
if hl.highlighterType != nil {
|
||||
source["type"] = *hl.highlighterType
|
||||
}
|
||||
if hl.fragmenter != nil {
|
||||
source["fragmenter"] = *hl.fragmenter
|
||||
}
|
||||
if hl.highlightQuery != nil {
|
||||
src, err := hl.highlightQuery.Source()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
source["highlight_query"] = src
|
||||
}
|
||||
if hl.noMatchSize != nil {
|
||||
source["no_match_size"] = *hl.noMatchSize
|
||||
}
|
||||
if hl.phraseLimit != nil {
|
||||
source["phrase_limit"] = *hl.phraseLimit
|
||||
}
|
||||
if hl.options != nil && len(hl.options) > 0 {
|
||||
source["options"] = hl.options
|
||||
}
|
||||
if hl.forceSource != nil {
|
||||
source["force_source"] = *hl.forceSource
|
||||
}
|
||||
|
||||
if hl.fields != nil && len(hl.fields) > 0 {
|
||||
if hl.useExplicitFieldOrder {
|
||||
// Use a slice for the fields
|
||||
var fields []map[string]interface{}
|
||||
for _, field := range hl.fields {
|
||||
src, err := field.Source()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fmap := make(map[string]interface{})
|
||||
fmap[field.Name] = src
|
||||
fields = append(fields, fmap)
|
||||
}
|
||||
source["fields"] = fields
|
||||
} else {
|
||||
// Use a map for the fields
|
||||
fields := make(map[string]interface{}, 0)
|
||||
for _, field := range hl.fields {
|
||||
src, err := field.Source()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fields[field.Name] = src
|
||||
}
|
||||
source["fields"] = fields
|
||||
}
|
||||
}
|
||||
|
||||
return source, nil
|
||||
}
|
||||
|
||||
// HighlighterField specifies a highlighted field.
|
||||
type HighlighterField struct {
|
||||
Name string
|
||||
|
||||
preTags []string
|
||||
postTags []string
|
||||
fragmentSize int
|
||||
fragmentOffset int
|
||||
numOfFragments int
|
||||
highlightFilter *bool
|
||||
order *string
|
||||
requireFieldMatch *bool
|
||||
boundaryMaxScan int
|
||||
boundaryChars []rune
|
||||
highlighterType *string
|
||||
fragmenter *string
|
||||
highlightQuery Query
|
||||
noMatchSize *int
|
||||
matchedFields []string
|
||||
phraseLimit *int
|
||||
options map[string]interface{}
|
||||
forceSource *bool
|
||||
|
||||
/*
|
||||
Name string
|
||||
preTags []string
|
||||
postTags []string
|
||||
fragmentSize int
|
||||
numOfFragments int
|
||||
fragmentOffset int
|
||||
highlightFilter *bool
|
||||
order string
|
||||
requireFieldMatch *bool
|
||||
boundaryMaxScan int
|
||||
boundaryChars []rune
|
||||
highlighterType string
|
||||
fragmenter string
|
||||
highlightQuery Query
|
||||
noMatchSize *int
|
||||
matchedFields []string
|
||||
options map[string]interface{}
|
||||
forceSource *bool
|
||||
*/
|
||||
}
|
||||
|
||||
func NewHighlighterField(name string) *HighlighterField {
|
||||
return &HighlighterField{
|
||||
Name: name,
|
||||
preTags: make([]string, 0),
|
||||
postTags: make([]string, 0),
|
||||
fragmentSize: -1,
|
||||
fragmentOffset: -1,
|
||||
numOfFragments: -1,
|
||||
boundaryMaxScan: -1,
|
||||
boundaryChars: make([]rune, 0),
|
||||
matchedFields: make([]string, 0),
|
||||
options: make(map[string]interface{}),
|
||||
}
|
||||
}
|
||||
|
||||
// PreTags appends tags to insert before highlighted snippets
// (serialized as "pre_tags").
func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField {
	f.preTags = append(f.preTags, preTags...)
	return f
}

// PostTags appends tags to insert after highlighted snippets
// (serialized as "post_tags").
func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField {
	f.postTags = append(f.postTags, postTags...)
	return f
}

// FragmentSize sets the "fragment_size" option for this field.
func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField {
	f.fragmentSize = fragmentSize
	return f
}

// FragmentOffset sets the "fragment_offset" option for this field.
func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField {
	f.fragmentOffset = fragmentOffset
	return f
}

// NumOfFragments sets the "number_of_fragments" option for this field.
func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField {
	f.numOfFragments = numOfFragments
	return f
}

// HighlightFilter sets the "highlight_filter" option for this field.
func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField {
	f.highlightFilter = &highlightFilter
	return f
}

// Order sets the "order" option for this field.
func (f *HighlighterField) Order(order string) *HighlighterField {
	f.order = &order
	return f
}

// RequireFieldMatch sets the "require_field_match" option for this field.
func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField {
	f.requireFieldMatch = &requireFieldMatch
	return f
}

// BoundaryMaxScan sets the "boundary_max_scan" option for this field.
func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField {
	f.boundaryMaxScan = boundaryMaxScan
	return f
}

// BoundaryChars appends runes to the "boundary_chars" option for this field.
func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField {
	f.boundaryChars = append(f.boundaryChars, boundaryChars...)
	return f
}

// HighlighterType sets the highlighter implementation to use
// (serialized as "type").
func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField {
	f.highlighterType = &highlighterType
	return f
}

// Fragmenter sets the "fragmenter" option for this field.
func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField {
	f.fragmenter = &fragmenter
	return f
}

// HighlightQuery sets a query whose serialized form is emitted as
// "highlight_query" for this field.
func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField {
	f.highlightQuery = highlightQuery
	return f
}

// NoMatchSize sets the "no_match_size" option for this field.
func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField {
	f.noMatchSize = &noMatchSize
	return f
}

// Options replaces the free-form "options" map for this field.
// Note that, unlike the other collection setters, this overwrites rather
// than appends.
func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField {
	f.options = options
	return f
}

// MatchedFields appends field names to the "matched_fields" option.
func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField {
	f.matchedFields = append(f.matchedFields, matchedFields...)
	return f
}

// PhraseLimit sets the "phrase_limit" option for this field.
func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField {
	f.phraseLimit = &phraseLimit
	return f
}

// ForceSource sets the "force_source" option for this field.
func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField {
	f.forceSource = &forceSource
	return f
}
|
||||
|
||||
func (f *HighlighterField) Source() (interface{}, error) {
|
||||
source := make(map[string]interface{})
|
||||
|
||||
if f.preTags != nil && len(f.preTags) > 0 {
|
||||
source["pre_tags"] = f.preTags
|
||||
}
|
||||
if f.postTags != nil && len(f.postTags) > 0 {
|
||||
source["post_tags"] = f.postTags
|
||||
}
|
||||
if f.fragmentSize != -1 {
|
||||
source["fragment_size"] = f.fragmentSize
|
||||
}
|
||||
if f.numOfFragments != -1 {
|
||||
source["number_of_fragments"] = f.numOfFragments
|
||||
}
|
||||
if f.fragmentOffset != -1 {
|
||||
source["fragment_offset"] = f.fragmentOffset
|
||||
}
|
||||
if f.highlightFilter != nil {
|
||||
source["highlight_filter"] = *f.highlightFilter
|
||||
}
|
||||
if f.order != nil {
|
||||
source["order"] = *f.order
|
||||
}
|
||||
if f.requireFieldMatch != nil {
|
||||
source["require_field_match"] = *f.requireFieldMatch
|
||||
}
|
||||
if f.boundaryMaxScan != -1 {
|
||||
source["boundary_max_scan"] = f.boundaryMaxScan
|
||||
}
|
||||
if f.boundaryChars != nil && len(f.boundaryChars) > 0 {
|
||||
source["boundary_chars"] = f.boundaryChars
|
||||
}
|
||||
if f.highlighterType != nil {
|
||||
source["type"] = *f.highlighterType
|
||||
}
|
||||
if f.fragmenter != nil {
|
||||
source["fragmenter"] = *f.fragmenter
|
||||
}
|
||||
if f.highlightQuery != nil {
|
||||
src, err := f.highlightQuery.Source()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
source["highlight_query"] = src
|
||||
}
|
||||
if f.noMatchSize != nil {
|
||||
source["no_match_size"] = *f.noMatchSize
|
||||
}
|
||||
if f.matchedFields != nil && len(f.matchedFields) > 0 {
|
||||
source["matched_fields"] = f.matchedFields
|
||||
}
|
||||
if f.phraseLimit != nil {
|
||||
source["phrase_limit"] = *f.phraseLimit
|
||||
}
|
||||
if f.options != nil && len(f.options) > 0 {
|
||||
source["options"] = f.options
|
||||
}
|
||||
if f.forceSource != nil {
|
||||
source["force_source"] = *f.forceSource
|
||||
}
|
||||
|
||||
return source, nil
|
||||
}
|
||||
209
vendor/gopkg.in/olivere/elastic.v5/highlight_test.go
generated
vendored
Normal file
209
vendor/gopkg.in/olivere/elastic.v5/highlight_test.go
generated
vendored
Normal file
@@ -0,0 +1,209 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestHighlighterField verifies that an untouched field serializes as an
// empty JSON object.
func TestHighlighterField(t *testing.T) {
	field := NewHighlighterField("grade")
	src, err := field.Source()
	if err != nil {
		t.Fatal(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestHighlighterFieldWithOptions verifies that explicitly set fragment
// options are serialized.
func TestHighlighterFieldWithOptions(t *testing.T) {
	field := NewHighlighterField("grade").FragmentSize(2).NumOfFragments(1)
	src, err := field.Source()
	if err != nil {
		t.Fatal(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"fragment_size":2,"number_of_fragments":1}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestHighlightWithStringField verifies that a field added by name only
// serializes as an empty per-field object.
func TestHighlightWithStringField(t *testing.T) {
	builder := NewHighlight().Field("grade")
	src, err := builder.Source()
	if err != nil {
		t.Fatal(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"fields":{"grade":{}}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestHighlightWithFields verifies a single HighlighterField is nested
// under "fields".
func TestHighlightWithFields(t *testing.T) {
	gradeField := NewHighlighterField("grade")
	builder := NewHighlight().Fields(gradeField)
	src, err := builder.Source()
	if err != nil {
		t.Fatal(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"fields":{"grade":{}}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestHighlightWithMultipleFields verifies several fields serialize into
// one "fields" map (JSON object keys come out sorted by encoding/json).
func TestHighlightWithMultipleFields(t *testing.T) {
	gradeField := NewHighlighterField("grade")
	colorField := NewHighlighterField("color")
	builder := NewHighlight().Fields(gradeField, colorField)
	src, err := builder.Source()
	if err != nil {
		t.Fatal(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"fields":{"color":{},"grade":{}}}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestHighlighterWithExplicitFieldOrder verifies that UseExplicitFieldOrder
// switches "fields" from a map to an ordered array of single-entry objects.
func TestHighlighterWithExplicitFieldOrder(t *testing.T) {
	gradeField := NewHighlighterField("grade").FragmentSize(2)
	colorField := NewHighlighterField("color").FragmentSize(2).NumOfFragments(1)
	builder := NewHighlight().Fields(gradeField, colorField).UseExplicitFieldOrder(true)
	src, err := builder.Source()
	if err != nil {
		t.Fatal(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"fields":[{"grade":{"fragment_size":2}},{"color":{"fragment_size":2,"number_of_fragments":1}}]}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestHighlightWithBoundaryChars verifies "boundary_chars" serialization.
func TestHighlightWithBoundaryChars(t *testing.T) {
	builder := NewHighlight().BoundaryChars(" \t\r")
	src, err := builder.Source()
	if err != nil {
		t.Fatal(err)
	}
	data, err := json.Marshal(src)
	if err != nil {
		t.Fatalf("marshaling to JSON failed: %v", err)
	}
	got := string(data)
	expected := `{"boundary_chars":" \t\r"}`
	if got != expected {
		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
	}
}

// TestHighlightWithTermQuery is an integration test (requires a live
// Elasticsearch cluster via setupTestClientAndCreateIndex): it indexes
// three documents, searches with highlighting enabled, and checks that
// the matched term comes back wrapped in the configured tags.
func TestHighlightWithTermQuery(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun to do."}

	// Add all documents
	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	_, err = client.Flush().Index(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	// Specify highlighter
	hl := NewHighlight()
	hl = hl.Fields(NewHighlighterField("message"))
	hl = hl.PreTags("<em>").PostTags("</em>")

	// Match all should return all documents
	query := NewPrefixQuery("message", "golang")
	searchResult, err := client.Search().
		Index(testIndexName).
		Highlight(hl).
		Query(query).
		Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if searchResult.Hits == nil {
		t.Fatalf("expected SearchResult.Hits != nil; got nil")
	}
	if searchResult.Hits.TotalHits != 1 {
		t.Fatalf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
	}
	if len(searchResult.Hits.Hits) != 1 {
		t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
	}

	hit := searchResult.Hits.Hits[0]
	var tw tweet
	if err := json.Unmarshal(*hit.Source, &tw); err != nil {
		t.Fatal(err)
	}
	if hit.Highlight == nil || len(hit.Highlight) == 0 {
		t.Fatal("expected hit to have a highlight; got nil")
	}
	// hl here shadows the highlighter above; it is the slice of
	// highlighted snippets for the "message" field.
	if hl, found := hit.Highlight["message"]; found {
		if len(hl) != 1 {
			t.Fatalf("expected to have one highlight for field \"message\"; got %d", len(hl))
		}
		expected := "Welcome to <em>Golang</em> and Elasticsearch."
		if hl[0] != expected {
			t.Errorf("expected to have highlight \"%s\"; got \"%s\"", expected, hl[0])
		}
	} else {
		t.Fatal("expected to have a highlight on field \"message\"; got none")
	}
}
|
||||
288
vendor/gopkg.in/olivere/elastic.v5/index.go
generated
vendored
Normal file
288
vendor/gopkg.in/olivere/elastic.v5/index.go
generated
vendored
Normal file
@@ -0,0 +1,288 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// IndexService adds or updates a typed JSON document in a specified index,
// making it searchable.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html
// for details.
type IndexService struct {
	client              *Client // connection used by Do to perform the request
	pretty              bool    // indent the JSON response
	id                  string  // document ID; empty means auto-generate (see buildURL)
	index               string  // target index (required; see Validate)
	typ                 string  // mapping type (required; see Validate)
	parent              string
	routing             string
	timeout             string
	timestamp           string
	ttl                 string
	version             interface{}
	opType              string
	versionType         string
	refresh             string
	waitForActiveShards string
	pipeline            string
	bodyJson            interface{} // document as a serializable value; takes precedence over bodyString in Do
	bodyString          string      // document as a raw string; used only when bodyJson is nil
}
|
||||
|
||||
// NewIndexService creates a new IndexService bound to the given client.
// All options are left at their zero values; at minimum Index, Type, and
// a body must be set before calling Do (see Validate).
func NewIndexService(client *Client) *IndexService {
	return &IndexService{
		client: client,
	}
}
|
||||
|
||||
// Id is the document ID. If left empty, Elasticsearch generates an ID
// automatically (see buildURL).
func (s *IndexService) Id(id string) *IndexService {
	s.id = id
	return s
}

// Index is the name of the index.
func (s *IndexService) Index(index string) *IndexService {
	s.index = index
	return s
}

// Type is the type of the document.
func (s *IndexService) Type(typ string) *IndexService {
	s.typ = typ
	return s
}

// WaitForActiveShards sets the number of shard copies that must be active
// before proceeding with the index operation. Defaults to 1, meaning the
// primary shard only. Set to `all` for all shard copies, otherwise set to
// any non-negative value less than or equal to the total number of copies
// for the shard (number of replicas + 1).
func (s *IndexService) WaitForActiveShards(waitForActiveShards string) *IndexService {
	s.waitForActiveShards = waitForActiveShards
	return s
}

// Pipeline specifies the pipeline id to preprocess incoming documents with.
func (s *IndexService) Pipeline(pipeline string) *IndexService {
	s.pipeline = pipeline
	return s
}

// Refresh the index after performing the operation.
func (s *IndexService) Refresh(refresh string) *IndexService {
	s.refresh = refresh
	return s
}

// Ttl is an expiration time for the document.
func (s *IndexService) Ttl(ttl string) *IndexService {
	s.ttl = ttl
	return s
}

// TTL is an expiration time for the document (alias for Ttl).
func (s *IndexService) TTL(ttl string) *IndexService {
	s.ttl = ttl
	return s
}

// Version is an explicit version number for concurrency control.
// It is formatted with %v when building the query string, so any
// printable value is accepted (see buildURL).
func (s *IndexService) Version(version interface{}) *IndexService {
	s.version = version
	return s
}

// OpType is an explicit operation type, i.e. "create" or "index" (default).
func (s *IndexService) OpType(opType string) *IndexService {
	s.opType = opType
	return s
}

// Parent is the ID of the parent document.
func (s *IndexService) Parent(parent string) *IndexService {
	s.parent = parent
	return s
}

// Routing is a specific routing value.
func (s *IndexService) Routing(routing string) *IndexService {
	s.routing = routing
	return s
}

// Timeout is an explicit operation timeout.
func (s *IndexService) Timeout(timeout string) *IndexService {
	s.timeout = timeout
	return s
}

// Timestamp is an explicit timestamp for the document.
func (s *IndexService) Timestamp(timestamp string) *IndexService {
	s.timestamp = timestamp
	return s
}

// VersionType is a specific version type.
func (s *IndexService) VersionType(versionType string) *IndexService {
	s.versionType = versionType
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IndexService) Pretty(pretty bool) *IndexService {
	s.pretty = pretty
	return s
}

// BodyJson is the document as a serializable JSON interface.
// It takes precedence over BodyString when both are set (see Do).
func (s *IndexService) BodyJson(body interface{}) *IndexService {
	s.bodyJson = body
	return s
}

// BodyString is the document encoded as a string.
// It is ignored when BodyJson is also set (see Do).
func (s *IndexService) BodyString(body string) *IndexService {
	s.bodyString = body
	return s
}
|
||||
|
||||
// buildURL builds the HTTP method, path, and query parameters for the
// index operation. Documents with an explicit ID are PUT to
// /{index}/{type}/{id}; without an ID they are POSTed to /{index}/{type}/
// so Elasticsearch generates one. Only explicitly set options are added
// to the query string.
func (s *IndexService) buildURL() (string, string, url.Values, error) {
	var err error
	var method, path string

	if s.id != "" {
		// Create document with manual id
		method = "PUT"
		path, err = uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
			"id":    s.id,
			"index": s.index,
			"type":  s.typ,
		})
	} else {
		// Automatic ID generation
		// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html#index-creation
		method = "POST"
		path, err = uritemplates.Expand("/{index}/{type}/", map[string]string{
			"index": s.index,
			"type":  s.typ,
		})
	}
	if err != nil {
		return "", "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.waitForActiveShards != "" {
		params.Set("wait_for_active_shards", s.waitForActiveShards)
	}
	if s.refresh != "" {
		params.Set("refresh", s.refresh)
	}
	if s.opType != "" {
		params.Set("op_type", s.opType)
	}
	if s.parent != "" {
		params.Set("parent", s.parent)
	}
	if s.pipeline != "" {
		params.Set("pipeline", s.pipeline)
	}
	if s.routing != "" {
		params.Set("routing", s.routing)
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	if s.timestamp != "" {
		params.Set("timestamp", s.timestamp)
	}
	if s.ttl != "" {
		params.Set("ttl", s.ttl)
	}
	if s.version != nil {
		// version may be any printable value (int, string, ...).
		params.Set("version", fmt.Sprintf("%v", s.version))
	}
	if s.versionType != "" {
		params.Set("version_type", s.versionType)
	}
	return method, path, params, nil
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *IndexService) Validate() error {
|
||||
var invalid []string
|
||||
if s.index == "" {
|
||||
invalid = append(invalid, "Index")
|
||||
}
|
||||
if s.typ == "" {
|
||||
invalid = append(invalid, "Type")
|
||||
}
|
||||
if s.bodyString == "" && s.bodyJson == nil {
|
||||
invalid = append(invalid, "BodyJson")
|
||||
}
|
||||
if len(invalid) > 0 {
|
||||
return fmt.Errorf("missing required fields: %v", invalid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do executes the index operation: it validates required fields, builds
// the URL, performs the HTTP request, and decodes the response into an
// IndexResponse.
func (s *IndexService) Do(ctx context.Context) (*IndexResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	method, path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Setup HTTP request body; bodyJson takes precedence over bodyString.
	var body interface{}
	if s.bodyJson != nil {
		body = s.bodyJson
	} else {
		body = s.bodyString
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, method, path, params, body)
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(IndexResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}
|
||||
|
||||
// IndexResponse is the result of indexing a document in Elasticsearch.
type IndexResponse struct {
	// TODO _shards { total, failed, successful }
	Index   string `json:"_index"`   // index the document was written to
	Type    string `json:"_type"`    // mapping type of the document
	Id      string `json:"_id"`      // document ID (server-generated when none was supplied)
	Version int    `json:"_version"` // document version after this operation
	Created bool   `json:"created"`  // whether a new document was created
}
|
||||
280
vendor/gopkg.in/olivere/elastic.v5/index_test.go
generated
vendored
Normal file
280
vendor/gopkg.in/olivere/elastic.v5/index_test.go
generated
vendored
Normal file
@@ -0,0 +1,280 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestIndexLifecycle is an integration test (requires a live cluster via
// setupTestClientAndCreateIndex): index a document with an explicit ID,
// verify existence, fetch and decode it, delete it, and verify it is gone.
func TestIndexLifecycle(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}

	// Add a document
	indexResult, err := client.Index().
		Index(testIndexName).
		Type("tweet").
		Id("1").
		BodyJson(&tweet1).
		Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if indexResult == nil {
		t.Errorf("expected result to be != nil; got: %v", indexResult)
	}

	// Exists
	exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if !exists {
		t.Errorf("expected exists %v; got %v", true, exists)
	}

	// Get document
	getResult, err := client.Get().
		Index(testIndexName).
		Type("tweet").
		Id("1").
		Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if getResult.Index != testIndexName {
		t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
	}
	if getResult.Type != "tweet" {
		t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
	}
	if getResult.Id != "1" {
		t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id)
	}
	if getResult.Source == nil {
		t.Errorf("expected GetResult.Source to be != nil; got nil")
	}

	// Decode the Source field
	var tweetGot tweet
	err = json.Unmarshal(*getResult.Source, &tweetGot)
	if err != nil {
		t.Fatal(err)
	}
	if tweetGot.User != tweet1.User {
		t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
	}
	if tweetGot.Message != tweet1.Message {
		t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
	}

	// Delete document again
	deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if deleteResult == nil {
		t.Errorf("expected result to be != nil; got: %v", deleteResult)
	}

	// Exists
	exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("1").Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if exists {
		t.Errorf("expected exists %v; got %v", false, exists)
	}
}

// TestIndexLifecycleWithAutomaticIDGeneration is the same lifecycle as
// TestIndexLifecycle but without an explicit ID, exercising the POST
// auto-ID path of IndexService.buildURL.
func TestIndexLifecycleWithAutomaticIDGeneration(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}

	// Add a document
	indexResult, err := client.Index().
		Index(testIndexName).
		Type("tweet").
		BodyJson(&tweet1).
		Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if indexResult == nil {
		t.Errorf("expected result to be != nil; got: %v", indexResult)
	}
	if indexResult.Id == "" {
		t.Fatalf("expected Es to generate an automatic ID, got: %v", indexResult.Id)
	}
	id := indexResult.Id

	// Exists
	exists, err := client.Exists().Index(testIndexName).Type("tweet").Id(id).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if !exists {
		t.Errorf("expected exists %v; got %v", true, exists)
	}

	// Get document
	getResult, err := client.Get().
		Index(testIndexName).
		Type("tweet").
		Id(id).
		Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if getResult.Index != testIndexName {
		t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
	}
	if getResult.Type != "tweet" {
		t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
	}
	if getResult.Id != id {
		t.Errorf("expected GetResult.Id %q; got %q", id, getResult.Id)
	}
	if getResult.Source == nil {
		t.Errorf("expected GetResult.Source to be != nil; got nil")
	}

	// Decode the Source field
	var tweetGot tweet
	err = json.Unmarshal(*getResult.Source, &tweetGot)
	if err != nil {
		t.Fatal(err)
	}
	if tweetGot.User != tweet1.User {
		t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
	}
	if tweetGot.Message != tweet1.Message {
		t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
	}

	// Delete document again
	deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id(id).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if deleteResult == nil {
		t.Errorf("expected result to be != nil; got: %v", deleteResult)
	}

	// Exists
	exists, err = client.Exists().Index(testIndexName).Type("tweet").Id(id).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if exists {
		t.Errorf("expected exists %v; got %v", false, exists)
	}
}

// TestIndexValidate checks that IndexService.Validate rejects requests
// that are missing the index name or the type.
func TestIndexValidate(t *testing.T) {
	client := setupTestClient(t)

	tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}

	// No index name -> fail with error
	res, err := NewIndexService(client).Type("tweet").Id("1").BodyJson(&tweet).Do(context.TODO())
	if err == nil {
		t.Fatalf("expected Index to fail without index name")
	}
	if res != nil {
		t.Fatalf("expected result to be == nil; got: %v", res)
	}

	// No index name -> fail with error
	res, err = NewIndexService(client).Index(testIndexName).Id("1").BodyJson(&tweet).Do(context.TODO())
	if err == nil {
		t.Fatalf("expected Index to fail without type")
	}
	if res != nil {
		t.Fatalf("expected result to be == nil; got: %v", res)
	}
}

// TestIndexCreateExistsOpenCloseDelete exercises the full index admin
// lifecycle (create, exists, close, open, delete). Currently skipped; see
// the t.Skip message below.
func TestIndexCreateExistsOpenCloseDelete(t *testing.T) {
	// TODO: Find out how to make these test robust
	t.Skip("test fails regularly with 409 (Conflict): " +
		"IndexPrimaryShardNotAllocatedException[[elastic-test] " +
		"primary not allocated post api... skipping")

	client := setupTestClient(t)

	// Create index
	createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if createIndex == nil {
		t.Fatalf("expected response; got: %v", createIndex)
	}
	if !createIndex.Acknowledged {
		t.Errorf("expected ack for creating index; got: %v", createIndex.Acknowledged)
	}

	// Exists
	indexExists, err := client.IndexExists(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if !indexExists {
		t.Fatalf("expected index exists=%v; got %v", true, indexExists)
	}

	// Flush
	_, err = client.Flush().Index(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	// Close index
	closeIndex, err := client.CloseIndex(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if closeIndex == nil {
		t.Fatalf("expected response; got: %v", closeIndex)
	}
	if !closeIndex.Acknowledged {
		t.Errorf("expected ack for closing index; got: %v", closeIndex.Acknowledged)
	}

	// Open index
	openIndex, err := client.OpenIndex(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if openIndex == nil {
		t.Fatalf("expected response; got: %v", openIndex)
	}
	if !openIndex.Acknowledged {
		t.Errorf("expected ack for opening index; got: %v", openIndex.Acknowledged)
	}

	// Flush
	_, err = client.Flush().Index(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	// Delete index
	deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if deleteIndex == nil {
		t.Fatalf("expected response; got: %v", deleteIndex)
	}
	if !deleteIndex.Acknowledged {
		t.Errorf("expected ack for deleting index; got %v", deleteIndex.Acknowledged)
	}
}
|
||||
279
vendor/gopkg.in/olivere/elastic.v5/indices_analyze.go
generated
vendored
Normal file
279
vendor/gopkg.in/olivere/elastic.v5/indices_analyze.go
generated
vendored
Normal file
@@ -0,0 +1,279 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// IndicesAnalyzeService performs the analysis process on a text and returns
|
||||
// the tokens breakdown of the text.
|
||||
//
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-analyze.html
|
||||
// for detail.
|
||||
type IndicesAnalyzeService struct {
|
||||
client *Client
|
||||
pretty bool
|
||||
index string
|
||||
request *IndicesAnalyzeRequest
|
||||
format string
|
||||
preferLocal *bool
|
||||
bodyJson interface{}
|
||||
bodyString string
|
||||
}
|
||||
|
||||
// NewIndicesAnalyzeService creates a new IndicesAnalyzeService.
|
||||
func NewIndicesAnalyzeService(client *Client) *IndicesAnalyzeService {
|
||||
return &IndicesAnalyzeService{
|
||||
client: client,
|
||||
request: new(IndicesAnalyzeRequest),
|
||||
}
|
||||
}
|
||||
|
||||
// Index is the name of the index to scope the operation.
|
||||
func (s *IndicesAnalyzeService) Index(index string) *IndicesAnalyzeService {
|
||||
s.index = index
|
||||
return s
|
||||
}
|
||||
|
||||
// Format of the output.
|
||||
func (s *IndicesAnalyzeService) Format(format string) *IndicesAnalyzeService {
|
||||
s.format = format
|
||||
return s
|
||||
}
|
||||
|
||||
// PreferLocal, when true, specifies that a local shard should be used
|
||||
// if available. When false, a random shard is used (default: true).
|
||||
func (s *IndicesAnalyzeService) PreferLocal(preferLocal bool) *IndicesAnalyzeService {
|
||||
s.preferLocal = &preferLocal
|
||||
return s
|
||||
}
|
||||
|
||||
// Request passes the analyze request to use.
|
||||
func (s *IndicesAnalyzeService) Request(request *IndicesAnalyzeRequest) *IndicesAnalyzeService {
|
||||
if request == nil {
|
||||
s.request = new(IndicesAnalyzeRequest)
|
||||
} else {
|
||||
s.request = request
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Analyzer is the name of the analyzer to use.
|
||||
func (s *IndicesAnalyzeService) Analyzer(analyzer string) *IndicesAnalyzeService {
|
||||
s.request.Analyzer = analyzer
|
||||
return s
|
||||
}
|
||||
|
||||
// Attributes is a list of token attributes to output; this parameter works
|
||||
// only with explain=true.
|
||||
func (s *IndicesAnalyzeService) Attributes(attributes ...string) *IndicesAnalyzeService {
|
||||
s.request.Attributes = attributes
|
||||
return s
|
||||
}
|
||||
|
||||
// CharFilter is a list of character filters to use for the analysis.
|
||||
func (s *IndicesAnalyzeService) CharFilter(charFilter ...string) *IndicesAnalyzeService {
|
||||
s.request.CharFilter = charFilter
|
||||
return s
|
||||
}
|
||||
|
||||
// Explain, when true, outputs more advanced details (default: false).
|
||||
func (s *IndicesAnalyzeService) Explain(explain bool) *IndicesAnalyzeService {
|
||||
s.request.Explain = explain
|
||||
return s
|
||||
}
|
||||
|
||||
// Field specifies to use a specific analyzer configured for this field (instead of passing the analyzer name).
|
||||
func (s *IndicesAnalyzeService) Field(field string) *IndicesAnalyzeService {
|
||||
s.request.Field = field
|
||||
return s
|
||||
}
|
||||
|
||||
// Filter is a list of filters to use for the analysis.
|
||||
func (s *IndicesAnalyzeService) Filter(filter ...string) *IndicesAnalyzeService {
|
||||
s.request.Filter = filter
|
||||
return s
|
||||
}
|
||||
|
||||
// Text is the text on which the analysis should be performed (when request body is not used).
|
||||
func (s *IndicesAnalyzeService) Text(text ...string) *IndicesAnalyzeService {
|
||||
s.request.Text = text
|
||||
return s
|
||||
}
|
||||
|
||||
// Tokenizer is the name of the tokenizer to use for the analysis.
|
||||
func (s *IndicesAnalyzeService) Tokenizer(tokenizer string) *IndicesAnalyzeService {
|
||||
s.request.Tokenizer = tokenizer
|
||||
return s
|
||||
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (s *IndicesAnalyzeService) Pretty(pretty bool) *IndicesAnalyzeService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// BodyJson is the text on which the analysis should be performed.
|
||||
func (s *IndicesAnalyzeService) BodyJson(body interface{}) *IndicesAnalyzeService {
|
||||
s.bodyJson = body
|
||||
return s
|
||||
}
|
||||
|
||||
// BodyString is the text on which the analysis should be performed.
|
||||
func (s *IndicesAnalyzeService) BodyString(body string) *IndicesAnalyzeService {
|
||||
s.bodyString = body
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *IndicesAnalyzeService) buildURL() (string, url.Values, error) {
|
||||
// Build URL
|
||||
var err error
|
||||
var path string
|
||||
|
||||
if s.index == "" {
|
||||
path = "/_analyze"
|
||||
} else {
|
||||
path, err = uritemplates.Expand("/{index}/_analyze", map[string]string{
|
||||
"index": s.index,
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.pretty {
|
||||
params.Set("pretty", "1")
|
||||
}
|
||||
if s.format != "" {
|
||||
params.Set("format", s.format)
|
||||
}
|
||||
if s.preferLocal != nil {
|
||||
params.Set("prefer_local", fmt.Sprintf("%v", *s.preferLocal))
|
||||
}
|
||||
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Do will execute the request with the given context.
|
||||
func (s *IndicesAnalyzeService) Do(ctx context.Context) (*IndicesAnalyzeResponse, error) {
|
||||
// Check pre-conditions
|
||||
if err := s.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Setup HTTP request body
|
||||
var body interface{}
|
||||
if s.bodyJson != nil {
|
||||
body = s.bodyJson
|
||||
} else if s.bodyString != "" {
|
||||
body = s.bodyString
|
||||
} else {
|
||||
// Request parameters are deprecated in 5.1.1, and we must use a JSON
|
||||
// structure in the body to pass the parameters.
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-analyze.html
|
||||
body = s.request
|
||||
}
|
||||
|
||||
res, err := s.client.PerformRequest(ctx, "POST", path, params, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret := new(IndicesAnalyzeResponse)
|
||||
if err = s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (s *IndicesAnalyzeService) Validate() error {
|
||||
var invalid []string
|
||||
if s.bodyJson == nil && s.bodyString == "" {
|
||||
if len(s.request.Text) == 0 {
|
||||
invalid = append(invalid, "Text")
|
||||
}
|
||||
}
|
||||
if len(invalid) > 0 {
|
||||
return fmt.Errorf("missing required fields: %v", invalid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IndicesAnalyzeRequest specifies the parameters of the analyze request.
|
||||
type IndicesAnalyzeRequest struct {
|
||||
Text []string `json:"text,omitempty"`
|
||||
Analyzer string `json:"analyzer,omitempty"`
|
||||
Tokenizer string `json:"tokenizer,omitempty"`
|
||||
Filter []string `json:"filter,omitempty"`
|
||||
CharFilter []string `json:"char_filter,omitempty"`
|
||||
Field string `json:"field,omitempty"`
|
||||
Explain bool `json:"explain,omitempty"`
|
||||
Attributes []string `json:"attributes,omitempty"`
|
||||
}
|
||||
|
||||
type IndicesAnalyzeResponse struct {
|
||||
Tokens []IndicesAnalyzeResponseToken `json:"tokens"` // json part for normal message
|
||||
Detail IndicesAnalyzeResponseDetail `json:"detail"` // json part for verbose message of explain request
|
||||
}
|
||||
|
||||
type IndicesAnalyzeResponseToken struct {
|
||||
Token string `json:"token"`
|
||||
StartOffset int `json:"start_offset"`
|
||||
EndOffset int `json:"end_offset"`
|
||||
Type string `json:"type"`
|
||||
Position int `json:"position"`
|
||||
}
|
||||
|
||||
type IndicesAnalyzeResponseDetail struct {
|
||||
CustomAnalyzer bool `json:"custom_analyzer"`
|
||||
Charfilters []interface{} `json:"charfilters"`
|
||||
Analyzer struct {
|
||||
Name string `json:"name"`
|
||||
Tokens []struct {
|
||||
Token string `json:"token"`
|
||||
StartOffset int `json:"start_offset"`
|
||||
EndOffset int `json:"end_offset"`
|
||||
Type string `json:"type"`
|
||||
Position int `json:"position"`
|
||||
Bytes string `json:"bytes"`
|
||||
PositionLength int `json:"positionLength"`
|
||||
} `json:"tokens"`
|
||||
} `json:"analyzer"`
|
||||
Tokenizer struct {
|
||||
Name string `json:"name"`
|
||||
Tokens []struct {
|
||||
Token string `json:"token"`
|
||||
StartOffset int `json:"start_offset"`
|
||||
EndOffset int `json:"end_offset"`
|
||||
Type string `json:"type"`
|
||||
Position int `json:"position"`
|
||||
} `json:"tokens"`
|
||||
} `json:"tokenizer"`
|
||||
Tokenfilters []struct {
|
||||
Name string `json:"name"`
|
||||
Tokens []struct {
|
||||
Token string `json:"token"`
|
||||
StartOffset int `json:"start_offset"`
|
||||
EndOffset int `json:"end_offset"`
|
||||
Type string `json:"type"`
|
||||
Position int `json:"position"`
|
||||
Keyword bool `json:"keyword"`
|
||||
} `json:"tokens"`
|
||||
} `json:"tokenfilters"`
|
||||
}
|
||||
85
vendor/gopkg.in/olivere/elastic.v5/indices_analyze_test.go
generated
vendored
Normal file
85
vendor/gopkg.in/olivere/elastic.v5/indices_analyze_test.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIndicesAnalyzeURL(t *testing.T) {
|
||||
client := setupTestClient(t)
|
||||
|
||||
tests := []struct {
|
||||
Index string
|
||||
Expected string
|
||||
}{
|
||||
{
|
||||
"",
|
||||
"/_analyze",
|
||||
},
|
||||
{
|
||||
"tweets",
|
||||
"/tweets/_analyze",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
path, _, err := client.IndexAnalyze().Index(test.Index).buildURL()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if path != test.Expected {
|
||||
t.Errorf("expected %q; got: %q", test.Expected, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIndicesAnalyze(t *testing.T) {
|
||||
client := setupTestClient(t)
|
||||
// client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
|
||||
|
||||
res, err := client.IndexAnalyze().Text("hello hi guy").Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
}
|
||||
if len(res.Tokens) != 3 {
|
||||
t.Fatalf("expected %d, got %d (%+v)", 3, len(res.Tokens), res.Tokens)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIndicesAnalyzeDetail(t *testing.T) {
|
||||
client := setupTestClient(t)
|
||||
// client := setupTestClientAndCreateIndexAndLog(t, SetTraceLog(log.New(os.Stdout, "", 0)))
|
||||
|
||||
res, err := client.IndexAnalyze().Text("hello hi guy").Explain(true).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
}
|
||||
|
||||
if len(res.Detail.Analyzer.Tokens) != 3 {
|
||||
t.Fatalf("expected %d tokens, got %d (%+v)", 3, len(res.Detail.Tokenizer.Tokens), res.Detail.Tokenizer.Tokens)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIndicesAnalyzeWithIndex(t *testing.T) {
|
||||
client := setupTestClient(t)
|
||||
|
||||
_, err := client.IndexAnalyze().Index("foo").Text("hello hi guy").Do(context.TODO())
|
||||
if err == nil {
|
||||
t.Fatal("expected error, got nil")
|
||||
}
|
||||
if want, have := "elastic: Error 404 (Not Found): no such index [type=index_not_found_exception]", err.Error(); want != have {
|
||||
t.Fatalf("expected error %q, got %q", want, have)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIndicesAnalyzeValidate(t *testing.T) {
|
||||
client := setupTestClient(t)
|
||||
|
||||
_, err := client.IndexAnalyze().Do(context.TODO())
|
||||
if err == nil {
|
||||
t.Fatal("expected error, got nil")
|
||||
}
|
||||
if want, have := "missing required fields: [Text]", err.Error(); want != have {
|
||||
t.Fatalf("expected error %q, got %q", want, have)
|
||||
}
|
||||
}
|
||||
153
vendor/gopkg.in/olivere/elastic.v5/indices_close.go
generated
vendored
Normal file
153
vendor/gopkg.in/olivere/elastic.v5/indices_close.go
generated
vendored
Normal file
@@ -0,0 +1,153 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// IndicesCloseService closes an index.
|
||||
//
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-open-close.html
|
||||
// for details.
|
||||
type IndicesCloseService struct {
|
||||
client *Client
|
||||
pretty bool
|
||||
index string
|
||||
timeout string
|
||||
masterTimeout string
|
||||
ignoreUnavailable *bool
|
||||
allowNoIndices *bool
|
||||
expandWildcards string
|
||||
}
|
||||
|
||||
// NewIndicesCloseService creates and initializes a new IndicesCloseService.
|
||||
func NewIndicesCloseService(client *Client) *IndicesCloseService {
|
||||
return &IndicesCloseService{client: client}
|
||||
}
|
||||
|
||||
// Index is the name of the index to close.
|
||||
func (s *IndicesCloseService) Index(index string) *IndicesCloseService {
|
||||
s.index = index
|
||||
return s
|
||||
}
|
||||
|
||||
// Timeout is an explicit operation timeout.
|
||||
func (s *IndicesCloseService) Timeout(timeout string) *IndicesCloseService {
|
||||
s.timeout = timeout
|
||||
return s
|
||||
}
|
||||
|
||||
// MasterTimeout specifies the timeout for connection to master.
|
||||
func (s *IndicesCloseService) MasterTimeout(masterTimeout string) *IndicesCloseService {
|
||||
s.masterTimeout = masterTimeout
|
||||
return s
|
||||
}
|
||||
|
||||
// IgnoreUnavailable indicates whether specified concrete indices should be
|
||||
// ignored when unavailable (missing or closed).
|
||||
func (s *IndicesCloseService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesCloseService {
|
||||
s.ignoreUnavailable = &ignoreUnavailable
|
||||
return s
|
||||
}
|
||||
|
||||
// AllowNoIndices indicates whether to ignore if a wildcard indices
|
||||
// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified).
|
||||
func (s *IndicesCloseService) AllowNoIndices(allowNoIndices bool) *IndicesCloseService {
|
||||
s.allowNoIndices = &allowNoIndices
|
||||
return s
|
||||
}
|
||||
|
||||
// ExpandWildcards indicates whether to expand wildcard expression to
|
||||
// concrete indices that are open, closed or both.
|
||||
func (s *IndicesCloseService) ExpandWildcards(expandWildcards string) *IndicesCloseService {
|
||||
s.expandWildcards = expandWildcards
|
||||
return s
|
||||
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (s *IndicesCloseService) Pretty(pretty bool) *IndicesCloseService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *IndicesCloseService) buildURL() (string, url.Values, error) {
|
||||
// Build URL
|
||||
path, err := uritemplates.Expand("/{index}/_close", map[string]string{
|
||||
"index": s.index,
|
||||
})
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.allowNoIndices != nil {
|
||||
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
|
||||
}
|
||||
if s.expandWildcards != "" {
|
||||
params.Set("expand_wildcards", s.expandWildcards)
|
||||
}
|
||||
if s.timeout != "" {
|
||||
params.Set("timeout", s.timeout)
|
||||
}
|
||||
if s.masterTimeout != "" {
|
||||
params.Set("master_timeout", s.masterTimeout)
|
||||
}
|
||||
if s.ignoreUnavailable != nil {
|
||||
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
|
||||
}
|
||||
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *IndicesCloseService) Validate() error {
|
||||
var invalid []string
|
||||
if s.index == "" {
|
||||
invalid = append(invalid, "Index")
|
||||
}
|
||||
if len(invalid) > 0 {
|
||||
return fmt.Errorf("missing required fields: %v", invalid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do executes the operation.
|
||||
func (s *IndicesCloseService) Do(ctx context.Context) (*IndicesCloseResponse, error) {
|
||||
// Check pre-conditions
|
||||
if err := s.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get URL for request
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get HTTP response
|
||||
res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return operation response
|
||||
ret := new(IndicesCloseResponse)
|
||||
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// IndicesCloseResponse is the response of IndicesCloseService.Do.
|
||||
type IndicesCloseResponse struct {
|
||||
Acknowledged bool `json:"acknowledged"`
|
||||
}
|
||||
84
vendor/gopkg.in/olivere/elastic.v5/indices_close_test.go
generated
vendored
Normal file
84
vendor/gopkg.in/olivere/elastic.v5/indices_close_test.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TODO(oe): Find out why this test fails on Travis CI.
|
||||
/*
|
||||
func TestIndicesOpenAndClose(t *testing.T) {
|
||||
client := setupTestClient(t)
|
||||
|
||||
// Create index
|
||||
createIndex, err := client.CreateIndex(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !createIndex.Acknowledged {
|
||||
t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
|
||||
}
|
||||
defer func() {
|
||||
// Delete index
|
||||
deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !deleteIndex.Acknowledged {
|
||||
t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
|
||||
}
|
||||
}()
|
||||
|
||||
waitForYellow := func() {
|
||||
// Wait for status yellow
|
||||
res, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("15s").Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res != nil && res.TimedOut {
|
||||
t.Fatalf("cluster time out waiting for status %q", "yellow")
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for cluster
|
||||
waitForYellow()
|
||||
|
||||
// Close index
|
||||
cresp, err := client.CloseIndex(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !cresp.Acknowledged {
|
||||
t.Fatalf("expected close index of %q to be acknowledged\n", testIndexName)
|
||||
}
|
||||
|
||||
// Wait for cluster
|
||||
waitForYellow()
|
||||
|
||||
// Open index again
|
||||
oresp, err := client.OpenIndex(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !oresp.Acknowledged {
|
||||
t.Fatalf("expected open index of %q to be acknowledged\n", testIndexName)
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
func TestIndicesCloseValidate(t *testing.T) {
|
||||
client := setupTestClient(t)
|
||||
|
||||
// No index name -> fail with error
|
||||
res, err := NewIndicesCloseService(client).Do(context.TODO())
|
||||
if err == nil {
|
||||
t.Fatalf("expected IndicesClose to fail without index name")
|
||||
}
|
||||
if res != nil {
|
||||
t.Fatalf("expected result to be == nil; got: %v", res)
|
||||
}
|
||||
}
|
||||
130
vendor/gopkg.in/olivere/elastic.v5/indices_create.go
generated
vendored
Normal file
130
vendor/gopkg.in/olivere/elastic.v5/indices_create.go
generated
vendored
Normal file
@@ -0,0 +1,130 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/url"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// IndicesCreateService creates a new index.
|
||||
//
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-create-index.html
|
||||
// for details.
|
||||
type IndicesCreateService struct {
|
||||
client *Client
|
||||
pretty bool
|
||||
index string
|
||||
timeout string
|
||||
masterTimeout string
|
||||
bodyJson interface{}
|
||||
bodyString string
|
||||
}
|
||||
|
||||
// NewIndicesCreateService returns a new IndicesCreateService.
|
||||
func NewIndicesCreateService(client *Client) *IndicesCreateService {
|
||||
return &IndicesCreateService{client: client}
|
||||
}
|
||||
|
||||
// Index is the name of the index to create.
|
||||
func (b *IndicesCreateService) Index(index string) *IndicesCreateService {
|
||||
b.index = index
|
||||
return b
|
||||
}
|
||||
|
||||
// Timeout the explicit operation timeout, e.g. "5s".
|
||||
func (s *IndicesCreateService) Timeout(timeout string) *IndicesCreateService {
|
||||
s.timeout = timeout
|
||||
return s
|
||||
}
|
||||
|
||||
// MasterTimeout specifies the timeout for connection to master.
|
||||
func (s *IndicesCreateService) MasterTimeout(masterTimeout string) *IndicesCreateService {
|
||||
s.masterTimeout = masterTimeout
|
||||
return s
|
||||
}
|
||||
|
||||
// Body specifies the configuration of the index as a string.
|
||||
// It is an alias for BodyString.
|
||||
func (b *IndicesCreateService) Body(body string) *IndicesCreateService {
|
||||
b.bodyString = body
|
||||
return b
|
||||
}
|
||||
|
||||
// BodyString specifies the configuration of the index as a string.
|
||||
func (b *IndicesCreateService) BodyString(body string) *IndicesCreateService {
|
||||
b.bodyString = body
|
||||
return b
|
||||
}
|
||||
|
||||
// BodyJson specifies the configuration of the index. The interface{} will
|
||||
// be serializes as a JSON document, so use a map[string]interface{}.
|
||||
func (b *IndicesCreateService) BodyJson(body interface{}) *IndicesCreateService {
|
||||
b.bodyJson = body
|
||||
return b
|
||||
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (b *IndicesCreateService) Pretty(pretty bool) *IndicesCreateService {
|
||||
b.pretty = pretty
|
||||
return b
|
||||
}
|
||||
|
||||
// Do executes the operation.
|
||||
func (b *IndicesCreateService) Do(ctx context.Context) (*IndicesCreateResult, error) {
|
||||
if b.index == "" {
|
||||
return nil, errors.New("missing index name")
|
||||
}
|
||||
|
||||
// Build url
|
||||
path, err := uritemplates.Expand("/{index}", map[string]string{
|
||||
"index": b.index,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
params := make(url.Values)
|
||||
if b.pretty {
|
||||
params.Set("pretty", "1")
|
||||
}
|
||||
if b.masterTimeout != "" {
|
||||
params.Set("master_timeout", b.masterTimeout)
|
||||
}
|
||||
if b.timeout != "" {
|
||||
params.Set("timeout", b.timeout)
|
||||
}
|
||||
|
||||
// Setup HTTP request body
|
||||
var body interface{}
|
||||
if b.bodyJson != nil {
|
||||
body = b.bodyJson
|
||||
} else {
|
||||
body = b.bodyString
|
||||
}
|
||||
|
||||
// Get response
|
||||
res, err := b.client.PerformRequest(ctx, "PUT", path, params, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret := new(IndicesCreateResult)
|
||||
if err := b.client.decoder.Decode(res.Body, ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// -- Result of a create index request.
|
||||
|
||||
// IndicesCreateResult is the outcome of creating a new index.
|
||||
type IndicesCreateResult struct {
|
||||
Acknowledged bool `json:"acknowledged"`
|
||||
ShardsAcknowledged bool `json:"shards_acknowledged"`
|
||||
}
|
||||
63
vendor/gopkg.in/olivere/elastic.v5/indices_create_test.go
generated
vendored
Normal file
63
vendor/gopkg.in/olivere/elastic.v5/indices_create_test.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIndicesLifecycle(t *testing.T) {
|
||||
client := setupTestClient(t)
|
||||
|
||||
// Create index
|
||||
createIndex, err := client.CreateIndex(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !createIndex.Acknowledged {
|
||||
t.Errorf("expected IndicesCreateResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
|
||||
}
|
||||
|
||||
// Check if index exists
|
||||
indexExists, err := client.IndexExists(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !indexExists {
|
||||
t.Fatalf("index %s should exist, but doesn't\n", testIndexName)
|
||||
}
|
||||
|
||||
// Delete index
|
||||
deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !deleteIndex.Acknowledged {
|
||||
t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
|
||||
}
|
||||
|
||||
// Check if index exists
|
||||
indexExists, err = client.IndexExists(testIndexName).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if indexExists {
|
||||
t.Fatalf("index %s should not exist, but does\n", testIndexName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIndicesCreateValidate(t *testing.T) {
|
||||
client := setupTestClient(t)
|
||||
|
||||
// No index name -> fail with error
|
||||
res, err := NewIndicesCreateService(client).Body(testMapping).Do(context.TODO())
|
||||
if err == nil {
|
||||
t.Fatalf("expected IndicesCreate to fail without index name")
|
||||
}
|
||||
if res != nil {
|
||||
t.Fatalf("expected result to be == nil; got: %v", res)
|
||||
}
|
||||
}
|
||||
129
vendor/gopkg.in/olivere/elastic.v5/indices_delete.go
generated
vendored
Normal file
129
vendor/gopkg.in/olivere/elastic.v5/indices_delete.go
generated
vendored
Normal file
@@ -0,0 +1,129 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// IndicesDeleteService allows to delete existing indices.
|
||||
//
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-delete-index.html
|
||||
// for details.
|
||||
type IndicesDeleteService struct {
|
||||
client *Client
|
||||
pretty bool
|
||||
index []string
|
||||
timeout string
|
||||
masterTimeout string
|
||||
}
|
||||
|
||||
// NewIndicesDeleteService creates and initializes a new IndicesDeleteService.
|
||||
func NewIndicesDeleteService(client *Client) *IndicesDeleteService {
|
||||
return &IndicesDeleteService{
|
||||
client: client,
|
||||
index: make([]string, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Index adds the list of indices to delete.
|
||||
// Use `_all` or `*` string to delete all indices.
|
||||
func (s *IndicesDeleteService) Index(index []string) *IndicesDeleteService {
|
||||
s.index = index
|
||||
return s
|
||||
}
|
||||
|
||||
// Timeout is an explicit operation timeout.
|
||||
func (s *IndicesDeleteService) Timeout(timeout string) *IndicesDeleteService {
|
||||
s.timeout = timeout
|
||||
return s
|
||||
}
|
||||
|
||||
// MasterTimeout specifies the timeout for connection to master.
|
||||
func (s *IndicesDeleteService) MasterTimeout(masterTimeout string) *IndicesDeleteService {
|
||||
s.masterTimeout = masterTimeout
|
||||
return s
|
||||
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (s *IndicesDeleteService) Pretty(pretty bool) *IndicesDeleteService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *IndicesDeleteService) buildURL() (string, url.Values, error) {
|
||||
// Build URL
|
||||
path, err := uritemplates.Expand("/{index}", map[string]string{
|
||||
"index": strings.Join(s.index, ","),
|
||||
})
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.pretty {
|
||||
params.Set("pretty", "1")
|
||||
}
|
||||
if s.timeout != "" {
|
||||
params.Set("timeout", s.timeout)
|
||||
}
|
||||
if s.masterTimeout != "" {
|
||||
params.Set("master_timeout", s.masterTimeout)
|
||||
}
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *IndicesDeleteService) Validate() error {
|
||||
var invalid []string
|
||||
if len(s.index) == 0 {
|
||||
invalid = append(invalid, "Index")
|
||||
}
|
||||
if len(invalid) > 0 {
|
||||
return fmt.Errorf("missing required fields: %v", invalid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do executes the operation.
|
||||
func (s *IndicesDeleteService) Do(ctx context.Context) (*IndicesDeleteResponse, error) {
|
||||
// Check pre-conditions
|
||||
if err := s.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get URL for request
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get HTTP response
|
||||
res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return operation response
|
||||
ret := new(IndicesDeleteResponse)
|
||||
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// -- Result of a delete index request.
|
||||
|
||||
// IndicesDeleteResponse is the response of IndicesDeleteService.Do.
|
||||
type IndicesDeleteResponse struct {
|
||||
Acknowledged bool `json:"acknowledged"`
|
||||
}
|
||||
122
vendor/gopkg.in/olivere/elastic.v5/indices_delete_template.go
generated
vendored
Normal file
122
vendor/gopkg.in/olivere/elastic.v5/indices_delete_template.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// IndicesDeleteTemplateService deletes index templates.
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-templates.html.
type IndicesDeleteTemplateService struct {
	client        *Client // client used to issue the HTTP request
	pretty        bool    // pretty-print the JSON response
	name          string  // name of the template to delete (required)
	timeout       string  // explicit operation timeout, e.g. "5s"
	masterTimeout string  // timeout for connection to the master node
}

// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService.
func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService {
	return &IndicesDeleteTemplateService{
		client: client,
	}
}

// Name is the name of the template.
func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService {
	s.name = name
	return s
}

// Timeout is an explicit operation timeout.
func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService {
	s.timeout = timeout
	return s
}

// MasterTimeout specifies the timeout for connection to master.
func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService {
	s.masterTimeout = masterTimeout
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService {
	s.pretty = pretty
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) {
	// Build URL; uritemplates.Expand percent-escapes the template name.
	path, err := uritemplates.Expand("/_template/{name}", map[string]string{
		"name": s.name,
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters. Only non-zero settings are sent.
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.timeout != "" {
		params.Set("timeout", s.timeout)
	}
	if s.masterTimeout != "" {
		params.Set("master_timeout", s.masterTimeout)
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
// A non-empty template name is the only requirement.
func (s *IndicesDeleteTemplateService) Validate() error {
	var invalid []string
	if s.name == "" {
		invalid = append(invalid, "Name")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation: validates, builds the URL, performs a
// DELETE request, and decodes the JSON response.
func (s *IndicesDeleteTemplateService) Do(ctx context.Context) (*IndicesDeleteTemplateResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(IndicesDeleteTemplateResponse)
	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do.
type IndicesDeleteTemplateResponse struct {
	// Acknowledged reports whether the cluster acknowledged the deletion.
	Acknowledged bool `json:"acknowledged,omitempty"`
}
|
||||
23
vendor/gopkg.in/olivere/elastic.v5/indices_delete_test.go
generated
vendored
Normal file
23
vendor/gopkg.in/olivere/elastic.v5/indices_delete_test.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIndicesDeleteValidate(t *testing.T) {
|
||||
client := setupTestClient(t)
|
||||
|
||||
// No index name -> fail with error
|
||||
res, err := NewIndicesDeleteService(client).Do(context.TODO())
|
||||
if err == nil {
|
||||
t.Fatalf("expected IndicesDelete to fail without index name")
|
||||
}
|
||||
if res != nil {
|
||||
t.Fatalf("expected result to be == nil; got: %v", res)
|
||||
}
|
||||
}
|
||||
150
vendor/gopkg.in/olivere/elastic.v5/indices_exists.go
generated
vendored
Normal file
150
vendor/gopkg.in/olivere/elastic.v5/indices_exists.go
generated
vendored
Normal file
@@ -0,0 +1,150 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// IndicesExistsService checks if an index or indices exist or not.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-exists.html
// for details.
type IndicesExistsService struct {
	client            *Client  // client used to issue the HTTP request
	pretty            bool     // pretty-print the JSON response
	index             []string // indices to check (required, at least one)
	ignoreUnavailable *bool    // tri-state: nil means "use server default"
	allowNoIndices    *bool    // tri-state: nil means "use server default"
	expandWildcards   string   // "open", "closed", or both
	local             *bool    // tri-state: nil means "use server default"
}

// NewIndicesExistsService creates and initializes a new IndicesExistsService.
func NewIndicesExistsService(client *Client) *IndicesExistsService {
	return &IndicesExistsService{
		client: client,
		index:  make([]string, 0),
	}
}

// Index is a list of one or more indices to check.
// Note: this replaces any previously set indices rather than appending.
func (s *IndicesExistsService) Index(index []string) *IndicesExistsService {
	s.index = index
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices expression
// resolves into no concrete indices. (This includes `_all` string or
// when no indices have been specified).
func (s *IndicesExistsService) AllowNoIndices(allowNoIndices bool) *IndicesExistsService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *IndicesExistsService) ExpandWildcards(expandWildcards string) *IndicesExistsService {
	s.expandWildcards = expandWildcards
	return s
}

// Local, when set, returns local information and does not retrieve the state
// from master node (default: false).
func (s *IndicesExistsService) Local(local bool) *IndicesExistsService {
	s.local = &local
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesExistsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IndicesExistsService) Pretty(pretty bool) *IndicesExistsService {
	s.pretty = pretty
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesExistsService) buildURL() (string, url.Values, error) {
	// Build URL: multiple indices are sent comma-separated in one path segment.
	path, err := uritemplates.Expand("/{index}", map[string]string{
		"index": strings.Join(s.index, ","),
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters. Pointer fields are only sent when
	// explicitly set by the caller.
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.local != nil {
		params.Set("local", fmt.Sprintf("%v", *s.local))
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
// At least one index name must have been provided.
func (s *IndicesExistsService) Validate() error {
	var invalid []string
	if len(s.index) == 0 {
		invalid = append(invalid, "Index")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation. It returns (true, nil) when the index exists
// (HTTP 200), (false, nil) when it does not (HTTP 404), and an error for
// any other status code.
func (s *IndicesExistsService) Do(ctx context.Context) (bool, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return false, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return false, err
	}

	// Get HTTP response. The trailing 404 argument is an extra status code
	// passed to PerformRequest — presumably so a 404 is not treated as an
	// error; confirm against PerformRequest's documentation.
	res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404)
	if err != nil {
		return false, err
	}

	// Return operation response
	switch res.StatusCode {
	case http.StatusOK:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	default:
		return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
	}
}
|
||||
113
vendor/gopkg.in/olivere/elastic.v5/indices_exists_template.go
generated
vendored
Normal file
113
vendor/gopkg.in/olivere/elastic.v5/indices_exists_template.go
generated
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// IndicesExistsTemplateService checks if a given template exists.
// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-templates.html#indices-templates-exists
// for documentation.
type IndicesExistsTemplateService struct {
	client *Client // client used to issue the HTTP request
	pretty bool    // pretty-print the JSON response
	name   string  // template name to check (required)
	local  *bool   // tri-state: nil means "use server default"
}

// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService.
func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService {
	return &IndicesExistsTemplateService{
		client: client,
	}
}

// Name is the name of the template.
func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService {
	s.name = name
	return s
}

// Local indicates whether to return local information, i.e. do not retrieve
// the state from master node (default: false).
func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService {
	s.local = &local
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService {
	s.pretty = pretty
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) {
	// Build URL; uritemplates.Expand percent-escapes the template name.
	path, err := uritemplates.Expand("/_template/{name}", map[string]string{
		"name": s.name,
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.local != nil {
		params.Set("local", fmt.Sprintf("%v", *s.local))
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
// A non-empty template name is the only requirement.
func (s *IndicesExistsTemplateService) Validate() error {
	var invalid []string
	if s.name == "" {
		invalid = append(invalid, "Name")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation. It returns (true, nil) when the template
// exists (HTTP 200), (false, nil) when it does not (HTTP 404), and an
// error for any other status code.
func (s *IndicesExistsTemplateService) Do(ctx context.Context) (bool, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return false, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return false, err
	}

	// Get HTTP response. The trailing 404 argument is an extra status code
	// passed to PerformRequest — presumably so a 404 is not treated as an
	// error; confirm against PerformRequest's documentation.
	res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404)
	if err != nil {
		return false, err
	}

	// Return operation response
	switch res.StatusCode {
	case http.StatusOK:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	default:
		return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
	}
}
|
||||
68
vendor/gopkg.in/olivere/elastic.v5/indices_exists_template_test.go
generated
vendored
Normal file
68
vendor/gopkg.in/olivere/elastic.v5/indices_exists_template_test.go
generated
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestIndexExistsTemplate is an integration test: it creates an index
// template, verifies it is reported as existing, and fetches it back.
// It requires a running Elasticsearch instance.
// NOTE(review): the internal whitespace of the raw template string below
// was reconstructed from a mangled rendering — functionally irrelevant to
// the JSON parser, but confirm against upstream if byte-exactness matters.
func TestIndexExistsTemplate(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tmpl := `{
	"template":"elastic-test*",
	"settings":{
		"number_of_shards":1,
		"number_of_replicas":0
	},
	"mappings":{
		"tweet":{
			"properties":{
				"tags":{
					"type":"keyword"
				},
				"location":{
					"type":"geo_point"
				},
				"suggest_field":{
					"type":"completion"
				}
			}
		}
	}
}`
	putres, err := client.IndexPutTemplate("elastic-template").BodyString(tmpl).Do(context.TODO())
	if err != nil {
		t.Fatalf("expected no error; got: %v", err)
	}
	if putres == nil {
		t.Fatalf("expected response; got: %v", putres)
	}
	if !putres.Acknowledged {
		t.Fatalf("expected index template to be ack'd; got: %v", putres.Acknowledged)
	}

	// Always delete template
	defer client.IndexDeleteTemplate("elastic-template").Do(context.TODO())

	// Check if template exists
	exists, err := client.IndexTemplateExists("elastic-template").Do(context.TODO())
	if err != nil {
		t.Fatalf("expected no error; got: %v", err)
	}
	if !exists {
		t.Fatalf("expected index template %q to exist; got: %v", "elastic-template", exists)
	}

	// Get template
	getres, err := client.IndexGetTemplate("elastic-template").Do(context.TODO())
	if err != nil {
		t.Fatalf("expected no error; got: %v", err)
	}
	if getres == nil {
		t.Fatalf("expected to get index template %q; got: %v", "elastic-template", getres)
	}
}
|
||||
23
vendor/gopkg.in/olivere/elastic.v5/indices_exists_test.go
generated
vendored
Normal file
23
vendor/gopkg.in/olivere/elastic.v5/indices_exists_test.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestIndicesExistsWithoutIndex verifies that an exists check without an
// index name fails validation and returns false.
func TestIndicesExistsWithoutIndex(t *testing.T) {
	client := setupTestClient(t)

	// No index name -> fail with error
	res, err := NewIndicesExistsService(client).Do(context.TODO())
	if err == nil {
		t.Fatalf("expected IndicesExists to fail without index name")
	}
	if res != false {
		t.Fatalf("expected result to be false; got: %v", res)
	}
}
|
||||
160
vendor/gopkg.in/olivere/elastic.v5/indices_exists_type.go
generated
vendored
Normal file
160
vendor/gopkg.in/olivere/elastic.v5/indices_exists_type.go
generated
vendored
Normal file
@@ -0,0 +1,160 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// IndicesExistsTypeService checks if one or more types exist in one or more indices.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-types-exists.html
// for details.
type IndicesExistsTypeService struct {
	client            *Client  // client used to issue the HTTP request
	pretty            bool     // pretty-print the JSON response
	typ               []string // document types to check (required, at least one)
	index             []string // indices to search in (required, at least one)
	expandWildcards   string   // "open", "closed", or both
	local             *bool    // tri-state: nil means "use server default"
	ignoreUnavailable *bool    // tri-state: nil means "use server default"
	allowNoIndices    *bool    // tri-state: nil means "use server default"
}

// NewIndicesExistsTypeService creates a new IndicesExistsTypeService.
func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService {
	return &IndicesExistsTypeService{
		client: client,
	}
}

// Index is a list of index names; use `_all` to check the types across all indices.
// Repeated calls append to any previously set indices.
func (s *IndicesExistsTypeService) Index(indices ...string) *IndicesExistsTypeService {
	s.index = append(s.index, indices...)
	return s
}

// Type is a list of document types to check.
// Repeated calls append to any previously set types.
func (s *IndicesExistsTypeService) Type(types ...string) *IndicesExistsTypeService {
	s.typ = append(s.typ, types...)
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *IndicesExistsTypeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsTypeService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// (This includes `_all` string or when no indices have been specified).
func (s *IndicesExistsTypeService) AllowNoIndices(allowNoIndices bool) *IndicesExistsTypeService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *IndicesExistsTypeService) ExpandWildcards(expandWildcards string) *IndicesExistsTypeService {
	s.expandWildcards = expandWildcards
	return s
}

// Local specifies whether to return local information, i.e. do not retrieve
// the state from master node (default: false).
func (s *IndicesExistsTypeService) Local(local bool) *IndicesExistsTypeService {
	s.local = &local
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService {
	s.pretty = pretty
	return s
}

// buildURL builds the URL for the operation.
func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) {
	// Build URL: indices and types are each joined comma-separated into
	// their own path segment.
	path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
		"index": strings.Join(s.index, ","),
		"type":  strings.Join(s.typ, ","),
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters. Pointer fields are only sent when
	// explicitly set by the caller.
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.local != nil {
		params.Set("local", fmt.Sprintf("%v", *s.local))
	}
	return path, params, nil
}

// Validate checks if the operation is valid.
// Both at least one index and at least one type are required.
func (s *IndicesExistsTypeService) Validate() error {
	var invalid []string
	if len(s.index) == 0 {
		invalid = append(invalid, "Index")
	}
	if len(s.typ) == 0 {
		invalid = append(invalid, "Type")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}

// Do executes the operation. It returns (true, nil) when the type exists
// (HTTP 200), (false, nil) when it does not (HTTP 404), and an error for
// any other status code.
func (s *IndicesExistsTypeService) Do(ctx context.Context) (bool, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return false, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return false, err
	}

	// Get HTTP response. The trailing 404 argument is an extra status code
	// passed to PerformRequest — presumably so a 404 is not treated as an
	// error; confirm against PerformRequest's documentation.
	res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404)
	if err != nil {
		return false, err
	}

	// Return operation response
	switch res.StatusCode {
	case http.StatusOK:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	default:
		return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
	}
}
|
||||
135
vendor/gopkg.in/olivere/elastic.v5/indices_exists_type_test.go
generated
vendored
Normal file
135
vendor/gopkg.in/olivere/elastic.v5/indices_exists_type_test.go
generated
vendored
Normal file
@@ -0,0 +1,135 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestIndicesExistsTypeBuildURL is a table-driven test covering URL
// construction and validation for the type-exists service, including
// percent-escaping of comma-joined index/type lists.
func TestIndicesExistsTypeBuildURL(t *testing.T) {
	client := setupTestClientAndCreateIndex(t)

	tests := []struct {
		Indices               []string
		Types                 []string
		Expected              string // expected URL path when validation succeeds
		ExpectValidateFailure bool   // whether Validate() must return an error
	}{
		{
			[]string{},
			[]string{},
			"",
			true,
		},
		{
			[]string{"index1"},
			[]string{},
			"",
			true,
		},
		{
			[]string{},
			[]string{"type1"},
			"",
			true,
		},
		{
			[]string{"index1"},
			[]string{"type1"},
			"/index1/_mapping/type1",
			false,
		},
		{
			[]string{"index1", "index2"},
			[]string{"type1"},
			"/index1%2Cindex2/_mapping/type1",
			false,
		},
		{
			[]string{"index1", "index2"},
			[]string{"type1", "type2"},
			"/index1%2Cindex2/_mapping/type1%2Ctype2",
			false,
		},
	}

	for i, test := range tests {
		err := client.TypeExists().Index(test.Indices...).Type(test.Types...).Validate()
		if err == nil && test.ExpectValidateFailure {
			t.Errorf("#%d: expected validate to fail", i+1)
			continue
		}
		if err != nil && !test.ExpectValidateFailure {
			t.Errorf("#%d: expected validate to succeed", i+1)
			continue
		}
		if !test.ExpectValidateFailure {
			path, _, err := client.TypeExists().Index(test.Indices...).Type(test.Types...).buildURL()
			if err != nil {
				t.Fatalf("#%d: %v", i+1, err)
			}
			if path != test.Expected {
				t.Errorf("#%d: expected %q; got: %q", i+1, test.Expected, path)
			}
		}
	}
}

// TestIndicesExistsType is an integration test: it creates an index with
// the "tweet" mapping, checks the type exists, deletes the index, and
// checks the type no longer exists. Requires a running Elasticsearch.
func TestIndicesExistsType(t *testing.T) {
	client := setupTestClient(t)

	// Create index with tweet type
	createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if createIndex == nil {
		t.Errorf("expected result to be != nil; got: %v", createIndex)
	}
	if !createIndex.Acknowledged {
		t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
	}

	// Check if type exists
	exists, err := client.TypeExists().Index(testIndexName).Type("tweet").Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if !exists {
		t.Fatalf("type %s should exist in index %s, but doesn't\n", "tweet", testIndexName)
	}

	// Delete index
	deleteIndex, err := client.DeleteIndex(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if !deleteIndex.Acknowledged {
		t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
	}

	// Check if type exists
	exists, err = client.TypeExists().Index(testIndexName).Type("tweet").Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if exists {
		t.Fatalf("type %s should not exist in index %s, but it does\n", "tweet", testIndexName)
	}
}

// TestIndicesExistsTypeValidate verifies that a type-exists check without
// an index name fails validation and returns false.
func TestIndicesExistsTypeValidate(t *testing.T) {
	client := setupTestClient(t)

	// No index name -> fail with error
	res, err := NewIndicesExistsTypeService(client).Do(context.TODO())
	if err == nil {
		t.Fatalf("expected IndicesExistsType to fail without index name")
	}
	if res != false {
		t.Fatalf("expected result to be false; got: %v", res)
	}
}
|
||||
169
vendor/gopkg.in/olivere/elastic.v5/indices_flush.go
generated
vendored
Normal file
169
vendor/gopkg.in/olivere/elastic.v5/indices_flush.go
generated
vendored
Normal file
@@ -0,0 +1,169 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// Flush allows to flush one or more indices. The flush process of an index
|
||||
// basically frees memory from the index by flushing data to the index
|
||||
// storage and clearing the internal transaction log.
|
||||
//
|
||||
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-flush.html
|
||||
// for details.
|
||||
type IndicesFlushService struct {
|
||||
client *Client
|
||||
pretty bool
|
||||
index []string
|
||||
force *bool
|
||||
waitIfOngoing *bool
|
||||
ignoreUnavailable *bool
|
||||
allowNoIndices *bool
|
||||
expandWildcards string
|
||||
}
|
||||
|
||||
// NewIndicesFlushService creates a new IndicesFlushService.
|
||||
func NewIndicesFlushService(client *Client) *IndicesFlushService {
|
||||
return &IndicesFlushService{
|
||||
client: client,
|
||||
index: make([]string, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Index is a list of index names; use `_all` or empty string for all indices.
|
||||
func (s *IndicesFlushService) Index(indices ...string) *IndicesFlushService {
|
||||
s.index = append(s.index, indices...)
|
||||
return s
|
||||
}
|
||||
|
||||
// Force indicates whether a flush should be forced even if it is not
|
||||
// necessarily needed ie. if no changes will be committed to the index.
|
||||
// This is useful if transaction log IDs should be incremented even if
|
||||
// no uncommitted changes are present. (This setting can be considered as internal).
|
||||
func (s *IndicesFlushService) Force(force bool) *IndicesFlushService {
|
||||
s.force = &force
|
||||
return s
|
||||
}
|
||||
|
||||
// WaitIfOngoing, if set to true, indicates that the flush operation will
|
||||
// block until the flush can be executed if another flush operation is
|
||||
// already executing. The default is false and will cause an exception
|
||||
// to be thrown on the shard level if another flush operation is already running..
|
||||
func (s *IndicesFlushService) WaitIfOngoing(waitIfOngoing bool) *IndicesFlushService {
|
||||
s.waitIfOngoing = &waitIfOngoing
|
||||
return s
|
||||
}
|
||||
|
||||
// IgnoreUnavailable indicates whether specified concrete indices should be
|
||||
// ignored when unavailable (missing or closed).
|
||||
func (s *IndicesFlushService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesFlushService {
|
||||
s.ignoreUnavailable = &ignoreUnavailable
|
||||
return s
|
||||
}
|
||||
|
||||
// AllowNoIndices indicates whether to ignore if a wildcard indices expression
|
||||
// resolves into no concrete indices. (This includes `_all` string or when
|
||||
// no indices have been specified).
|
||||
func (s *IndicesFlushService) AllowNoIndices(allowNoIndices bool) *IndicesFlushService {
|
||||
s.allowNoIndices = &allowNoIndices
|
||||
return s
|
||||
}
|
||||
|
||||
// ExpandWildcards specifies whether to expand wildcard expression to
|
||||
// concrete indices that are open, closed or both..
|
||||
func (s *IndicesFlushService) ExpandWildcards(expandWildcards string) *IndicesFlushService {
|
||||
s.expandWildcards = expandWildcards
|
||||
return s
|
||||
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (s *IndicesFlushService) Pretty(pretty bool) *IndicesFlushService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *IndicesFlushService) buildURL() (string, url.Values, error) {
|
||||
// Build URL
|
||||
var err error
|
||||
var path string
|
||||
|
||||
if len(s.index) > 0 {
|
||||
path, err = uritemplates.Expand("/{index}/_flush", map[string]string{
|
||||
"index": strings.Join(s.index, ","),
|
||||
})
|
||||
} else {
|
||||
path = "/_flush"
|
||||
}
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.pretty {
|
||||
params.Set("pretty", "1")
|
||||
}
|
||||
if s.force != nil {
|
||||
params.Set("force", fmt.Sprintf("%v", *s.force))
|
||||
}
|
||||
if s.waitIfOngoing != nil {
|
||||
params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing))
|
||||
}
|
||||
if s.ignoreUnavailable != nil {
|
||||
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
|
||||
}
|
||||
if s.allowNoIndices != nil {
|
||||
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
|
||||
}
|
||||
if s.expandWildcards != "" {
|
||||
params.Set("expand_wildcards", s.expandWildcards)
|
||||
}
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
// The flush API has no required parameters, so there is nothing to check.
func (s *IndicesFlushService) Validate() error {
	return nil
}
|
||||
|
||||
// Do executes the service.
|
||||
func (s *IndicesFlushService) Do(ctx context.Context) (*IndicesFlushResponse, error) {
|
||||
// Check pre-conditions
|
||||
if err := s.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get URL for request
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get HTTP response
|
||||
res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return operation response
|
||||
ret := new(IndicesFlushResponse)
|
||||
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// -- Result of a flush request.

// IndicesFlushResponse is the response of IndicesFlushService.Do.
type IndicesFlushResponse struct {
	// Shards reports how many shards the flush succeeded/failed on.
	Shards shardsInfo `json:"_shards"`
}
|
||||
70
vendor/gopkg.in/olivere/elastic.v5/indices_flush_test.go
generated
vendored
Normal file
70
vendor/gopkg.in/olivere/elastic.v5/indices_flush_test.go
generated
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFlush(t *testing.T) {
|
||||
client := setupTestClientAndCreateIndex(t)
|
||||
|
||||
// Flush all indices
|
||||
res, err := client.Flush().Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res == nil {
|
||||
t.Errorf("expected res to be != nil; got: %v", res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlushBuildURL(t *testing.T) {
|
||||
client := setupTestClientAndCreateIndex(t)
|
||||
|
||||
tests := []struct {
|
||||
Indices []string
|
||||
Expected string
|
||||
ExpectValidateFailure bool
|
||||
}{
|
||||
{
|
||||
[]string{},
|
||||
"/_flush",
|
||||
false,
|
||||
},
|
||||
{
|
||||
[]string{"index1"},
|
||||
"/index1/_flush",
|
||||
false,
|
||||
},
|
||||
{
|
||||
[]string{"index1", "index2"},
|
||||
"/index1%2Cindex2/_flush",
|
||||
false,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
err := NewIndicesFlushService(client).Index(test.Indices...).Validate()
|
||||
if err == nil && test.ExpectValidateFailure {
|
||||
t.Errorf("case #%d: expected validate to fail", i+1)
|
||||
continue
|
||||
}
|
||||
if err != nil && !test.ExpectValidateFailure {
|
||||
t.Errorf("case #%d: expected validate to succeed", i+1)
|
||||
continue
|
||||
}
|
||||
if !test.ExpectValidateFailure {
|
||||
path, _, err := NewIndicesFlushService(client).Index(test.Indices...).buildURL()
|
||||
if err != nil {
|
||||
t.Fatalf("case #%d: %v", i+1, err)
|
||||
}
|
||||
if path != test.Expected {
|
||||
t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
189
vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge.go
generated
vendored
Normal file
189
vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge.go
generated
vendored
Normal file
@@ -0,0 +1,189 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// IndicesForcemergeService allows to force merging of one or more indices.
// The merge relates to the number of segments a Lucene index holds
// within each shard. The force merge operation allows to reduce the number
// of segments by merging them.
//
// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-forcemerge.html
// for more information.
type IndicesForcemergeService struct {
	client *Client // Elasticsearch client used to perform the request
	pretty bool    // ask for indented JSON responses
	// index lists the indices to force-merge; empty means all indices.
	index []string
	// Nil pointer fields mean "not set; use the server default".
	allowNoIndices     *bool
	expandWildcards    string
	flush              *bool
	ignoreUnavailable  *bool
	maxNumSegments     interface{}
	onlyExpungeDeletes *bool
	operationThreading interface{}
}
|
||||
|
||||
// NewIndicesForcemergeService creates a new IndicesForcemergeService.
|
||||
func NewIndicesForcemergeService(client *Client) *IndicesForcemergeService {
|
||||
return &IndicesForcemergeService{
|
||||
client: client,
|
||||
index: make([]string, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Index is a list of index names; use `_all` or empty string to perform
|
||||
// the operation on all indices.
|
||||
func (s *IndicesForcemergeService) Index(index ...string) *IndicesForcemergeService {
|
||||
if s.index == nil {
|
||||
s.index = make([]string, 0)
|
||||
}
|
||||
s.index = append(s.index, index...)
|
||||
return s
|
||||
}
|
||||
|
||||
// AllowNoIndices indicates whether to ignore if a wildcard indices
|
||||
// expression resolves into no concrete indices.
|
||||
// (This includes `_all` string or when no indices have been specified).
|
||||
func (s *IndicesForcemergeService) AllowNoIndices(allowNoIndices bool) *IndicesForcemergeService {
|
||||
s.allowNoIndices = &allowNoIndices
|
||||
return s
|
||||
}
|
||||
|
||||
// ExpandWildcards indicates whether to expand wildcard expression to
|
||||
// concrete indices that are open, closed or both..
|
||||
func (s *IndicesForcemergeService) ExpandWildcards(expandWildcards string) *IndicesForcemergeService {
|
||||
s.expandWildcards = expandWildcards
|
||||
return s
|
||||
}
|
||||
|
||||
// Flush specifies whether the index should be flushed after performing
|
||||
// the operation (default: true).
|
||||
func (s *IndicesForcemergeService) Flush(flush bool) *IndicesForcemergeService {
|
||||
s.flush = &flush
|
||||
return s
|
||||
}
|
||||
|
||||
// IgnoreUnavailable indicates whether specified concrete indices should
|
||||
// be ignored when unavailable (missing or closed).
|
||||
func (s *IndicesForcemergeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesForcemergeService {
|
||||
s.ignoreUnavailable = &ignoreUnavailable
|
||||
return s
|
||||
}
|
||||
|
||||
// MaxNumSegments specifies the number of segments the index should be
|
||||
// merged into (default: dynamic).
|
||||
func (s *IndicesForcemergeService) MaxNumSegments(maxNumSegments interface{}) *IndicesForcemergeService {
|
||||
s.maxNumSegments = maxNumSegments
|
||||
return s
|
||||
}
|
||||
|
||||
// OnlyExpungeDeletes specifies whether the operation should only expunge
|
||||
// deleted documents.
|
||||
func (s *IndicesForcemergeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *IndicesForcemergeService {
|
||||
s.onlyExpungeDeletes = &onlyExpungeDeletes
|
||||
return s
|
||||
}
|
||||
|
||||
// OperationThreading is an expert-level setting forwarded verbatim as the
// `operation_threading` query parameter; see the Elasticsearch force-merge
// documentation for its semantics.
func (s *IndicesForcemergeService) OperationThreading(operationThreading interface{}) *IndicesForcemergeService {
	s.operationThreading = operationThreading
	return s
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (s *IndicesForcemergeService) Pretty(pretty bool) *IndicesForcemergeService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *IndicesForcemergeService) buildURL() (string, url.Values, error) {
|
||||
var err error
|
||||
var path string
|
||||
|
||||
// Build URL
|
||||
if len(s.index) > 0 {
|
||||
path, err = uritemplates.Expand("/{index}/_forcemerge", map[string]string{
|
||||
"index": strings.Join(s.index, ","),
|
||||
})
|
||||
} else {
|
||||
path = "/_forcemerge"
|
||||
}
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.pretty {
|
||||
params.Set("pretty", "1")
|
||||
}
|
||||
if s.allowNoIndices != nil {
|
||||
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
|
||||
}
|
||||
if s.expandWildcards != "" {
|
||||
params.Set("expand_wildcards", s.expandWildcards)
|
||||
}
|
||||
if s.flush != nil {
|
||||
params.Set("flush", fmt.Sprintf("%v", *s.flush))
|
||||
}
|
||||
if s.ignoreUnavailable != nil {
|
||||
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
|
||||
}
|
||||
if s.maxNumSegments != nil {
|
||||
params.Set("max_num_segments", fmt.Sprintf("%v", s.maxNumSegments))
|
||||
}
|
||||
if s.onlyExpungeDeletes != nil {
|
||||
params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes))
|
||||
}
|
||||
if s.operationThreading != nil {
|
||||
params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading))
|
||||
}
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
// Force-merge has no required parameters, so there is nothing to check.
func (s *IndicesForcemergeService) Validate() error {
	return nil
}
|
||||
|
||||
// Do executes the operation.
|
||||
func (s *IndicesForcemergeService) Do(ctx context.Context) (*IndicesForcemergeResponse, error) {
|
||||
// Check pre-conditions
|
||||
if err := s.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get URL for request
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get HTTP response
|
||||
res, err := s.client.PerformRequest(ctx, "POST", path, params, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return operation response
|
||||
ret := new(IndicesForcemergeResponse)
|
||||
if err := s.client.decoder.Decode(res.Body, ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// IndicesForcemergeResponse is the response of IndicesForcemergeService.Do.
type IndicesForcemergeResponse struct {
	// Shards reports how many shards the merge succeeded/failed on.
	Shards shardsInfo `json:"_shards"`
}
|
||||
57
vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge_test.go
generated
vendored
Normal file
57
vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge_test.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIndicesForcemergeBuildURL(t *testing.T) {
|
||||
client := setupTestClient(t)
|
||||
|
||||
tests := []struct {
|
||||
Indices []string
|
||||
Expected string
|
||||
}{
|
||||
{
|
||||
[]string{},
|
||||
"/_forcemerge",
|
||||
},
|
||||
{
|
||||
[]string{"index1"},
|
||||
"/index1/_forcemerge",
|
||||
},
|
||||
{
|
||||
[]string{"index1", "index2"},
|
||||
"/index1%2Cindex2/_forcemerge",
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
path, _, err := client.Forcemerge().Index(test.Indices...).buildURL()
|
||||
if err != nil {
|
||||
t.Errorf("case #%d: %v", i+1, err)
|
||||
continue
|
||||
}
|
||||
if path != test.Expected {
|
||||
t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIndicesForcemerge(t *testing.T) {
|
||||
client := setupTestClientAndCreateIndexAndAddDocs(t)
|
||||
|
||||
_, err := client.Forcemerge(testIndexName).MaxNumSegments(1).Do(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
/*
|
||||
if !ok {
|
||||
t.Fatalf("expected forcemerge to succeed; got: %v", ok)
|
||||
}
|
||||
*/
|
||||
}
|
||||
202
vendor/gopkg.in/olivere/elastic.v5/indices_get.go
generated
vendored
Normal file
202
vendor/gopkg.in/olivere/elastic.v5/indices_get.go
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// IndicesGetService retrieves information about one or more indices.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-get-index.html
// for more details.
type IndicesGetService struct {
	client *Client // Elasticsearch client used to perform the request
	pretty bool    // ask for indented JSON responses
	// index lists the indices to inspect; defaults to "_all" when empty.
	index []string
	// feature lists the index features (mappings, settings, ...) to return.
	feature []string
	// Nil pointer fields mean "not set; use the server default".
	local             *bool
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
	flatSettings      *bool
	human             *bool
}
|
||||
|
||||
// NewIndicesGetService creates a new IndicesGetService.
|
||||
func NewIndicesGetService(client *Client) *IndicesGetService {
|
||||
return &IndicesGetService{
|
||||
client: client,
|
||||
index: make([]string, 0),
|
||||
feature: make([]string, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Index is a list of index names.
|
||||
func (s *IndicesGetService) Index(indices ...string) *IndicesGetService {
|
||||
s.index = append(s.index, indices...)
|
||||
return s
|
||||
}
|
||||
|
||||
// Feature is a list of features.
|
||||
func (s *IndicesGetService) Feature(features ...string) *IndicesGetService {
|
||||
s.feature = append(s.feature, features...)
|
||||
return s
|
||||
}
|
||||
|
||||
// Local indicates whether to return local information, i.e. do not retrieve
|
||||
// the state from master node (default: false).
|
||||
func (s *IndicesGetService) Local(local bool) *IndicesGetService {
|
||||
s.local = &local
|
||||
return s
|
||||
}
|
||||
|
||||
// IgnoreUnavailable indicates whether to ignore unavailable indexes (default: false).
|
||||
func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService {
|
||||
s.ignoreUnavailable = &ignoreUnavailable
|
||||
return s
|
||||
}
|
||||
|
||||
// AllowNoIndices indicates whether to ignore if a wildcard expression
|
||||
// resolves to no concrete indices (default: false).
|
||||
func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService {
|
||||
s.allowNoIndices = &allowNoIndices
|
||||
return s
|
||||
}
|
||||
|
||||
// ExpandWildcards indicates whether wildcard expressions should get
|
||||
// expanded to open or closed indices (default: open).
|
||||
func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService {
|
||||
s.expandWildcards = expandWildcards
|
||||
return s
|
||||
}
|
||||
|
||||
/* Disabled because serialization would fail in that case. */
|
||||
/*
|
||||
// FlatSettings make the service return settings in flat format (default: false).
|
||||
func (s *IndicesGetService) FlatSettings(flatSettings bool) *IndicesGetService {
|
||||
s.flatSettings = &flatSettings
|
||||
return s
|
||||
}
|
||||
*/
|
||||
|
||||
// Human indicates whether to return version and creation date values
|
||||
// in human-readable format (default: false).
|
||||
func (s *IndicesGetService) Human(human bool) *IndicesGetService {
|
||||
s.human = &human
|
||||
return s
|
||||
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *IndicesGetService) buildURL() (string, url.Values, error) {
|
||||
var err error
|
||||
var path string
|
||||
var index []string
|
||||
|
||||
if len(s.index) > 0 {
|
||||
index = s.index
|
||||
} else {
|
||||
index = []string{"_all"}
|
||||
}
|
||||
|
||||
if len(s.feature) > 0 {
|
||||
// Build URL
|
||||
path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{
|
||||
"index": strings.Join(index, ","),
|
||||
"feature": strings.Join(s.feature, ","),
|
||||
})
|
||||
} else {
|
||||
// Build URL
|
||||
path, err = uritemplates.Expand("/{index}", map[string]string{
|
||||
"index": strings.Join(index, ","),
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.pretty {
|
||||
params.Set("pretty", "1")
|
||||
}
|
||||
if s.expandWildcards != "" {
|
||||
params.Set("expand_wildcards", s.expandWildcards)
|
||||
}
|
||||
if s.flatSettings != nil {
|
||||
params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
|
||||
}
|
||||
if s.human != nil {
|
||||
params.Set("human", fmt.Sprintf("%v", *s.human))
|
||||
}
|
||||
if s.local != nil {
|
||||
params.Set("local", fmt.Sprintf("%v", *s.local))
|
||||
}
|
||||
if s.ignoreUnavailable != nil {
|
||||
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
|
||||
}
|
||||
if s.allowNoIndices != nil {
|
||||
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
|
||||
}
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *IndicesGetService) Validate() error {
|
||||
var invalid []string
|
||||
if len(s.index) == 0 {
|
||||
invalid = append(invalid, "Index")
|
||||
}
|
||||
if len(invalid) > 0 {
|
||||
return fmt.Errorf("missing required fields: %v", invalid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do executes the operation.
|
||||
func (s *IndicesGetService) Do(ctx context.Context) (map[string]*IndicesGetResponse, error) {
|
||||
// Check pre-conditions
|
||||
if err := s.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get URL for request
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get HTTP response
|
||||
res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return operation response
|
||||
var ret map[string]*IndicesGetResponse
|
||||
if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// IndicesGetResponse is part of the response of IndicesGetService.Do.
// Each field mirrors the section of the same name in the Elasticsearch
// get-index JSON response.
type IndicesGetResponse struct {
	Aliases  map[string]interface{} `json:"aliases"`
	Mappings map[string]interface{} `json:"mappings"`
	Settings map[string]interface{} `json:"settings"`
	Warmers  map[string]interface{} `json:"warmers"`
}
|
||||
157
vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases.go
generated
vendored
Normal file
157
vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases.go
generated
vendored
Normal file
@@ -0,0 +1,157 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// AliasesService returns the aliases associated with one or more indices.
// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-aliases.html.
type AliasesService struct {
	client *Client  // Elasticsearch client used to perform the request
	index  []string // indices to query; empty means all indices
	pretty bool     // ask for indented JSON responses
}
|
||||
|
||||
// NewAliasesService instantiates a new AliasesService.
|
||||
func NewAliasesService(client *Client) *AliasesService {
|
||||
builder := &AliasesService{
|
||||
client: client,
|
||||
}
|
||||
return builder
|
||||
}
|
||||
|
||||
// Pretty asks Elasticsearch to indent the returned JSON.
|
||||
func (s *AliasesService) Pretty(pretty bool) *AliasesService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// Index adds one or more indices.
|
||||
func (s *AliasesService) Index(index ...string) *AliasesService {
|
||||
s.index = append(s.index, index...)
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *AliasesService) buildURL() (string, url.Values, error) {
|
||||
var err error
|
||||
var path string
|
||||
|
||||
if len(s.index) > 0 {
|
||||
path, err = uritemplates.Expand("/{index}/_aliases", map[string]string{
|
||||
"index": strings.Join(s.index, ","),
|
||||
})
|
||||
} else {
|
||||
path = "/_aliases"
|
||||
}
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.pretty {
|
||||
params.Set("pretty", fmt.Sprintf("%v", s.pretty))
|
||||
}
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
func (s *AliasesService) Do(ctx context.Context) (*AliasesResult, error) {
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get response
|
||||
res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// {
|
||||
// "indexName" : {
|
||||
// "aliases" : {
|
||||
// "alias1" : { },
|
||||
// "alias2" : { }
|
||||
// }
|
||||
// },
|
||||
// "indexName2" : {
|
||||
// ...
|
||||
// },
|
||||
// }
|
||||
indexMap := make(map[string]interface{})
|
||||
if err := s.client.decoder.Decode(res.Body, &indexMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Each (indexName, _)
|
||||
ret := &AliasesResult{
|
||||
Indices: make(map[string]indexResult),
|
||||
}
|
||||
for indexName, indexData := range indexMap {
|
||||
indexOut, found := ret.Indices[indexName]
|
||||
if !found {
|
||||
indexOut = indexResult{Aliases: make([]aliasResult, 0)}
|
||||
}
|
||||
|
||||
// { "aliases" : { ... } }
|
||||
indexDataMap, ok := indexData.(map[string]interface{})
|
||||
if ok {
|
||||
aliasesData, ok := indexDataMap["aliases"].(map[string]interface{})
|
||||
if ok {
|
||||
for aliasName, _ := range aliasesData {
|
||||
aliasRes := aliasResult{AliasName: aliasName}
|
||||
indexOut.Aliases = append(indexOut.Aliases, aliasRes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ret.Indices[indexName] = indexOut
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// -- Result of an alias request.

// AliasesResult is the outcome of AliasesService.Do, keyed by index name.
type AliasesResult struct {
	Indices map[string]indexResult
}

// indexResult holds the aliases attached to a single index.
type indexResult struct {
	Aliases []aliasResult
}

// aliasResult holds a single alias name.
type aliasResult struct {
	AliasName string
}
|
||||
|
||||
func (ar AliasesResult) IndicesByAlias(aliasName string) []string {
|
||||
var indices []string
|
||||
for indexName, indexInfo := range ar.Indices {
|
||||
for _, aliasInfo := range indexInfo.Aliases {
|
||||
if aliasInfo.AliasName == aliasName {
|
||||
indices = append(indices, indexName)
|
||||
}
|
||||
}
|
||||
}
|
||||
return indices
|
||||
}
|
||||
|
||||
func (ir indexResult) HasAlias(aliasName string) bool {
|
||||
for _, alias := range ir.Aliases {
|
||||
if alias.AliasName == aliasName {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
180
vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases_test.go
generated
vendored
Normal file
180
vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases_test.go
generated
vendored
Normal file
@@ -0,0 +1,180 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAliasesBuildURL(t *testing.T) {
|
||||
client := setupTestClient(t)
|
||||
|
||||
tests := []struct {
|
||||
Indices []string
|
||||
Expected string
|
||||
}{
|
||||
{
|
||||
[]string{},
|
||||
"/_aliases",
|
||||
},
|
||||
{
|
||||
[]string{"index1"},
|
||||
"/index1/_aliases",
|
||||
},
|
||||
{
|
||||
[]string{"index1", "index2"},
|
||||
"/index1%2Cindex2/_aliases",
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
path, _, err := client.Aliases().Index(test.Indices...).buildURL()
|
||||
if err != nil {
|
||||
t.Errorf("case #%d: %v", i+1, err)
|
||||
continue
|
||||
}
|
||||
if path != test.Expected {
|
||||
t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestAliases is an end-to-end test of alias management: it indexes docs
// into two indices, verifies no aliases exist, creates a shared alias,
// verifies it via Aliases() and HasAlias(), then removes it from one index
// and verifies the asymmetric result. Steps are order-dependent.
func TestAliases(t *testing.T) {
	var err error

	client := setupTestClientAndCreateIndex(t)

	// Some tweets
	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
	tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."}
	tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}

	// Add tweets to first index
	_, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	// Add tweets to second index
	_, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	// Flush both indices so the documents are searchable
	_, err = client.Flush().Index(testIndexName).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	_, err = client.Flush().Index(testIndexName2).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	// Alias should not yet exist
	aliasesResult1, err := client.Aliases().
		Index(testIndexName, testIndexName2).
		//Pretty(true).
		Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if len(aliasesResult1.Indices) != 2 {
		t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult1.Indices))
	}
	for indexName, indexDetails := range aliasesResult1.Indices {
		if len(indexDetails.Aliases) != 0 {
			t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
		}
	}

	// Add both indices to a new alias
	aliasCreate, err := client.Alias().
		Add(testIndexName, testAliasName).
		Add(testIndexName2, testAliasName).
		//Pretty(true).
		Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if !aliasCreate.Acknowledged {
		t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged)
	}

	// Alias should now exist on both indices
	aliasesResult2, err := client.Aliases().
		Index(testIndexName, testIndexName2).
		//Pretty(true).
		Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if len(aliasesResult2.Indices) != 2 {
		t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices))
	}
	for indexName, indexDetails := range aliasesResult2.Indices {
		if len(indexDetails.Aliases) != 1 {
			t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
		}
	}

	// Check the reverse lookup (HasAlias) for the first index:
	indexInfo1, found := aliasesResult2.Indices[testIndexName]
	if !found {
		t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found)
	}
	aliasFound := indexInfo1.HasAlias(testAliasName)
	if !aliasFound {
		t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName, aliasFound)
	}

	// Check the reverse lookup (HasAlias) for the second index:
	indexInfo2, found := aliasesResult2.Indices[testIndexName2]
	if !found {
		t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found)
	}
	aliasFound = indexInfo2.HasAlias(testAliasName)
	if !aliasFound {
		t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName2, aliasFound)
	}

	// Remove first index should remove two tweets, so should only yield 1
	aliasRemove1, err := client.Alias().
		Remove(testIndexName, testAliasName).
		//Pretty(true).
		Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if !aliasRemove1.Acknowledged {
		t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged)
	}

	// Alias should now exist only for index 2
	aliasesResult3, err := client.Aliases().Index(testIndexName, testIndexName2).Do(context.TODO())
	if err != nil {
		t.Fatal(err)
	}
	if len(aliasesResult3.Indices) != 2 {
		t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult3.Indices))
	}
	for indexName, indexDetails := range aliasesResult3.Indices {
		if indexName == testIndexName {
			if len(indexDetails.Aliases) != 0 {
				t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
			}
		} else if indexName == testIndexName2 {
			if len(indexDetails.Aliases) != 1 {
				t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
			}
		} else {
			t.Errorf("got index %s", indexName)
		}
	}
}
|
||||
183
vendor/gopkg.in/olivere/elastic.v5/indices_get_field_mapping.go
generated
vendored
Normal file
183
vendor/gopkg.in/olivere/elastic.v5/indices_get_field_mapping.go
generated
vendored
Normal file
@@ -0,0 +1,183 @@
|
||||
// Copyright 2012-present Oliver Eilhard. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-license.
|
||||
// See http://olivere.mit-license.org/license.txt for details.
|
||||
|
||||
package elastic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/olivere/elastic.v5/uritemplates"
|
||||
)
|
||||
|
||||
// IndicesGetFieldMappingService retrieves the mapping definitions for the fields in an index
// or index/type.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-get-field-mapping.html
// for details.
type IndicesGetFieldMappingService struct {
	client *Client // Elasticsearch client used to perform the request
	pretty bool    // ask for indented JSON responses
	// index/typ/field default to "_all"/"*" semantics when empty.
	index []string
	typ   []string
	field []string
	// Nil pointer fields mean "not set; use the server default".
	local             *bool
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
}
|
||||
|
||||
// NewGetFieldMappingService is an alias for NewIndicesGetFieldMappingService.
//
// Deprecated: use NewIndicesGetFieldMappingService instead.
func NewGetFieldMappingService(client *Client) *IndicesGetFieldMappingService {
	return NewIndicesGetFieldMappingService(client)
}
|
||||
|
||||
// NewIndicesGetFieldMappingService creates a new IndicesGetFieldMappingService.
|
||||
func NewIndicesGetFieldMappingService(client *Client) *IndicesGetFieldMappingService {
|
||||
return &IndicesGetFieldMappingService{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
// Index is a list of index names.
|
||||
func (s *IndicesGetFieldMappingService) Index(indices ...string) *IndicesGetFieldMappingService {
|
||||
s.index = append(s.index, indices...)
|
||||
return s
|
||||
}
|
||||
|
||||
// Type is a list of document types.
|
||||
func (s *IndicesGetFieldMappingService) Type(types ...string) *IndicesGetFieldMappingService {
|
||||
s.typ = append(s.typ, types...)
|
||||
return s
|
||||
}
|
||||
|
||||
// Field is a list of fields.
|
||||
func (s *IndicesGetFieldMappingService) Field(fields ...string) *IndicesGetFieldMappingService {
|
||||
s.field = append(s.field, fields...)
|
||||
return s
|
||||
}
|
||||
|
||||
// AllowNoIndices indicates whether to ignore if a wildcard indices
|
||||
// expression resolves into no concrete indices.
|
||||
// This includes `_all` string or when no indices have been specified.
|
||||
func (s *IndicesGetFieldMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetFieldMappingService {
|
||||
s.allowNoIndices = &allowNoIndices
|
||||
return s
|
||||
}
|
||||
|
||||
// ExpandWildcards indicates whether to expand wildcard expression to
|
||||
// concrete indices that are open, closed or both..
|
||||
func (s *IndicesGetFieldMappingService) ExpandWildcards(expandWildcards string) *IndicesGetFieldMappingService {
|
||||
s.expandWildcards = expandWildcards
|
||||
return s
|
||||
}
|
||||
|
||||
// Local indicates whether to return local information, do not retrieve
|
||||
// the state from master node (default: false).
|
||||
func (s *IndicesGetFieldMappingService) Local(local bool) *IndicesGetFieldMappingService {
|
||||
s.local = &local
|
||||
return s
|
||||
}
|
||||
|
||||
// IgnoreUnavailable indicates whether specified concrete indices should be
|
||||
// ignored when unavailable (missing or closed).
|
||||
func (s *IndicesGetFieldMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetFieldMappingService {
|
||||
s.ignoreUnavailable = &ignoreUnavailable
|
||||
return s
|
||||
}
|
||||
|
||||
// Pretty indicates that the JSON response be indented and human readable.
|
||||
func (s *IndicesGetFieldMappingService) Pretty(pretty bool) *IndicesGetFieldMappingService {
|
||||
s.pretty = pretty
|
||||
return s
|
||||
}
|
||||
|
||||
// buildURL builds the URL for the operation.
|
||||
func (s *IndicesGetFieldMappingService) buildURL() (string, url.Values, error) {
|
||||
var index, typ, field []string
|
||||
|
||||
if len(s.index) > 0 {
|
||||
index = s.index
|
||||
} else {
|
||||
index = []string{"_all"}
|
||||
}
|
||||
|
||||
if len(s.typ) > 0 {
|
||||
typ = s.typ
|
||||
} else {
|
||||
typ = []string{"_all"}
|
||||
}
|
||||
|
||||
if len(s.field) > 0 {
|
||||
field = s.field
|
||||
} else {
|
||||
field = []string{"*"}
|
||||
}
|
||||
|
||||
// Build URL
|
||||
path, err := uritemplates.Expand("/{index}/_mapping/{type}/field/{field}", map[string]string{
|
||||
"index": strings.Join(index, ","),
|
||||
"type": strings.Join(typ, ","),
|
||||
"field": strings.Join(field, ","),
|
||||
})
|
||||
if err != nil {
|
||||
return "", url.Values{}, err
|
||||
}
|
||||
|
||||
// Add query string parameters
|
||||
params := url.Values{}
|
||||
if s.pretty {
|
||||
params.Set("pretty", "1")
|
||||
}
|
||||
if s.ignoreUnavailable != nil {
|
||||
params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
|
||||
}
|
||||
if s.allowNoIndices != nil {
|
||||
params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
|
||||
}
|
||||
if s.expandWildcards != "" {
|
||||
params.Set("expand_wildcards", s.expandWildcards)
|
||||
}
|
||||
if s.local != nil {
|
||||
params.Set("local", fmt.Sprintf("%v", *s.local))
|
||||
}
|
||||
return path, params, nil
|
||||
}
|
||||
|
||||
// Validate checks if the operation is valid.
|
||||
func (s *IndicesGetFieldMappingService) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do executes the operation. It returns mapping definitions for an index
|
||||
// or index/type.
|
||||
func (s *IndicesGetFieldMappingService) Do(ctx context.Context) (map[string]interface{}, error) {
|
||||
var ret map[string]interface{}
|
||||
|
||||
// Check pre-conditions
|
||||
if err := s.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get URL for request
|
||||
path, params, err := s.buildURL()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get HTTP response
|
||||
res, err := s.client.PerformRequest(ctx, "GET", path, params, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return operation response
|
||||
if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user