mirror of
https://github.com/mattermost/mattermost.git
synced 2025-02-25 18:55:24 -06:00
MM-31436: Upgrade dependencies (#16605)
* MM-31436: Upgrade dependencies Ran make update-dependencies https://mattermost.atlassian.net/browse/MM-31436 ```release-note NONE ``` * add missing dep * fix lru marshaling
This commit is contained in:
6
vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
generated
vendored
6
vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
generated
vendored
@@ -1,5 +1,11 @@
|
||||
# Changelog
|
||||
|
||||
## 3.1.1 (2020-11-23)
|
||||
|
||||
### Fixed
|
||||
|
||||
- #158: Fixed issue with generated regex operation order that could cause problem
|
||||
|
||||
## 3.1.0 (2020-04-15)
|
||||
|
||||
### Added
|
||||
|
||||
11
vendor/github.com/Masterminds/semver/v3/constraints.go
generated
vendored
11
vendor/github.com/Masterminds/semver/v3/constraints.go
generated
vendored
@@ -164,14 +164,11 @@ func init() {
|
||||
"^": constraintCaret,
|
||||
}
|
||||
|
||||
ops := make([]string, 0, len(constraintOps))
|
||||
for k := range constraintOps {
|
||||
ops = append(ops, regexp.QuoteMeta(k))
|
||||
}
|
||||
ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^`
|
||||
|
||||
constraintRegex = regexp.MustCompile(fmt.Sprintf(
|
||||
`^\s*(%s)\s*(%s)\s*$`,
|
||||
strings.Join(ops, "|"),
|
||||
ops,
|
||||
cvRegex))
|
||||
|
||||
constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
|
||||
@@ -180,12 +177,12 @@ func init() {
|
||||
|
||||
findConstraintRegex = regexp.MustCompile(fmt.Sprintf(
|
||||
`(%s)\s*(%s)`,
|
||||
strings.Join(ops, "|"),
|
||||
ops,
|
||||
cvRegex))
|
||||
|
||||
validConstraintRegex = regexp.MustCompile(fmt.Sprintf(
|
||||
`^(\s*(%s)\s*(%s)\s*\,?)+$`,
|
||||
strings.Join(ops, "|"),
|
||||
ops,
|
||||
cvRegex))
|
||||
}
|
||||
|
||||
|
||||
2
vendor/github.com/Masterminds/squirrel/go.mod
generated
vendored
2
vendor/github.com/Masterminds/squirrel/go.mod
generated
vendored
@@ -1,5 +1,7 @@
|
||||
module github.com/Masterminds/squirrel
|
||||
|
||||
go 1.14
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0
|
||||
|
||||
10
vendor/github.com/Masterminds/squirrel/select.go
generated
vendored
10
vendor/github.com/Masterminds/squirrel/select.go
generated
vendored
@@ -304,6 +304,16 @@ func (b SelectBuilder) RightJoin(join string, rest ...interface{}) SelectBuilder
|
||||
return b.JoinClause("RIGHT JOIN "+join, rest...)
|
||||
}
|
||||
|
||||
// InnerJoin adds a INNER JOIN clause to the query.
|
||||
func (b SelectBuilder) InnerJoin(join string, rest ...interface{}) SelectBuilder {
|
||||
return b.JoinClause("INNER JOIN "+join, rest...)
|
||||
}
|
||||
|
||||
// CrossJoin adds a CROSS JOIN clause to the query.
|
||||
func (b SelectBuilder) CrossJoin(join string, rest ...interface{}) SelectBuilder {
|
||||
return b.JoinClause("CROSS JOIN "+join, rest...)
|
||||
}
|
||||
|
||||
// Where adds an expression to the WHERE clause of the query.
|
||||
//
|
||||
// Expressions are ANDed together in the generated SQL.
|
||||
|
||||
6
vendor/github.com/Masterminds/squirrel/update.go
generated
vendored
6
vendor/github.com/Masterminds/squirrel/update.go
generated
vendored
@@ -86,7 +86,11 @@ func (d *updateData) ToSql() (sqlStr string, args []interface{}, err error) {
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
valSql = vsql
|
||||
if _, ok := vs.(SelectBuilder); ok {
|
||||
valSql = fmt.Sprintf("(%s)", vsql)
|
||||
} else {
|
||||
valSql = vsql
|
||||
}
|
||||
args = append(args, vargs...)
|
||||
} else {
|
||||
valSql = "?"
|
||||
|
||||
6
vendor/github.com/PuerkitoBio/goquery/README.md
generated
vendored
6
vendor/github.com/PuerkitoBio/goquery/README.md
generated
vendored
@@ -37,6 +37,7 @@ Please note that because of the net/html dependency, goquery requires Go1.1+.
|
||||
|
||||
**Note that goquery's API is now stable, and will not break.**
|
||||
|
||||
* **2020-10-08 (v1.6.0)** : Parse html in context of the container node for all functions that deal with html strings (`AfterHtml`, `AppendHtml`, etc.). Thanks to [@thiemok][thiemok] and [@davidjwilkins][djw] for their work on this.
|
||||
* **2020-02-04 (v1.5.1)** : Update module dependencies.
|
||||
* **2018-11-15 (v1.5.0)** : Go module support (thanks @Zaba505).
|
||||
* **2018-06-07 (v1.4.1)** : Add `NewDocumentFromReader` examples.
|
||||
@@ -143,8 +144,9 @@ func main() {
|
||||
- [gnulnx/goperf](https://github.com/gnulnx/goperf), a website performance test tool that also fetches static assets.
|
||||
- [MontFerret/ferret](https://github.com/MontFerret/ferret), declarative web scraping.
|
||||
- [tacusci/berrycms](https://github.com/tacusci/berrycms), a modern simple to use CMS with easy to write plugins
|
||||
- [Dataflow kit](https://github.com/slotix/dataflowkit), Web Scraping framework for Gophers.
|
||||
- [Dataflow kit](https://github.com/slotix/dataflowkit), Web Scraping framework for Gophers.
|
||||
- [Geziyor](https://github.com/geziyor/geziyor), a fast web crawling & scraping framework for Go. Supports JS rendering.
|
||||
- [Pagser](https://github.com/foolin/pagser), a simple, easy, extensible, configurable HTML parser to struct based on goquery and struct tags.
|
||||
|
||||
## Support
|
||||
|
||||
@@ -181,3 +183,5 @@ The [BSD 3-Clause license][bsd], the same as the [Go language][golic]. Cascadia'
|
||||
[thatguystone]: https://github.com/thatguystone
|
||||
[piotr]: https://github.com/piotrkowalczuk
|
||||
[goq]: https://github.com/andrewstuart/goq
|
||||
[thiemok]: https://github.com/thiemok
|
||||
[djw]: https://github.com/davidjwilkins
|
||||
|
||||
164
vendor/github.com/PuerkitoBio/goquery/manipulation.go
generated
vendored
164
vendor/github.com/PuerkitoBio/goquery/manipulation.go
generated
vendored
@@ -39,8 +39,15 @@ func (s *Selection) AfterSelection(sel *Selection) *Selection {
|
||||
// AfterHtml parses the html and inserts it after the set of matched elements.
|
||||
//
|
||||
// This follows the same rules as Selection.Append.
|
||||
func (s *Selection) AfterHtml(html string) *Selection {
|
||||
return s.AfterNodes(parseHtml(html)...)
|
||||
func (s *Selection) AfterHtml(htmlStr string) *Selection {
|
||||
return s.eachNodeHtml(htmlStr, true, func(node *html.Node, nodes []*html.Node) {
|
||||
nextSibling := node.NextSibling
|
||||
for _, n := range nodes {
|
||||
if node.Parent != nil {
|
||||
node.Parent.InsertBefore(n, nextSibling)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// AfterNodes inserts the nodes after each element in the set of matched elements.
|
||||
@@ -85,8 +92,12 @@ func (s *Selection) AppendSelection(sel *Selection) *Selection {
|
||||
}
|
||||
|
||||
// AppendHtml parses the html and appends it to the set of matched elements.
|
||||
func (s *Selection) AppendHtml(html string) *Selection {
|
||||
return s.AppendNodes(parseHtml(html)...)
|
||||
func (s *Selection) AppendHtml(htmlStr string) *Selection {
|
||||
return s.eachNodeHtml(htmlStr, false, func(node *html.Node, nodes []*html.Node) {
|
||||
for _, n := range nodes {
|
||||
node.AppendChild(n)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// AppendNodes appends the specified nodes to each node in the set of matched elements.
|
||||
@@ -123,8 +134,14 @@ func (s *Selection) BeforeSelection(sel *Selection) *Selection {
|
||||
// BeforeHtml parses the html and inserts it before the set of matched elements.
|
||||
//
|
||||
// This follows the same rules as Selection.Append.
|
||||
func (s *Selection) BeforeHtml(html string) *Selection {
|
||||
return s.BeforeNodes(parseHtml(html)...)
|
||||
func (s *Selection) BeforeHtml(htmlStr string) *Selection {
|
||||
return s.eachNodeHtml(htmlStr, true, func(node *html.Node, nodes []*html.Node) {
|
||||
for _, n := range nodes {
|
||||
if node.Parent != nil {
|
||||
node.Parent.InsertBefore(n, node)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// BeforeNodes inserts the nodes before each element in the set of matched elements.
|
||||
@@ -184,8 +201,13 @@ func (s *Selection) PrependSelection(sel *Selection) *Selection {
|
||||
}
|
||||
|
||||
// PrependHtml parses the html and prepends it to the set of matched elements.
|
||||
func (s *Selection) PrependHtml(html string) *Selection {
|
||||
return s.PrependNodes(parseHtml(html)...)
|
||||
func (s *Selection) PrependHtml(htmlStr string) *Selection {
|
||||
return s.eachNodeHtml(htmlStr, false, func(node *html.Node, nodes []*html.Node) {
|
||||
firstChild := node.FirstChild
|
||||
for _, n := range nodes {
|
||||
node.InsertBefore(n, firstChild)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// PrependNodes prepends the specified nodes to each node in the set of
|
||||
@@ -212,14 +234,19 @@ func (s *Selection) Remove() *Selection {
|
||||
return s
|
||||
}
|
||||
|
||||
// RemoveFiltered removes the set of matched elements by selector.
|
||||
// It returns the Selection of removed nodes.
|
||||
// RemoveFiltered removes from the current set of matched elements those that
|
||||
// match the selector filter. It returns the Selection of removed nodes.
|
||||
//
|
||||
// For example if the selection s contains "<h1>", "<h2>" and "<h3>"
|
||||
// and s.RemoveFiltered("h2") is called, only the "<h2>" node is removed
|
||||
// (and returned), while "<h1>" and "<h3>" are kept in the document.
|
||||
func (s *Selection) RemoveFiltered(selector string) *Selection {
|
||||
return s.RemoveMatcher(compileMatcher(selector))
|
||||
}
|
||||
|
||||
// RemoveMatcher removes the set of matched elements.
|
||||
// It returns the Selection of removed nodes.
|
||||
// RemoveMatcher removes from the current set of matched elements those that
|
||||
// match the Matcher filter. It returns the Selection of removed nodes.
|
||||
// See RemoveFiltered for additional information.
|
||||
func (s *Selection) RemoveMatcher(m Matcher) *Selection {
|
||||
return s.FilterMatcher(m).Remove()
|
||||
}
|
||||
@@ -256,8 +283,16 @@ func (s *Selection) ReplaceWithSelection(sel *Selection) *Selection {
|
||||
// It returns the removed elements.
|
||||
//
|
||||
// This follows the same rules as Selection.Append.
|
||||
func (s *Selection) ReplaceWithHtml(html string) *Selection {
|
||||
return s.ReplaceWithNodes(parseHtml(html)...)
|
||||
func (s *Selection) ReplaceWithHtml(htmlStr string) *Selection {
|
||||
s.eachNodeHtml(htmlStr, true, func(node *html.Node, nodes []*html.Node) {
|
||||
nextSibling := node.NextSibling
|
||||
for _, n := range nodes {
|
||||
if node.Parent != nil {
|
||||
node.Parent.InsertBefore(n, nextSibling)
|
||||
}
|
||||
}
|
||||
})
|
||||
return s.Remove()
|
||||
}
|
||||
|
||||
// ReplaceWithNodes replaces each element in the set of matched elements with
|
||||
@@ -272,8 +307,17 @@ func (s *Selection) ReplaceWithNodes(ns ...*html.Node) *Selection {
|
||||
|
||||
// SetHtml sets the html content of each element in the selection to
|
||||
// specified html string.
|
||||
func (s *Selection) SetHtml(html string) *Selection {
|
||||
return setHtmlNodes(s, parseHtml(html)...)
|
||||
func (s *Selection) SetHtml(htmlStr string) *Selection {
|
||||
for _, context := range s.Nodes {
|
||||
for c := context.FirstChild; c != nil; c = context.FirstChild {
|
||||
context.RemoveChild(c)
|
||||
}
|
||||
}
|
||||
return s.eachNodeHtml(htmlStr, false, func(node *html.Node, nodes []*html.Node) {
|
||||
for _, n := range nodes {
|
||||
node.AppendChild(n)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// SetText sets the content of each element in the selection to specified content.
|
||||
@@ -329,8 +373,23 @@ func (s *Selection) WrapSelection(sel *Selection) *Selection {
|
||||
// most child of the given HTML.
|
||||
//
|
||||
// It returns the original set of elements.
|
||||
func (s *Selection) WrapHtml(html string) *Selection {
|
||||
return s.wrapNodes(parseHtml(html)...)
|
||||
func (s *Selection) WrapHtml(htmlStr string) *Selection {
|
||||
nodesMap := make(map[string][]*html.Node)
|
||||
for _, context := range s.Nodes {
|
||||
var parent *html.Node
|
||||
if context.Parent != nil {
|
||||
parent = context.Parent
|
||||
} else {
|
||||
parent = &html.Node{Type: html.ElementNode}
|
||||
}
|
||||
nodes, found := nodesMap[nodeName(parent)]
|
||||
if !found {
|
||||
nodes = parseHtmlWithContext(htmlStr, parent)
|
||||
nodesMap[nodeName(parent)] = nodes
|
||||
}
|
||||
newSingleSelection(context, s.document).wrapAllNodes(cloneNodes(nodes)...)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// WrapNode wraps each element in the set of matched elements inside the inner-
|
||||
@@ -382,8 +441,18 @@ func (s *Selection) WrapAllSelection(sel *Selection) *Selection {
|
||||
// document.
|
||||
//
|
||||
// It returns the original set of elements.
|
||||
func (s *Selection) WrapAllHtml(html string) *Selection {
|
||||
return s.wrapAllNodes(parseHtml(html)...)
|
||||
func (s *Selection) WrapAllHtml(htmlStr string) *Selection {
|
||||
var context *html.Node
|
||||
var nodes []*html.Node
|
||||
if len(s.Nodes) > 0 {
|
||||
context = s.Nodes[0]
|
||||
if context.Parent != nil {
|
||||
nodes = parseHtmlWithContext(htmlStr, context)
|
||||
} else {
|
||||
nodes = parseHtml(htmlStr)
|
||||
}
|
||||
}
|
||||
return s.wrapAllNodes(nodes...)
|
||||
}
|
||||
|
||||
func (s *Selection) wrapAllNodes(ns ...*html.Node) *Selection {
|
||||
@@ -452,8 +521,17 @@ func (s *Selection) WrapInnerSelection(sel *Selection) *Selection {
|
||||
// cloned before being inserted into the document.
|
||||
//
|
||||
// It returns the original set of elements.
|
||||
func (s *Selection) WrapInnerHtml(html string) *Selection {
|
||||
return s.wrapInnerNodes(parseHtml(html)...)
|
||||
func (s *Selection) WrapInnerHtml(htmlStr string) *Selection {
|
||||
nodesMap := make(map[string][]*html.Node)
|
||||
for _, context := range s.Nodes {
|
||||
nodes, found := nodesMap[nodeName(context)]
|
||||
if !found {
|
||||
nodes = parseHtmlWithContext(htmlStr, context)
|
||||
nodesMap[nodeName(context)] = nodes
|
||||
}
|
||||
newSingleSelection(context, s.document).wrapInnerNodes(cloneNodes(nodes)...)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// WrapInnerNode wraps an HTML structure, matched by the given selector, around
|
||||
@@ -493,16 +571,14 @@ func parseHtml(h string) []*html.Node {
|
||||
return nodes
|
||||
}
|
||||
|
||||
func setHtmlNodes(s *Selection, ns ...*html.Node) *Selection {
|
||||
for _, n := range s.Nodes {
|
||||
for c := n.FirstChild; c != nil; c = n.FirstChild {
|
||||
n.RemoveChild(c)
|
||||
}
|
||||
for _, c := range ns {
|
||||
n.AppendChild(cloneNode(c))
|
||||
}
|
||||
func parseHtmlWithContext(h string, context *html.Node) []*html.Node {
|
||||
// Errors are only returned when the io.Reader returns any error besides
|
||||
// EOF, but strings.Reader never will
|
||||
nodes, err := html.ParseFragment(strings.NewReader(h), context)
|
||||
if err != nil {
|
||||
panic("goquery: failed to parse HTML: " + err.Error())
|
||||
}
|
||||
return s
|
||||
return nodes
|
||||
}
|
||||
|
||||
// Get the first child that is an ElementNode
|
||||
@@ -572,3 +648,29 @@ func (s *Selection) manipulateNodes(ns []*html.Node, reverse bool,
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// eachNodeHtml parses the given html string and inserts the resulting nodes in the dom with the mergeFn.
|
||||
// The parsed nodes are inserted for each element of the selection.
|
||||
// isParent can be used to indicate that the elements of the selection should be treated as the parent for the parsed html.
|
||||
// A cache is used to avoid parsing the html multiple times should the elements of the selection result in the same context.
|
||||
func (s *Selection) eachNodeHtml(htmlStr string, isParent bool, mergeFn func(n *html.Node, nodes []*html.Node)) *Selection {
|
||||
// cache to avoid parsing the html for the same context multiple times
|
||||
nodeCache := make(map[string][]*html.Node)
|
||||
var context *html.Node
|
||||
for _, n := range s.Nodes {
|
||||
if isParent {
|
||||
context = n.Parent
|
||||
} else {
|
||||
context = n
|
||||
}
|
||||
if context != nil {
|
||||
nodes, found := nodeCache[nodeName(context)]
|
||||
if !found {
|
||||
nodes = parseHtmlWithContext(htmlStr, context)
|
||||
nodeCache[nodeName(context)] = nodes
|
||||
}
|
||||
mergeFn(n, cloneNodes(nodes))
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
18
vendor/github.com/PuerkitoBio/goquery/utilities.go
generated
vendored
18
vendor/github.com/PuerkitoBio/goquery/utilities.go
generated
vendored
@@ -36,12 +36,22 @@ func NodeName(s *Selection) string {
|
||||
if s.Length() == 0 {
|
||||
return ""
|
||||
}
|
||||
switch n := s.Get(0); n.Type {
|
||||
return nodeName(s.Get(0))
|
||||
}
|
||||
|
||||
// nodeName returns the node name of the given html node.
|
||||
// See NodeName for additional details on behaviour.
|
||||
func nodeName(node *html.Node) string {
|
||||
if node == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
switch node.Type {
|
||||
case html.ElementNode, html.DoctypeNode:
|
||||
return n.Data
|
||||
return node.Data
|
||||
default:
|
||||
if n.Type >= 0 && int(n.Type) < len(nodeNames) {
|
||||
return nodeNames[n.Type]
|
||||
if node.Type >= 0 && int(node.Type) < len(nodeNames) {
|
||||
return nodeNames[node.Type]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
1
vendor/github.com/RoaringBitmap/roaring/.travis.yml
generated
vendored
1
vendor/github.com/RoaringBitmap/roaring/.travis.yml
generated
vendored
@@ -8,7 +8,6 @@ install:
|
||||
notifications:
|
||||
email: false
|
||||
go:
|
||||
- "1.12.x"
|
||||
- "1.13.x"
|
||||
- "1.14.x"
|
||||
- tip
|
||||
|
||||
4
vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS
generated
vendored
4
vendor/github.com/RoaringBitmap/roaring/CONTRIBUTORS
generated
vendored
@@ -13,4 +13,6 @@ Forud Ghafouri (@fzerorubigd),
|
||||
Joe Nall (@joenall),
|
||||
(@fredim),
|
||||
Edd Robinson (@e-dard),
|
||||
Alexander Petrov (@alldroll)
|
||||
Alexander Petrov (@alldroll),
|
||||
Guy Molinari (@guymolinari),
|
||||
Ling Jin (@JinLingChristopher)
|
||||
|
||||
88
vendor/github.com/RoaringBitmap/roaring/README.md
generated
vendored
88
vendor/github.com/RoaringBitmap/roaring/README.md
generated
vendored
@@ -3,7 +3,6 @@ roaring [](https
|
||||

|
||||

|
||||

|
||||

|
||||
=============
|
||||
|
||||
This is a go version of the Roaring bitmap data structure.
|
||||
@@ -56,6 +55,93 @@ This code is licensed under Apache License, Version 2.0 (ASL2.0).
|
||||
|
||||
Copyright 2016-... by the authors.
|
||||
|
||||
When should you use a bitmap?
|
||||
===================================
|
||||
|
||||
|
||||
Sets are a fundamental abstraction in
|
||||
software. They can be implemented in various
|
||||
ways, as hash sets, as trees, and so forth.
|
||||
In databases and search engines, sets are often an integral
|
||||
part of indexes. For example, we may need to maintain a set
|
||||
of all documents or rows (represented by numerical identifier)
|
||||
that satisfy some property. Besides adding or removing
|
||||
elements from the set, we need fast functions
|
||||
to compute the intersection, the union, the difference between sets, and so on.
|
||||
|
||||
|
||||
To implement a set
|
||||
of integers, a particularly appealing strategy is the
|
||||
bitmap (also called bitset or bit vector). Using n bits,
|
||||
we can represent any set made of the integers from the range
|
||||
[0,n): the ith bit is set to one if integer i is present in the set.
|
||||
Commodity processors use words of W=32 or W=64 bits. By combining many such words, we can
|
||||
support large values of n. Intersections, unions and differences can then be implemented
|
||||
as bitwise AND, OR and ANDNOT operations.
|
||||
More complicated set functions can also be implemented as bitwise operations.
|
||||
|
||||
When the bitset approach is applicable, it can be orders of
|
||||
magnitude faster than other possible implementation of a set (e.g., as a hash set)
|
||||
while using several times less memory.
|
||||
|
||||
However, a bitset, even a compressed one is not always applicable. For example, if the
|
||||
you have 1000 random-looking integers, then a simple array might be the best representation.
|
||||
We refer to this case as the "sparse" scenario.
|
||||
|
||||
When should you use compressed bitmaps?
|
||||
===================================
|
||||
|
||||
An uncompressed BitSet can use a lot of memory. For example, if you take a BitSet
|
||||
and set the bit at position 1,000,000 to true and you have just over 100kB. That is over 100kB
|
||||
to store the position of one bit. This is wasteful even if you do not care about memory:
|
||||
suppose that you need to compute the intersection between this BitSet and another one
|
||||
that has a bit at position 1,000,001 to true, then you need to go through all these zeroes,
|
||||
whether you like it or not. That can become very wasteful.
|
||||
|
||||
This being said, there are definitively cases where attempting to use compressed bitmaps is wasteful.
|
||||
For example, if you have a small universe size. E.g., your bitmaps represent sets of integers
|
||||
from [0,n) where n is small (e.g., n=64 or n=128). If you are able to uncompressed BitSet and
|
||||
it does not blow up your memory usage, then compressed bitmaps are probably not useful
|
||||
to you. In fact, if you do not need compression, then a BitSet offers remarkable speed.
|
||||
|
||||
The sparse scenario is another use case where compressed bitmaps should not be used.
|
||||
Keep in mind that random-looking data is usually not compressible. E.g., if you have a small set of
|
||||
32-bit random integers, it is not mathematically possible to use far less than 32 bits per integer,
|
||||
and attempts at compression can be counterproductive.
|
||||
|
||||
How does Roaring compares with the alternatives?
|
||||
==================================================
|
||||
|
||||
|
||||
Most alternatives to Roaring are part of a larger family of compressed bitmaps that are run-length-encoded
|
||||
bitmaps. They identify long runs of 1s or 0s and they represent them with a marker word.
|
||||
If you have a local mix of 1s and 0, you use an uncompressed word.
|
||||
|
||||
There are many formats in this family:
|
||||
|
||||
* Oracle's BBC is an obsolete format at this point: though it may provide good compression,
|
||||
it is likely much slower than more recent alternatives due to excessive branching.
|
||||
* WAH is a patented variation on BBC that provides better performance.
|
||||
* Concise is a variation on the patented WAH. It some specific instances, it can compress
|
||||
much better than WAH (up to 2x better), but it is generally slower.
|
||||
* EWAH is both free of patent, and it is faster than all the above. On the downside, it
|
||||
does not compress quite as well. It is faster because it allows some form of "skipping"
|
||||
over uncompressed words. So though none of these formats are great at random access, EWAH
|
||||
is better than the alternatives.
|
||||
|
||||
|
||||
|
||||
There is a big problem with these formats however that can hurt you badly in some cases: there is no random access. If you want to check whether a given value is present in the set, you have to start from the beginning and "uncompress" the whole thing. This means that if you want to intersect a big set with a large set, you still have to uncompress the whole big set in the worst case...
|
||||
|
||||
Roaring solves this problem. It works in the following manner. It divides the data into chunks of 2<sup>16</sup> integers
|
||||
(e.g., [0, 2<sup>16</sup>), [2<sup>16</sup>, 2 x 2<sup>16</sup>), ...). Within a chunk, it can use an uncompressed bitmap, a simple list of integers,
|
||||
or a list of runs. Whatever format it uses, they all allow you to check for the present of any one value quickly
|
||||
(e.g., with a binary search). The net result is that Roaring can compute many operations much faster than run-length-encoded
|
||||
formats like WAH, EWAH, Concise... Maybe surprisingly, Roaring also generally offers better compression ratios.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### References
|
||||
|
||||
|
||||
38
vendor/github.com/RoaringBitmap/roaring/arraycontainer.go
generated
vendored
38
vendor/github.com/RoaringBitmap/roaring/arraycontainer.go
generated
vendored
@@ -359,28 +359,17 @@ func (ac *arrayContainer) iorArray(value2 *arrayContainer) container {
|
||||
len1 := value1.getCardinality()
|
||||
len2 := value2.getCardinality()
|
||||
maxPossibleCardinality := len1 + len2
|
||||
if maxPossibleCardinality > arrayDefaultMaxSize { // it could be a bitmap!
|
||||
bc := newBitmapContainer()
|
||||
for k := 0; k < len(value2.content); k++ {
|
||||
v := value2.content[k]
|
||||
i := uint(v) >> 6
|
||||
mask := uint64(1) << (v % 64)
|
||||
bc.bitmap[i] |= mask
|
||||
}
|
||||
for k := 0; k < len(ac.content); k++ {
|
||||
v := ac.content[k]
|
||||
i := uint(v) >> 6
|
||||
mask := uint64(1) << (v % 64)
|
||||
bc.bitmap[i] |= mask
|
||||
}
|
||||
bc.cardinality = int(popcntSlice(bc.bitmap))
|
||||
if bc.cardinality <= arrayDefaultMaxSize {
|
||||
return bc.toArrayContainer()
|
||||
}
|
||||
return bc
|
||||
}
|
||||
if maxPossibleCardinality > cap(value1.content) {
|
||||
newcontent := make([]uint16, 0, maxPossibleCardinality)
|
||||
// doubling the capacity reduces new slice allocations in the case of
|
||||
// repeated calls to iorArray().
|
||||
newSize := 2 * maxPossibleCardinality
|
||||
// the second check is to handle overly large array containers
|
||||
// and should not occur in normal usage,
|
||||
// as all array containers should be at most arrayDefaultMaxSize
|
||||
if newSize > 2*arrayDefaultMaxSize && maxPossibleCardinality <= 2*arrayDefaultMaxSize {
|
||||
newSize = 2 * arrayDefaultMaxSize
|
||||
}
|
||||
newcontent := make([]uint16, 0, newSize)
|
||||
copy(newcontent[len2:maxPossibleCardinality], ac.content[0:len1])
|
||||
ac.content = newcontent
|
||||
} else {
|
||||
@@ -388,6 +377,13 @@ func (ac *arrayContainer) iorArray(value2 *arrayContainer) container {
|
||||
}
|
||||
nl := union2by2(value1.content[len2:maxPossibleCardinality], value2.content, ac.content)
|
||||
ac.content = ac.content[:nl] // reslice to match actual used capacity
|
||||
|
||||
if nl > arrayDefaultMaxSize {
|
||||
// Only converting to a bitmap when arrayDefaultMaxSize
|
||||
// is actually exceeded minimizes conversions in the case of repeated
|
||||
// calls to iorArray().
|
||||
return ac.toBitmapContainer()
|
||||
}
|
||||
return ac
|
||||
}
|
||||
|
||||
|
||||
4
vendor/github.com/RoaringBitmap/roaring/go.mod
generated
vendored
4
vendor/github.com/RoaringBitmap/roaring/go.mod
generated
vendored
@@ -1,6 +1,6 @@
|
||||
module github.com/RoaringBitmap/roaring
|
||||
|
||||
go 1.12
|
||||
go 1.14
|
||||
|
||||
require (
|
||||
github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2
|
||||
@@ -13,4 +13,6 @@ require (
|
||||
github.com/stretchr/testify v1.4.0
|
||||
github.com/tinylib/msgp v1.1.0
|
||||
github.com/willf/bitset v1.1.10
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
|
||||
golang.org/x/tools v0.0.0-20200928182047-19e03678916f // indirect
|
||||
)
|
||||
|
||||
25
vendor/github.com/RoaringBitmap/roaring/go.sum
generated
vendored
25
vendor/github.com/RoaringBitmap/roaring/go.sum
generated
vendored
@@ -24,6 +24,31 @@ github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU=
|
||||
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc=
|
||||
github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 h1:EBZoQjiKKPaLbPrbpssUfuHtwM6KV/vb4U85g/cigFY=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200928182047-19e03678916f h1:VwGa2Wf+rHGIxvsssCkUNIyFv8jQY0VCBCNWtikoWq0=
|
||||
golang.org/x/tools v0.0.0-20200928182047-19e03678916f/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
|
||||
10
vendor/github.com/RoaringBitmap/roaring/roaring.go
generated
vendored
10
vendor/github.com/RoaringBitmap/roaring/roaring.go
generated
vendored
@@ -345,9 +345,9 @@ func newIntReverseIterator(a *Bitmap) *intReverseIterator {
|
||||
|
||||
// ManyIntIterable allows you to iterate over the values in a Bitmap
|
||||
type ManyIntIterable interface {
|
||||
// pass in a buffer to fill up with values, returns how many values were returned
|
||||
// NextMany fills buf up with values, returns how many values were returned
|
||||
NextMany(buf []uint32) int
|
||||
// pass in a buffer to fill up with 64 bit values, returns how many values were returned
|
||||
// NextMany64 fills up buf with 64 bit values, uses hs as a mask (OR), returns how many values were returned
|
||||
NextMany64(hs uint64, buf []uint64) int
|
||||
}
|
||||
|
||||
@@ -1006,7 +1006,7 @@ main:
|
||||
}
|
||||
s2 = x2.highlowcontainer.getKeyAtIndex(pos2)
|
||||
} else {
|
||||
rb.highlowcontainer.replaceKeyAndContainerAtIndex(pos1, s1, rb.highlowcontainer.getWritableContainerAtIndex(pos1).ior(x2.highlowcontainer.getContainerAtIndex(pos2)), false)
|
||||
rb.highlowcontainer.replaceKeyAndContainerAtIndex(pos1, s1, rb.highlowcontainer.getUnionedWritableContainer(pos1, x2.highlowcontainer.getContainerAtIndex(pos2)), false)
|
||||
pos1++
|
||||
pos2++
|
||||
if (pos1 == length1) || (pos2 == length2) {
|
||||
@@ -1581,7 +1581,3 @@ func (rb *Bitmap) Stats() Statistics {
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
func (rb *Bitmap) FillLeastSignificant32bits(x []uint64, i uint64, mask uint64) {
|
||||
rb.ManyIterator().NextMany64(mask, x[i:])
|
||||
}
|
||||
|
||||
11
vendor/github.com/RoaringBitmap/roaring/roaringarray.go
generated
vendored
11
vendor/github.com/RoaringBitmap/roaring/roaringarray.go
generated
vendored
@@ -328,6 +328,17 @@ func (ra *roaringArray) getFastContainerAtIndex(i int, needsWriteable bool) cont
|
||||
return c
|
||||
}
|
||||
|
||||
// getUnionedWritableContainer switches behavior for in-place Or
|
||||
// depending on whether the container requires a copy on write.
|
||||
// If it does using the non-inplace or() method leads to fewer allocations.
|
||||
func (ra *roaringArray) getUnionedWritableContainer(pos int, other container) container {
|
||||
if ra.needCopyOnWrite[pos] {
|
||||
return ra.getContainerAtIndex(pos).or(other)
|
||||
}
|
||||
return ra.getContainerAtIndex(pos).ior(other)
|
||||
|
||||
}
|
||||
|
||||
func (ra *roaringArray) getWritableContainerAtIndex(i int) container {
|
||||
if ra.needCopyOnWrite[i] {
|
||||
ra.containers[i] = ra.containers[i].clone()
|
||||
|
||||
2
vendor/github.com/RoaringBitmap/roaring/serialization_generic.go
generated
vendored
2
vendor/github.com/RoaringBitmap/roaring/serialization_generic.go
generated
vendored
@@ -1,4 +1,4 @@
|
||||
// +build !amd64,!386 appengine
|
||||
// +build !amd64,!386,!arm,!arm64,!ppc64le,!mipsle,!mips64le,!mips64p32le,!wasm appengine
|
||||
|
||||
package roaring
|
||||
|
||||
|
||||
2
vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go
generated
vendored
2
vendor/github.com/RoaringBitmap/roaring/serialization_littleendian.go
generated
vendored
@@ -1,4 +1,4 @@
|
||||
// +build 386 amd64,!appengine
|
||||
// +build 386,!appengine amd64,!appengine arm,!appengine arm64,!appengine ppc64le,!appengine mipsle,!appengine mips64le,!appengine mips64p32le,!appengine wasm,!appengine
|
||||
|
||||
package roaring
|
||||
|
||||
|
||||
60
vendor/github.com/RoaringBitmap/roaring/setutil.go
generated
vendored
60
vendor/github.com/RoaringBitmap/roaring/setutil.go
generated
vendored
@@ -135,66 +135,6 @@ func exclusiveUnion2by2(set1 []uint16, set2 []uint16, buffer []uint16) int {
|
||||
return pos
|
||||
}
|
||||
|
||||
func union2by2(set1 []uint16, set2 []uint16, buffer []uint16) int {
|
||||
pos := 0
|
||||
k1 := 0
|
||||
k2 := 0
|
||||
if 0 == len(set2) {
|
||||
buffer = buffer[:len(set1)]
|
||||
copy(buffer, set1[:])
|
||||
return len(set1)
|
||||
}
|
||||
if 0 == len(set1) {
|
||||
buffer = buffer[:len(set2)]
|
||||
copy(buffer, set2[:])
|
||||
return len(set2)
|
||||
}
|
||||
s1 := set1[k1]
|
||||
s2 := set2[k2]
|
||||
buffer = buffer[:cap(buffer)]
|
||||
for {
|
||||
if s1 < s2 {
|
||||
buffer[pos] = s1
|
||||
pos++
|
||||
k1++
|
||||
if k1 >= len(set1) {
|
||||
copy(buffer[pos:], set2[k2:])
|
||||
pos += len(set2) - k2
|
||||
break
|
||||
}
|
||||
s1 = set1[k1]
|
||||
} else if s1 == s2 {
|
||||
buffer[pos] = s1
|
||||
pos++
|
||||
k1++
|
||||
k2++
|
||||
if k1 >= len(set1) {
|
||||
copy(buffer[pos:], set2[k2:])
|
||||
pos += len(set2) - k2
|
||||
break
|
||||
}
|
||||
if k2 >= len(set2) {
|
||||
copy(buffer[pos:], set1[k1:])
|
||||
pos += len(set1) - k1
|
||||
break
|
||||
}
|
||||
s1 = set1[k1]
|
||||
s2 = set2[k2]
|
||||
} else { // if (set1[k1]>set2[k2])
|
||||
buffer[pos] = s2
|
||||
pos++
|
||||
k2++
|
||||
if k2 >= len(set2) {
|
||||
copy(buffer[pos:], set1[k1:])
|
||||
pos += len(set1) - k1
|
||||
break
|
||||
}
|
||||
s2 = set2[k2]
|
||||
}
|
||||
}
|
||||
return pos
|
||||
}
|
||||
|
||||
func union2by2Cardinality(set1 []uint16, set2 []uint16) int {
|
||||
pos := 0
|
||||
k1 := 0
|
||||
|
||||
6
vendor/github.com/RoaringBitmap/roaring/setutil_arm64.go
generated
vendored
Normal file
6
vendor/github.com/RoaringBitmap/roaring/setutil_arm64.go
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
// +build arm64,!gccgo,!appengine
|
||||
|
||||
package roaring
|
||||
|
||||
//go:noescape
|
||||
func union2by2(set1 []uint16, set2 []uint16, buffer []uint16) (size int)
|
||||
132
vendor/github.com/RoaringBitmap/roaring/setutil_arm64.s
generated
vendored
Normal file
132
vendor/github.com/RoaringBitmap/roaring/setutil_arm64.s
generated
vendored
Normal file
@@ -0,0 +1,132 @@
|
||||
// +build arm64,!gccgo,!appengine
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
|
||||
// This implements union2by2 using golang's version of arm64 assembly
|
||||
// The algorithm is very similar to the generic one,
|
||||
// but makes better use of arm64 features so is notably faster.
|
||||
// The basic algorithm structure is as follows:
|
||||
// 1. If either set is empty, copy the other set into the buffer and return the length
|
||||
// 2. Otherwise, load the first element of each set into a variable (s1 and s2).
|
||||
// 3. a. Compare the values of s1 and s2.
|
||||
// b. add the smaller one to the buffer.
|
||||
// c. perform a bounds check before incrementing.
|
||||
// If one set is finished, copy the rest of the other set over.
|
||||
// d. update s1 and or s2 to the next value, continue loop.
|
||||
//
|
||||
// Past the fact of the algorithm, this code makes use of several arm64 features
|
||||
// Condition Codes:
|
||||
// arm64's CMP operation sets 4 bits that can be used for branching,
|
||||
// rather than just true or false.
|
||||
// As a consequence, a single comparison gives enough information to distinguish the three cases
|
||||
//
|
||||
// Post-increment pointers after load/store:
|
||||
// Instructions like `MOVHU.P 2(R0), R6`
|
||||
// increment the register by a specified amount, in this example 2.
|
||||
// Because uint16's are exactly 2 bytes and the length of the slices
|
||||
// is part of the slice header,
|
||||
// there is no need to separately track the index into the slice.
|
||||
// Instead, the code can calculate the final read value and compare against that,
|
||||
// using the post-increment reads to move the pointers along.
|
||||
//
|
||||
// TODO: CALL out to memmove once the list is exhausted.
|
||||
// Right now it moves the necessary shorts so that the remaining count
|
||||
// is a multiple of 4 and then copies 64 bits at a time.
|
||||
|
||||
TEXT ·union2by2(SB), NOSPLIT, $0-80
|
||||
// R0, R1, and R2 for the pointers to the three slices
|
||||
MOVD set1+0(FP), R0
|
||||
MOVD set2+24(FP), R1
|
||||
MOVD buffer+48(FP), R2
|
||||
|
||||
//R3 and R4 will be the values at which we will have finished reading set1 and set2.
|
||||
// R3 should be R0 + 2 * set1_len+8(FP)
|
||||
MOVD set1_len+8(FP), R3
|
||||
MOVD set2_len+32(FP), R4
|
||||
|
||||
ADD R3<<1, R0, R3
|
||||
ADD R4<<1, R1, R4
|
||||
|
||||
|
||||
//Rather than counting the number of elements added separately
|
||||
//Save the starting register of buffer.
|
||||
MOVD buffer+48(FP), R5
|
||||
|
||||
// set1 is empty, just flush set2
|
||||
CMP R0, R3
|
||||
BEQ flush_right
|
||||
|
||||
// set2 is empty, just flush set1
|
||||
CMP R1, R4
|
||||
BEQ flush_left
|
||||
|
||||
// R6, R7 are the working space for s1 and s2
|
||||
MOVD ZR, R6
|
||||
MOVD ZR, R7
|
||||
|
||||
MOVHU.P 2(R0), R6
|
||||
MOVHU.P 2(R1), R7
|
||||
loop:
|
||||
|
||||
CMP R6, R7
|
||||
BEQ pop_both // R6 == R7
|
||||
BLS pop_right // R6 > R7
|
||||
//pop_left: // R6 < R7
|
||||
MOVHU.P R6, 2(R2)
|
||||
CMP R0, R3
|
||||
BEQ pop_then_flush_right
|
||||
MOVHU.P 2(R0), R6
|
||||
JMP loop
|
||||
pop_both:
|
||||
MOVHU.P R6, 2(R2) //could also use R7, since they are equal
|
||||
CMP R0, R3
|
||||
BEQ flush_right
|
||||
CMP R1, R4
|
||||
BEQ flush_left
|
||||
MOVHU.P 2(R0), R6
|
||||
MOVHU.P 2(R1), R7
|
||||
JMP loop
|
||||
pop_right:
|
||||
MOVHU.P R7, 2(R2)
|
||||
CMP R1, R4
|
||||
BEQ pop_then_flush_left
|
||||
MOVHU.P 2(R1), R7
|
||||
JMP loop
|
||||
|
||||
pop_then_flush_right:
|
||||
MOVHU.P R7, 2(R2)
|
||||
flush_right:
|
||||
MOVD R1, R0
|
||||
MOVD R4, R3
|
||||
JMP flush_left
|
||||
pop_then_flush_left:
|
||||
MOVHU.P R6, 2(R2)
|
||||
flush_left:
|
||||
CMP R0, R3
|
||||
BEQ return
|
||||
//figure out how many bytes to slough off. Must be a multiple of two
|
||||
SUB R0, R3, R4
|
||||
ANDS $6, R4
|
||||
BEQ long_flush //handles the 0 mod 8 case
|
||||
SUBS $4, R4, R4 // since possible values are 2, 4, 6, this splits evenly
|
||||
BLT pop_single // exactly the 2 case
|
||||
MOVW.P 4(R0), R6
|
||||
MOVW.P R6, 4(R2)
|
||||
BEQ long_flush // we're now aligned by 64 bits, as R4==4, otherwise 2 more
|
||||
pop_single:
|
||||
MOVHU.P 2(R0), R6
|
||||
MOVHU.P R6, 2(R2)
|
||||
long_flush:
|
||||
// at this point we know R3 - R0 is a multiple of 8.
|
||||
CMP R0, R3
|
||||
BEQ return
|
||||
MOVD.P 8(R0), R6
|
||||
MOVD.P R6, 8(R2)
|
||||
JMP long_flush
|
||||
return:
|
||||
// number of shorts written is (R5 - R2) >> 1
|
||||
SUB R5, R2
|
||||
LSR $1, R2, R2
|
||||
MOVD R2, size+72(FP)
|
||||
RET
|
||||
63
vendor/github.com/RoaringBitmap/roaring/setutil_generic.go
generated
vendored
Normal file
63
vendor/github.com/RoaringBitmap/roaring/setutil_generic.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
// +build !arm64 gccgo appengine
|
||||
|
||||
package roaring
|
||||
|
||||
func union2by2(set1 []uint16, set2 []uint16, buffer []uint16) int {
|
||||
pos := 0
|
||||
k1 := 0
|
||||
k2 := 0
|
||||
if 0 == len(set2) {
|
||||
buffer = buffer[:len(set1)]
|
||||
copy(buffer, set1[:])
|
||||
return len(set1)
|
||||
}
|
||||
if 0 == len(set1) {
|
||||
buffer = buffer[:len(set2)]
|
||||
copy(buffer, set2[:])
|
||||
return len(set2)
|
||||
}
|
||||
s1 := set1[k1]
|
||||
s2 := set2[k2]
|
||||
buffer = buffer[:cap(buffer)]
|
||||
for {
|
||||
if s1 < s2 {
|
||||
buffer[pos] = s1
|
||||
pos++
|
||||
k1++
|
||||
if k1 >= len(set1) {
|
||||
copy(buffer[pos:], set2[k2:])
|
||||
pos += len(set2) - k2
|
||||
break
|
||||
}
|
||||
s1 = set1[k1]
|
||||
} else if s1 == s2 {
|
||||
buffer[pos] = s1
|
||||
pos++
|
||||
k1++
|
||||
k2++
|
||||
if k1 >= len(set1) {
|
||||
copy(buffer[pos:], set2[k2:])
|
||||
pos += len(set2) - k2
|
||||
break
|
||||
}
|
||||
if k2 >= len(set2) {
|
||||
copy(buffer[pos:], set1[k1:])
|
||||
pos += len(set1) - k1
|
||||
break
|
||||
}
|
||||
s1 = set1[k1]
|
||||
s2 = set2[k2]
|
||||
} else { // if (set1[k1]>set2[k2])
|
||||
buffer[pos] = s2
|
||||
pos++
|
||||
k2++
|
||||
if k2 >= len(set2) {
|
||||
copy(buffer[pos:], set1[k1:])
|
||||
pos += len(set1) - k1
|
||||
break
|
||||
}
|
||||
s2 = set2[k2]
|
||||
}
|
||||
}
|
||||
return pos
|
||||
}
|
||||
2
vendor/github.com/andybalholm/brotli/README.md
generated
vendored
2
vendor/github.com/andybalholm/brotli/README.md
generated
vendored
@@ -3,3 +3,5 @@ It was translated from the reference implementation (https://github.com/google/b
|
||||
with the `c2go` tool at https://github.com/andybalholm/c2go.
|
||||
|
||||
I am using it in production with https://github.com/andybalholm/redwood.
|
||||
|
||||
API documentation is found at https://pkg.go.dev/github.com/andybalholm/brotli?tab=doc.
|
||||
|
||||
36
vendor/github.com/andybalholm/brotli/backward_references.go
generated
vendored
36
vendor/github.com/andybalholm/brotli/backward_references.go
generated
vendored
@@ -1,5 +1,9 @@
|
||||
package brotli
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
/* Copyright 2013 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
@@ -31,13 +35,10 @@ func computeDistanceCode(distance uint, max_distance uint, dist_cache []int) uin
|
||||
return distance + numDistanceShortCodes - 1
|
||||
}
|
||||
|
||||
/* "commands" points to the next output command to write to, "*num_commands" is
|
||||
initially the total amount of commands output by previous
|
||||
CreateBackwardReferences calls, and must be incremented by the amount written
|
||||
by this call. */
|
||||
func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands []command, num_commands *uint, num_literals *uint) {
|
||||
var hasherSearchResultPool sync.Pool
|
||||
|
||||
func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) {
|
||||
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
|
||||
var orig_commands []command = commands
|
||||
var insert_length uint = *last_insert_len
|
||||
var pos_end uint = position + num_bytes
|
||||
var store_end uint
|
||||
@@ -57,8 +58,14 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
|
||||
|
||||
/* Minimum score to accept a backward reference. */
|
||||
hasher.PrepareDistanceCache(dist_cache)
|
||||
var sr2 hasherSearchResult
|
||||
var sr hasherSearchResult
|
||||
sr2, _ := hasherSearchResultPool.Get().(*hasherSearchResult)
|
||||
if sr2 == nil {
|
||||
sr2 = &hasherSearchResult{}
|
||||
}
|
||||
sr, _ := hasherSearchResultPool.Get().(*hasherSearchResult)
|
||||
if sr == nil {
|
||||
sr = &hasherSearchResult{}
|
||||
}
|
||||
|
||||
for position+hasher.HashTypeLength() < pos_end {
|
||||
var max_length uint = pos_end - position
|
||||
@@ -67,7 +74,7 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
|
||||
sr.len_code_delta = 0
|
||||
sr.distance = 0
|
||||
sr.score = kMinScore
|
||||
hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, &sr)
|
||||
hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, sr)
|
||||
if sr.score > kMinScore {
|
||||
/* Found a match. Let's look for something even better ahead. */
|
||||
var delayed_backward_references_in_row int = 0
|
||||
@@ -83,14 +90,14 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
|
||||
sr2.distance = 0
|
||||
sr2.score = kMinScore
|
||||
max_distance = brotli_min_size_t(position+1, max_backward_limit)
|
||||
hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, &sr2)
|
||||
hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, sr2)
|
||||
if sr2.score >= sr.score+cost_diff_lazy {
|
||||
/* Ok, let's just write one byte for now and start a match from the
|
||||
next byte. */
|
||||
position++
|
||||
|
||||
insert_length++
|
||||
sr = sr2
|
||||
*sr = *sr2
|
||||
delayed_backward_references_in_row++
|
||||
if delayed_backward_references_in_row < 4 && position+hasher.HashTypeLength() < pos_end {
|
||||
continue
|
||||
@@ -114,8 +121,7 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
|
||||
hasher.PrepareDistanceCache(dist_cache)
|
||||
}
|
||||
|
||||
initCommand(&commands[0], ¶ms.dist, insert_length, sr.len, sr.len_code_delta, distance_code)
|
||||
commands = commands[1:]
|
||||
*commands = append(*commands, makeCommand(¶ms.dist, insert_length, sr.len, sr.len_code_delta, distance_code))
|
||||
}
|
||||
|
||||
*num_literals += insert_length
|
||||
@@ -173,5 +179,7 @@ func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte,
|
||||
|
||||
insert_length += pos_end - position
|
||||
*last_insert_len = insert_length
|
||||
*num_commands += uint(-cap(commands) + cap(orig_commands))
|
||||
|
||||
hasherSearchResultPool.Put(sr)
|
||||
hasherSearchResultPool.Put(sr2)
|
||||
}
|
||||
|
||||
37
vendor/github.com/andybalholm/brotli/backward_references_hq.go
generated
vendored
37
vendor/github.com/andybalholm/brotli/backward_references_hq.go
generated
vendored
@@ -123,14 +123,13 @@ func setCost(histogram []uint32, histogram_size uint, literal_histogram bool, co
|
||||
}
|
||||
}
|
||||
|
||||
func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint, commands []command, num_commands uint, last_insert_len uint) {
|
||||
func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint, commands []command, last_insert_len uint) {
|
||||
var histogram_literal [numLiteralSymbols]uint32
|
||||
var histogram_cmd [numCommandSymbols]uint32
|
||||
var histogram_dist [maxEffectiveDistanceAlphabetSize]uint32
|
||||
var cost_literal [numLiteralSymbols]float32
|
||||
var pos uint = position - last_insert_len
|
||||
var min_cost_cmd float32 = kInfinity
|
||||
var i uint
|
||||
var cost_cmd []float32 = self.cost_cmd_[:]
|
||||
var literal_costs []float32
|
||||
|
||||
@@ -138,7 +137,7 @@ func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbu
|
||||
histogram_cmd = [numCommandSymbols]uint32{}
|
||||
histogram_dist = [maxEffectiveDistanceAlphabetSize]uint32{}
|
||||
|
||||
for i = 0; i < num_commands; i++ {
|
||||
for i := range commands {
|
||||
var inslength uint = uint(commands[i].insert_len_)
|
||||
var copylength uint = uint(commandCopyLen(&commands[i]))
|
||||
var distcode uint = uint(commands[i].dist_prefix_) & 0x3FF
|
||||
@@ -161,7 +160,7 @@ func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbu
|
||||
setCost(histogram_cmd[:], numCommandSymbols, false, cost_cmd)
|
||||
setCost(histogram_dist[:], uint(self.distance_histogram_size), false, self.cost_dist_)
|
||||
|
||||
for i = 0; i < numCommandSymbols; i++ {
|
||||
for i := 0; i < numCommandSymbols; i++ {
|
||||
min_cost_cmd = brotli_min_float(min_cost_cmd, cost_cmd[i])
|
||||
}
|
||||
|
||||
@@ -169,10 +168,10 @@ func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbu
|
||||
{
|
||||
literal_costs = self.literal_costs_
|
||||
var literal_carry float32 = 0.0
|
||||
var num_bytes uint = self.num_bytes_
|
||||
num_bytes := int(self.num_bytes_)
|
||||
literal_costs[0] = 0.0
|
||||
for i = 0; i < num_bytes; i++ {
|
||||
literal_carry += cost_literal[ringbuffer[(position+i)&ringbuffer_mask]]
|
||||
for i := 0; i < num_bytes; i++ {
|
||||
literal_carry += cost_literal[ringbuffer[(position+uint(i))&ringbuffer_mask]]
|
||||
literal_costs[i+1] = literal_costs[i] + literal_carry
|
||||
literal_carry -= literal_costs[i+1] - literal_costs[i]
|
||||
}
|
||||
@@ -502,7 +501,9 @@ func updateNodes(num_bytes uint, block_start uint, pos uint, ringbuffer []byte,
|
||||
var cost float32 = dist_cost + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode)
|
||||
if cost < nodes[pos+len].u.cost {
|
||||
updateZopfliNode(nodes, pos, start, uint(len), len_code, dist, 0, cost)
|
||||
result = brotli_max_size_t(result, uint(len))
|
||||
if len > result {
|
||||
result = len
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -530,7 +531,7 @@ func computeShortestPathFromNodes(num_bytes uint, nodes []zopfliNode) uint {
|
||||
}
|
||||
|
||||
/* REQUIRES: nodes != NULL and len(nodes) >= num_bytes + 1 */
|
||||
func zopfliCreateCommands(num_bytes uint, block_start uint, nodes []zopfliNode, dist_cache []int, last_insert_len *uint, params *encoderParams, commands []command, num_literals *uint) {
|
||||
func zopfliCreateCommands(num_bytes uint, block_start uint, nodes []zopfliNode, dist_cache []int, last_insert_len *uint, params *encoderParams, commands *[]command, num_literals *uint) {
|
||||
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
|
||||
var pos uint = 0
|
||||
var offset uint32 = nodes[0].u.next
|
||||
@@ -552,7 +553,7 @@ func zopfliCreateCommands(num_bytes uint, block_start uint, nodes []zopfliNode,
|
||||
var max_distance uint = brotli_min_size_t(block_start+pos, max_backward_limit)
|
||||
var is_dictionary bool = (distance > max_distance+gap)
|
||||
var dist_code uint = uint(zopfliNodeDistanceCode(next))
|
||||
initCommand(&commands[i], ¶ms.dist, insert_length, copy_length, int(len_code)-int(copy_length), dist_code)
|
||||
*commands = append(*commands, makeCommand(¶ms.dist, insert_length, copy_length, int(len_code)-int(copy_length), dist_code))
|
||||
|
||||
if !is_dictionary && dist_code > 0 {
|
||||
dist_cache[3] = dist_cache[2]
|
||||
@@ -679,16 +680,16 @@ func zopfliComputeShortestPath(num_bytes uint, position uint, ringbuffer []byte,
|
||||
return computeShortestPathFromNodes(num_bytes, nodes)
|
||||
}
|
||||
|
||||
func createZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher *h10, dist_cache []int, last_insert_len *uint, commands []command, num_commands *uint, num_literals *uint) {
|
||||
func createZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher *h10, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) {
|
||||
var nodes []zopfliNode
|
||||
nodes = make([]zopfliNode, (num_bytes + 1))
|
||||
initZopfliNodes(nodes, num_bytes+1)
|
||||
*num_commands += zopfliComputeShortestPath(num_bytes, position, ringbuffer, ringbuffer_mask, params, dist_cache, hasher, nodes)
|
||||
zopfliComputeShortestPath(num_bytes, position, ringbuffer, ringbuffer_mask, params, dist_cache, hasher, nodes)
|
||||
zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals)
|
||||
nodes = nil
|
||||
}
|
||||
|
||||
func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands []command, num_commands *uint, num_literals *uint) {
|
||||
func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) {
|
||||
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
|
||||
var num_matches []uint32 = make([]uint32, num_bytes)
|
||||
var matches_size uint = 4 * num_bytes
|
||||
@@ -703,7 +704,7 @@ func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer
|
||||
var orig_num_literals uint
|
||||
var orig_last_insert_len uint
|
||||
var orig_dist_cache [4]int
|
||||
var orig_num_commands uint
|
||||
var orig_num_commands int
|
||||
var model zopfliCostModel
|
||||
var nodes []zopfliNode
|
||||
var matches []backwardMatch = make([]backwardMatch, matches_size)
|
||||
@@ -769,7 +770,7 @@ func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer
|
||||
orig_num_literals = *num_literals
|
||||
orig_last_insert_len = *last_insert_len
|
||||
copy(orig_dist_cache[:], dist_cache[:4])
|
||||
orig_num_commands = *num_commands
|
||||
orig_num_commands = len(*commands)
|
||||
nodes = make([]zopfliNode, (num_bytes + 1))
|
||||
initZopfliCostModel(&model, ¶ms.dist, num_bytes)
|
||||
for i = 0; i < 2; i++ {
|
||||
@@ -777,14 +778,14 @@ func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer
|
||||
if i == 0 {
|
||||
zopfliCostModelSetFromLiteralCosts(&model, position, ringbuffer, ringbuffer_mask)
|
||||
} else {
|
||||
zopfliCostModelSetFromCommands(&model, position, ringbuffer, ringbuffer_mask, commands, *num_commands-orig_num_commands, orig_last_insert_len)
|
||||
zopfliCostModelSetFromCommands(&model, position, ringbuffer, ringbuffer_mask, (*commands)[orig_num_commands:], orig_last_insert_len)
|
||||
}
|
||||
|
||||
*num_commands = orig_num_commands
|
||||
*commands = (*commands)[:orig_num_commands]
|
||||
*num_literals = orig_num_literals
|
||||
*last_insert_len = orig_last_insert_len
|
||||
copy(dist_cache, orig_dist_cache[:4])
|
||||
*num_commands += zopfliIterate(num_bytes, position, ringbuffer, ringbuffer_mask, params, gap, dist_cache, &model, num_matches, matches, nodes)
|
||||
zopfliIterate(num_bytes, position, ringbuffer, ringbuffer_mask, params, gap, dist_cache, &model, num_matches, matches, nodes)
|
||||
zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals)
|
||||
}
|
||||
|
||||
|
||||
37
vendor/github.com/andybalholm/brotli/block_splitter.go
generated
vendored
37
vendor/github.com/andybalholm/brotli/block_splitter.go
generated
vendored
@@ -33,23 +33,21 @@ const (
|
||||
kMinItersForRefining uint = 100
|
||||
)
|
||||
|
||||
func countLiterals(cmds []command, num_commands uint) uint {
|
||||
func countLiterals(cmds []command) uint {
|
||||
var total_length uint = 0
|
||||
/* Count how many we have. */
|
||||
|
||||
var i uint
|
||||
for i = 0; i < num_commands; i++ {
|
||||
for i := range cmds {
|
||||
total_length += uint(cmds[i].insert_len_)
|
||||
}
|
||||
|
||||
return total_length
|
||||
}
|
||||
|
||||
func copyLiteralsToByteArray(cmds []command, num_commands uint, data []byte, offset uint, mask uint, literals []byte) {
|
||||
func copyLiteralsToByteArray(cmds []command, data []byte, offset uint, mask uint, literals []byte) {
|
||||
var pos uint = 0
|
||||
var from_pos uint = offset & mask
|
||||
var i uint
|
||||
for i = 0; i < num_commands; i++ {
|
||||
for i := range cmds {
|
||||
var insert_len uint = uint(cmds[i].insert_len_)
|
||||
if from_pos+insert_len > mask {
|
||||
var head_size uint = mask + 1 - from_pos
|
||||
@@ -90,24 +88,19 @@ const clustersPerBatch = 16
|
||||
func initBlockSplit(self *blockSplit) {
|
||||
self.num_types = 0
|
||||
self.num_blocks = 0
|
||||
self.types = nil
|
||||
self.lengths = nil
|
||||
self.types = self.types[:0]
|
||||
self.lengths = self.lengths[:0]
|
||||
self.types_alloc_size = 0
|
||||
self.lengths_alloc_size = 0
|
||||
}
|
||||
|
||||
func destroyBlockSplit(self *blockSplit) {
|
||||
self.types = nil
|
||||
self.lengths = nil
|
||||
}
|
||||
|
||||
func splitBlock(cmds []command, num_commands uint, data []byte, pos uint, mask uint, params *encoderParams, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit) {
|
||||
func splitBlock(cmds []command, data []byte, pos uint, mask uint, params *encoderParams, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit) {
|
||||
{
|
||||
var literals_count uint = countLiterals(cmds, num_commands)
|
||||
var literals_count uint = countLiterals(cmds)
|
||||
var literals []byte = make([]byte, literals_count)
|
||||
|
||||
/* Create a continuous array of literals. */
|
||||
copyLiteralsToByteArray(cmds, num_commands, data, pos, mask, literals)
|
||||
copyLiteralsToByteArray(cmds, data, pos, mask, literals)
|
||||
|
||||
/* Create the block split on the array of literals.
|
||||
Literal histograms have alphabet size 256. */
|
||||
@@ -116,28 +109,26 @@ func splitBlock(cmds []command, num_commands uint, data []byte, pos uint, mask u
|
||||
literals = nil
|
||||
}
|
||||
{
|
||||
var insert_and_copy_codes []uint16 = make([]uint16, num_commands)
|
||||
var insert_and_copy_codes []uint16 = make([]uint16, len(cmds))
|
||||
/* Compute prefix codes for commands. */
|
||||
|
||||
var i uint
|
||||
for i = 0; i < num_commands; i++ {
|
||||
for i := range cmds {
|
||||
insert_and_copy_codes[i] = cmds[i].cmd_prefix_
|
||||
}
|
||||
|
||||
/* Create the block split on the array of command prefixes. */
|
||||
splitByteVectorCommand(insert_and_copy_codes, num_commands, kSymbolsPerCommandHistogram, kMaxCommandHistograms, kCommandStrideLength, kCommandBlockSwitchCost, params, insert_and_copy_split)
|
||||
splitByteVectorCommand(insert_and_copy_codes, kSymbolsPerCommandHistogram, kMaxCommandHistograms, kCommandStrideLength, kCommandBlockSwitchCost, params, insert_and_copy_split)
|
||||
|
||||
/* TODO: reuse for distances? */
|
||||
|
||||
insert_and_copy_codes = nil
|
||||
}
|
||||
{
|
||||
var distance_prefixes []uint16 = make([]uint16, num_commands)
|
||||
var distance_prefixes []uint16 = make([]uint16, len(cmds))
|
||||
var j uint = 0
|
||||
/* Create a continuous array of distance prefixes. */
|
||||
|
||||
var i uint
|
||||
for i = 0; i < num_commands; i++ {
|
||||
for i := range cmds {
|
||||
var cmd *command = &cmds[i]
|
||||
if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 {
|
||||
distance_prefixes[j] = cmd.dist_prefix_ & 0x3FF
|
||||
|
||||
3
vendor/github.com/andybalholm/brotli/block_splitter_command.go
generated
vendored
3
vendor/github.com/andybalholm/brotli/block_splitter_command.go
generated
vendored
@@ -372,7 +372,8 @@ func clusterBlocksCommand(data []uint16, length uint, num_blocks uint, block_ids
|
||||
histogram_symbols = nil
|
||||
}
|
||||
|
||||
func splitByteVectorCommand(data []uint16, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) {
|
||||
func splitByteVectorCommand(data []uint16, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) {
|
||||
length := uint(len(data))
|
||||
var data_size uint = histogramDataSizeCommand()
|
||||
var num_histograms uint = length/literals_per_histogram + 1
|
||||
var histograms []histogramCommand
|
||||
|
||||
484
vendor/github.com/andybalholm/brotli/brotli_bit_stream.go
generated
vendored
484
vendor/github.com/andybalholm/brotli/brotli_bit_stream.go
generated
vendored
File diff suppressed because it is too large
Load Diff
24
vendor/github.com/andybalholm/brotli/command.go
generated
vendored
24
vendor/github.com/andybalholm/brotli/command.go
generated
vendored
@@ -194,26 +194,28 @@ type command struct {
|
||||
}
|
||||
|
||||
/* distance_code is e.g. 0 for same-as-last short code, or 16 for offset 1. */
|
||||
func initCommand(self *command, dist *distanceParams, insertlen uint, copylen uint, copylen_code_delta int, distance_code uint) {
|
||||
func makeCommand(dist *distanceParams, insertlen uint, copylen uint, copylen_code_delta int, distance_code uint) (cmd command) {
|
||||
/* Don't rely on signed int representation, use honest casts. */
|
||||
var delta uint32 = uint32(byte(int8(copylen_code_delta)))
|
||||
self.insert_len_ = uint32(insertlen)
|
||||
self.copy_len_ = uint32(uint32(copylen) | delta<<25)
|
||||
cmd.insert_len_ = uint32(insertlen)
|
||||
cmd.copy_len_ = uint32(uint32(copylen) | delta<<25)
|
||||
|
||||
/* The distance prefix and extra bits are stored in this Command as if
|
||||
npostfix and ndirect were 0, they are only recomputed later after the
|
||||
clustering if needed. */
|
||||
prefixEncodeCopyDistance(distance_code, uint(dist.num_direct_distance_codes), uint(dist.distance_postfix_bits), &self.dist_prefix_, &self.dist_extra_)
|
||||
prefixEncodeCopyDistance(distance_code, uint(dist.num_direct_distance_codes), uint(dist.distance_postfix_bits), &cmd.dist_prefix_, &cmd.dist_extra_)
|
||||
getLengthCode(insertlen, uint(int(copylen)+copylen_code_delta), (cmd.dist_prefix_&0x3FF == 0), &cmd.cmd_prefix_)
|
||||
|
||||
getLengthCode(insertlen, uint(int(copylen)+copylen_code_delta), (self.dist_prefix_&0x3FF == 0), &self.cmd_prefix_)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func initInsertCommand(self *command, insertlen uint) {
|
||||
self.insert_len_ = uint32(insertlen)
|
||||
self.copy_len_ = 4 << 25
|
||||
self.dist_extra_ = 0
|
||||
self.dist_prefix_ = numDistanceShortCodes
|
||||
getLengthCode(insertlen, 4, false, &self.cmd_prefix_)
|
||||
func makeInsertCommand(insertlen uint) (cmd command) {
|
||||
cmd.insert_len_ = uint32(insertlen)
|
||||
cmd.copy_len_ = 4 << 25
|
||||
cmd.dist_extra_ = 0
|
||||
cmd.dist_prefix_ = numDistanceShortCodes
|
||||
getLengthCode(insertlen, 4, false, &cmd.cmd_prefix_)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func commandRestoreDistanceCode(self *command, dist *distanceParams) uint32 {
|
||||
|
||||
365
vendor/github.com/andybalholm/brotli/compress_fragment.go
generated
vendored
365
vendor/github.com/andybalholm/brotli/compress_fragment.go
generated
vendored
@@ -33,14 +33,8 @@ func hashBytesAtOffset5(v uint64, offset int, shift uint) uint32 {
|
||||
}
|
||||
|
||||
func isMatch5(p1 []byte, p2 []byte) bool {
|
||||
var i int
|
||||
for i = 0; i < 5; i++ {
|
||||
if p1[i] != p2[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
return binary.LittleEndian.Uint32(p1) == binary.LittleEndian.Uint32(p2) &&
|
||||
p1[4] == p2[4]
|
||||
}
|
||||
|
||||
/* Builds a literal prefix code into "depths" and "bits" based on the statistics
|
||||
@@ -51,7 +45,7 @@ func isMatch5(p1 []byte, p2 []byte) bool {
|
||||
and thus have to assign a non-zero depth for each literal.
|
||||
Returns estimated compression ratio millibytes/char for encoding given input
|
||||
with generated code. */
|
||||
func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, storage_ix *uint, storage []byte) uint {
|
||||
func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, bw *bitWriter) uint {
|
||||
var histogram = [256]uint32{0}
|
||||
var histogram_total uint
|
||||
var i uint
|
||||
@@ -88,7 +82,7 @@ func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte
|
||||
}
|
||||
|
||||
buildAndStoreHuffmanTreeFast(histogram[:], histogram_total, /* max_bits = */
|
||||
8, depths, bits, storage_ix, storage)
|
||||
8, depths, bits, bw)
|
||||
{
|
||||
var literal_ratio uint = 0
|
||||
for i = 0; i < 256; i++ {
|
||||
@@ -104,7 +98,7 @@ func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte
|
||||
|
||||
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
|
||||
"bits" based on "histogram" and stores it into the bit stream. */
|
||||
func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
|
||||
func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, bw *bitWriter) {
|
||||
var tree [129]huffmanTree
|
||||
var cmd_depth = [numCommandSymbols]byte{0}
|
||||
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
|
||||
@@ -151,141 +145,141 @@ func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []ui
|
||||
cmd_depth[448+8*i] = depth[56+i]
|
||||
}
|
||||
|
||||
storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage)
|
||||
storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], bw)
|
||||
}
|
||||
|
||||
storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage)
|
||||
storeHuffmanTree(depth[64:], 64, tree[:], bw)
|
||||
}
|
||||
|
||||
/* REQUIRES: insertlen < 6210 */
|
||||
func emitInsertLen1(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
|
||||
func emitInsertLen1(insertlen uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
|
||||
if insertlen < 6 {
|
||||
var code uint = insertlen + 40
|
||||
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
||||
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
||||
histo[code]++
|
||||
} else if insertlen < 130 {
|
||||
var tail uint = insertlen - 2
|
||||
var nbits uint32 = log2FloorNonZero(tail) - 1
|
||||
var prefix uint = tail >> nbits
|
||||
var inscode uint = uint((nbits << 1) + uint32(prefix) + 42)
|
||||
writeBits(uint(depth[inscode]), uint64(bits[inscode]), storage_ix, storage)
|
||||
writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
|
||||
bw.writeBits(uint(depth[inscode]), uint64(bits[inscode]))
|
||||
bw.writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits))
|
||||
histo[inscode]++
|
||||
} else if insertlen < 2114 {
|
||||
var tail uint = insertlen - 66
|
||||
var nbits uint32 = log2FloorNonZero(tail)
|
||||
var code uint = uint(nbits + 50)
|
||||
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
||||
writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
|
||||
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
||||
bw.writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits))
|
||||
histo[code]++
|
||||
} else {
|
||||
writeBits(uint(depth[61]), uint64(bits[61]), storage_ix, storage)
|
||||
writeBits(12, uint64(insertlen)-2114, storage_ix, storage)
|
||||
bw.writeBits(uint(depth[61]), uint64(bits[61]))
|
||||
bw.writeBits(12, uint64(insertlen)-2114)
|
||||
histo[61]++
|
||||
}
|
||||
}
|
||||
|
||||
func emitLongInsertLen(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
|
||||
func emitLongInsertLen(insertlen uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
|
||||
if insertlen < 22594 {
|
||||
writeBits(uint(depth[62]), uint64(bits[62]), storage_ix, storage)
|
||||
writeBits(14, uint64(insertlen)-6210, storage_ix, storage)
|
||||
bw.writeBits(uint(depth[62]), uint64(bits[62]))
|
||||
bw.writeBits(14, uint64(insertlen)-6210)
|
||||
histo[62]++
|
||||
} else {
|
||||
writeBits(uint(depth[63]), uint64(bits[63]), storage_ix, storage)
|
||||
writeBits(24, uint64(insertlen)-22594, storage_ix, storage)
|
||||
bw.writeBits(uint(depth[63]), uint64(bits[63]))
|
||||
bw.writeBits(24, uint64(insertlen)-22594)
|
||||
histo[63]++
|
||||
}
|
||||
}
|
||||
|
||||
func emitCopyLen1(copylen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
|
||||
func emitCopyLen1(copylen uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
|
||||
if copylen < 10 {
|
||||
writeBits(uint(depth[copylen+14]), uint64(bits[copylen+14]), storage_ix, storage)
|
||||
bw.writeBits(uint(depth[copylen+14]), uint64(bits[copylen+14]))
|
||||
histo[copylen+14]++
|
||||
} else if copylen < 134 {
|
||||
var tail uint = copylen - 6
|
||||
var nbits uint32 = log2FloorNonZero(tail) - 1
|
||||
var prefix uint = tail >> nbits
|
||||
var code uint = uint((nbits << 1) + uint32(prefix) + 20)
|
||||
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
||||
writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
|
||||
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
||||
bw.writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits))
|
||||
histo[code]++
|
||||
} else if copylen < 2118 {
|
||||
var tail uint = copylen - 70
|
||||
var nbits uint32 = log2FloorNonZero(tail)
|
||||
var code uint = uint(nbits + 28)
|
||||
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
||||
writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
|
||||
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
||||
bw.writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits))
|
||||
histo[code]++
|
||||
} else {
|
||||
writeBits(uint(depth[39]), uint64(bits[39]), storage_ix, storage)
|
||||
writeBits(24, uint64(copylen)-2118, storage_ix, storage)
|
||||
bw.writeBits(uint(depth[39]), uint64(bits[39]))
|
||||
bw.writeBits(24, uint64(copylen)-2118)
|
||||
histo[39]++
|
||||
}
|
||||
}
|
||||
|
||||
func emitCopyLenLastDistance1(copylen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
|
||||
func emitCopyLenLastDistance1(copylen uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
|
||||
if copylen < 12 {
|
||||
writeBits(uint(depth[copylen-4]), uint64(bits[copylen-4]), storage_ix, storage)
|
||||
bw.writeBits(uint(depth[copylen-4]), uint64(bits[copylen-4]))
|
||||
histo[copylen-4]++
|
||||
} else if copylen < 72 {
|
||||
var tail uint = copylen - 8
|
||||
var nbits uint32 = log2FloorNonZero(tail) - 1
|
||||
var prefix uint = tail >> nbits
|
||||
var code uint = uint((nbits << 1) + uint32(prefix) + 4)
|
||||
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
||||
writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits), storage_ix, storage)
|
||||
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
||||
bw.writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<<nbits))
|
||||
histo[code]++
|
||||
} else if copylen < 136 {
|
||||
var tail uint = copylen - 8
|
||||
var code uint = (tail >> 5) + 30
|
||||
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
||||
writeBits(5, uint64(tail)&31, storage_ix, storage)
|
||||
writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
|
||||
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
||||
bw.writeBits(5, uint64(tail)&31)
|
||||
bw.writeBits(uint(depth[64]), uint64(bits[64]))
|
||||
histo[code]++
|
||||
histo[64]++
|
||||
} else if copylen < 2120 {
|
||||
var tail uint = copylen - 72
|
||||
var nbits uint32 = log2FloorNonZero(tail)
|
||||
var code uint = uint(nbits + 28)
|
||||
writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage)
|
||||
writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits), storage_ix, storage)
|
||||
writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
|
||||
bw.writeBits(uint(depth[code]), uint64(bits[code]))
|
||||
bw.writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<<nbits))
|
||||
bw.writeBits(uint(depth[64]), uint64(bits[64]))
|
||||
histo[code]++
|
||||
histo[64]++
|
||||
} else {
|
||||
writeBits(uint(depth[39]), uint64(bits[39]), storage_ix, storage)
|
||||
writeBits(24, uint64(copylen)-2120, storage_ix, storage)
|
||||
writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage)
|
||||
bw.writeBits(uint(depth[39]), uint64(bits[39]))
|
||||
bw.writeBits(24, uint64(copylen)-2120)
|
||||
bw.writeBits(uint(depth[64]), uint64(bits[64]))
|
||||
histo[39]++
|
||||
histo[64]++
|
||||
}
|
||||
}
|
||||
|
||||
func emitDistance1(distance uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) {
|
||||
func emitDistance1(distance uint, depth []byte, bits []uint16, histo []uint32, bw *bitWriter) {
|
||||
var d uint = distance + 3
|
||||
var nbits uint32 = log2FloorNonZero(d) - 1
|
||||
var prefix uint = (d >> nbits) & 1
|
||||
var offset uint = (2 + prefix) << nbits
|
||||
var distcode uint = uint(2*(nbits-1) + uint32(prefix) + 80)
|
||||
writeBits(uint(depth[distcode]), uint64(bits[distcode]), storage_ix, storage)
|
||||
writeBits(uint(nbits), uint64(d)-uint64(offset), storage_ix, storage)
|
||||
bw.writeBits(uint(depth[distcode]), uint64(bits[distcode]))
|
||||
bw.writeBits(uint(nbits), uint64(d)-uint64(offset))
|
||||
histo[distcode]++
|
||||
}
|
||||
|
||||
func emitLiterals(input []byte, len uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
|
||||
func emitLiterals(input []byte, len uint, depth []byte, bits []uint16, bw *bitWriter) {
|
||||
var j uint
|
||||
for j = 0; j < len; j++ {
|
||||
var lit byte = input[j]
|
||||
writeBits(uint(depth[lit]), uint64(bits[lit]), storage_ix, storage)
|
||||
bw.writeBits(uint(depth[lit]), uint64(bits[lit]))
|
||||
}
|
||||
}
|
||||
|
||||
/* REQUIRES: len <= 1 << 24. */
|
||||
func storeMetaBlockHeader1(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) {
|
||||
func storeMetaBlockHeader1(len uint, is_uncompressed bool, bw *bitWriter) {
|
||||
var nibbles uint = 6
|
||||
|
||||
/* ISLAST */
|
||||
writeBits(1, 0, storage_ix, storage)
|
||||
bw.writeBits(1, 0)
|
||||
|
||||
if len <= 1<<16 {
|
||||
nibbles = 4
|
||||
@@ -293,34 +287,11 @@ func storeMetaBlockHeader1(len uint, is_uncompressed bool, storage_ix *uint, sto
|
||||
nibbles = 5
|
||||
}
|
||||
|
||||
writeBits(2, uint64(nibbles)-4, storage_ix, storage)
|
||||
writeBits(nibbles*4, uint64(len)-1, storage_ix, storage)
|
||||
bw.writeBits(2, uint64(nibbles)-4)
|
||||
bw.writeBits(nibbles*4, uint64(len)-1)
|
||||
|
||||
/* ISUNCOMPRESSED */
|
||||
writeSingleBit(is_uncompressed, storage_ix, storage)
|
||||
}
|
||||
|
||||
func updateBits(n_bits uint, bits uint32, pos uint, array []byte) {
|
||||
for n_bits > 0 {
|
||||
var byte_pos uint = pos >> 3
|
||||
var n_unchanged_bits uint = pos & 7
|
||||
var n_changed_bits uint = brotli_min_size_t(n_bits, 8-n_unchanged_bits)
|
||||
var total_bits uint = n_unchanged_bits + n_changed_bits
|
||||
var mask uint32 = (^((1 << total_bits) - 1)) | ((1 << n_unchanged_bits) - 1)
|
||||
var unchanged_bits uint32 = uint32(array[byte_pos]) & mask
|
||||
var changed_bits uint32 = bits & ((1 << n_changed_bits) - 1)
|
||||
array[byte_pos] = byte(changed_bits<<n_unchanged_bits | unchanged_bits)
|
||||
n_bits -= n_changed_bits
|
||||
bits >>= n_changed_bits
|
||||
pos += n_changed_bits
|
||||
}
|
||||
}
|
||||
|
||||
func rewindBitPosition1(new_storage_ix uint, storage_ix *uint, storage []byte) {
|
||||
var bitpos uint = new_storage_ix & 7
|
||||
var mask uint = (1 << bitpos) - 1
|
||||
storage[new_storage_ix>>3] &= byte(mask)
|
||||
*storage_ix = new_storage_ix
|
||||
bw.writeSingleBit(is_uncompressed)
|
||||
}
|
||||
|
||||
var shouldMergeBlock_kSampleRate uint = 43
|
||||
@@ -351,151 +322,26 @@ func shouldUseUncompressedMode(metablock_start []byte, next_emit []byte, insertl
|
||||
}
|
||||
}
|
||||
|
||||
func emitUncompressedMetaBlock1(begin []byte, end []byte, storage_ix_start uint, storage_ix *uint, storage []byte) {
|
||||
var len uint = uint(-cap(end) + cap(begin))
|
||||
rewindBitPosition1(storage_ix_start, storage_ix, storage)
|
||||
storeMetaBlockHeader1(uint(len), true, storage_ix, storage)
|
||||
*storage_ix = (*storage_ix + 7) &^ 7
|
||||
copy(storage[*storage_ix>>3:], begin[:len])
|
||||
*storage_ix += uint(len << 3)
|
||||
storage[*storage_ix>>3] = 0
|
||||
func emitUncompressedMetaBlock1(data []byte, storage_ix_start uint, bw *bitWriter) {
|
||||
bw.rewind(storage_ix_start)
|
||||
storeMetaBlockHeader1(uint(len(data)), true, bw)
|
||||
bw.jumpToByteBoundary()
|
||||
bw.writeBytes(data)
|
||||
}
|
||||
|
||||
var kCmdHistoSeed = [128]uint32{
|
||||
0,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
0,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1,
|
||||
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,
|
||||
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
|
||||
1, 1, 1, 1, 0, 0, 0, 0,
|
||||
}
|
||||
|
||||
var compressFragmentFastImpl_kFirstBlockSize uint = 3 << 15
|
||||
var compressFragmentFastImpl_kMergeBlockSize uint = 1 << 16
|
||||
|
||||
func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []int, table_bits uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) {
|
||||
func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []int, table_bits uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, bw *bitWriter) {
|
||||
var cmd_histo [128]uint32
|
||||
var ip_end int
|
||||
var next_emit int = 0
|
||||
@@ -506,7 +352,7 @@ func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []
|
||||
var metablock_start int = input
|
||||
var block_size uint = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize)
|
||||
var total_block_size uint = block_size
|
||||
var mlen_storage_ix uint = *storage_ix + 3
|
||||
var mlen_storage_ix uint = bw.getPos() + 3
|
||||
var lit_depth [256]byte
|
||||
var lit_bits [256]uint16
|
||||
var literal_ratio uint
|
||||
@@ -523,21 +369,21 @@ func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []
|
||||
|
||||
/* Save the bit position of the MLEN field of the meta-block header, so that
|
||||
we can update it later if we decide to extend this meta-block. */
|
||||
storeMetaBlockHeader1(block_size, false, storage_ix, storage)
|
||||
storeMetaBlockHeader1(block_size, false, bw)
|
||||
|
||||
/* No block splits, no contexts. */
|
||||
writeBits(13, 0, storage_ix, storage)
|
||||
bw.writeBits(13, 0)
|
||||
|
||||
literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage)
|
||||
literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], bw)
|
||||
{
|
||||
/* Store the pre-compressed command and distance prefix codes. */
|
||||
var i uint
|
||||
for i = 0; i+7 < *cmd_code_numbits; i += 8 {
|
||||
writeBits(8, uint64(cmd_code[i>>3]), storage_ix, storage)
|
||||
bw.writeBits(8, uint64(cmd_code[i>>3]))
|
||||
}
|
||||
}
|
||||
|
||||
writeBits(*cmd_code_numbits&7, uint64(cmd_code[*cmd_code_numbits>>3]), storage_ix, storage)
|
||||
bw.writeBits(*cmd_code_numbits&7, uint64(cmd_code[*cmd_code_numbits>>3]))
|
||||
|
||||
/* Initialize the command and distance histograms. We will gather
|
||||
statistics of command and distance codes during the processing
|
||||
@@ -636,27 +482,27 @@ emit_commands:
|
||||
var insert uint = uint(base - next_emit)
|
||||
ip += int(matched)
|
||||
if insert < 6210 {
|
||||
emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||
emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||
} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
|
||||
emitUncompressedMetaBlock1(in[metablock_start:], in[base:], mlen_storage_ix-3, storage_ix, storage)
|
||||
emitUncompressedMetaBlock1(in[metablock_start:base], mlen_storage_ix-3, bw)
|
||||
input_size -= uint(base - input)
|
||||
input = base
|
||||
next_emit = input
|
||||
goto next_block
|
||||
} else {
|
||||
emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||
emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||
}
|
||||
|
||||
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
|
||||
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], bw)
|
||||
if distance == last_distance {
|
||||
writeBits(uint(cmd_depth[64]), uint64(cmd_bits[64]), storage_ix, storage)
|
||||
bw.writeBits(uint(cmd_depth[64]), uint64(cmd_bits[64]))
|
||||
cmd_histo[64]++
|
||||
} else {
|
||||
emitDistance1(uint(distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||
emitDistance1(uint(distance), cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||
last_distance = distance
|
||||
}
|
||||
|
||||
emitCopyLenLastDistance1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||
emitCopyLenLastDistance1(matched, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||
|
||||
next_emit = ip
|
||||
if ip >= ip_limit {
|
||||
@@ -692,8 +538,8 @@ emit_commands:
|
||||
}
|
||||
ip += int(matched)
|
||||
last_distance = int(base - candidate) /* > 0 */
|
||||
emitCopyLen1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||
emitDistance1(uint(last_distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||
emitCopyLen1(matched, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||
emitDistance1(uint(last_distance), cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||
|
||||
next_emit = ip
|
||||
if ip >= ip_limit {
|
||||
@@ -739,7 +585,7 @@ emit_remainder:
|
||||
nibbles. */
|
||||
total_block_size += block_size
|
||||
|
||||
updateBits(20, uint32(total_block_size-1), mlen_storage_ix, storage)
|
||||
bw.updateBits(20, uint32(total_block_size-1), mlen_storage_ix)
|
||||
goto emit_commands
|
||||
}
|
||||
|
||||
@@ -747,13 +593,13 @@ emit_remainder:
|
||||
if next_emit < ip_end {
|
||||
var insert uint = uint(ip_end - next_emit)
|
||||
if insert < 6210 {
|
||||
emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
|
||||
emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], bw)
|
||||
} else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) {
|
||||
emitUncompressedMetaBlock1(in[metablock_start:], in[ip_end:], mlen_storage_ix-3, storage_ix, storage)
|
||||
emitUncompressedMetaBlock1(in[metablock_start:ip_end], mlen_storage_ix-3, bw)
|
||||
} else {
|
||||
emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage)
|
||||
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage)
|
||||
emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], bw)
|
||||
emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], bw)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -769,30 +615,29 @@ next_block:
|
||||
|
||||
/* Save the bit position of the MLEN field of the meta-block header, so that
|
||||
we can update it later if we decide to extend this meta-block. */
|
||||
mlen_storage_ix = *storage_ix + 3
|
||||
mlen_storage_ix = bw.getPos() + 3
|
||||
|
||||
storeMetaBlockHeader1(block_size, false, storage_ix, storage)
|
||||
storeMetaBlockHeader1(block_size, false, bw)
|
||||
|
||||
/* No block splits, no contexts. */
|
||||
writeBits(13, 0, storage_ix, storage)
|
||||
bw.writeBits(13, 0)
|
||||
|
||||
literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage)
|
||||
buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, storage_ix, storage)
|
||||
literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], bw)
|
||||
buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, bw)
|
||||
goto emit_commands
|
||||
}
|
||||
|
||||
if !is_last {
|
||||
/* If this is not the last block, update the command and distance prefix
|
||||
codes for the next block and store the compressed forms. */
|
||||
cmd_code[0] = 0
|
||||
|
||||
*cmd_code_numbits = 0
|
||||
buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, cmd_code_numbits, cmd_code)
|
||||
var bw bitWriter
|
||||
bw.dst = cmd_code
|
||||
buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, &bw)
|
||||
*cmd_code_numbits = bw.getPos()
|
||||
}
|
||||
}
|
||||
|
||||
/* Compresses "input" string to the "*storage" buffer as one or more complete
|
||||
meta-blocks, and updates the "*storage_ix" bit position.
|
||||
/* Compresses "input" string to bw as one or more complete meta-blocks.
|
||||
|
||||
If "is_last" is 1, emits an additional empty last meta-block.
|
||||
|
||||
@@ -813,28 +658,28 @@ next_block:
|
||||
REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two
|
||||
OUTPUT: maximal copy distance <= |input_size|
|
||||
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
|
||||
func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) {
|
||||
var initial_storage_ix uint = *storage_ix
|
||||
func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, bw *bitWriter) {
|
||||
var initial_storage_ix uint = bw.getPos()
|
||||
var table_bits uint = uint(log2FloorNonZero(table_size))
|
||||
|
||||
if input_size == 0 {
|
||||
assert(is_last)
|
||||
writeBits(1, 1, storage_ix, storage) /* islast */
|
||||
writeBits(1, 1, storage_ix, storage) /* isempty */
|
||||
*storage_ix = (*storage_ix + 7) &^ 7
|
||||
bw.writeBits(1, 1) /* islast */
|
||||
bw.writeBits(1, 1) /* isempty */
|
||||
bw.jumpToByteBoundary()
|
||||
return
|
||||
}
|
||||
|
||||
compressFragmentFastImpl(input, input_size, is_last, table, table_bits, cmd_depth, cmd_bits, cmd_code_numbits, cmd_code, storage_ix, storage)
|
||||
compressFragmentFastImpl(input, input_size, is_last, table, table_bits, cmd_depth, cmd_bits, cmd_code_numbits, cmd_code, bw)
|
||||
|
||||
/* If output is larger than single uncompressed block, rewrite it. */
|
||||
if *storage_ix-initial_storage_ix > 31+(input_size<<3) {
|
||||
emitUncompressedMetaBlock1(input, input[input_size:], initial_storage_ix, storage_ix, storage)
|
||||
if bw.getPos()-initial_storage_ix > 31+(input_size<<3) {
|
||||
emitUncompressedMetaBlock1(input[:input_size], initial_storage_ix, bw)
|
||||
}
|
||||
|
||||
if is_last {
|
||||
writeBits(1, 1, storage_ix, storage) /* islast */
|
||||
writeBits(1, 1, storage_ix, storage) /* isempty */
|
||||
*storage_ix = (*storage_ix + 7) &^ 7
|
||||
bw.writeBits(1, 1) /* islast */
|
||||
bw.writeBits(1, 1) /* isempty */
|
||||
bw.jumpToByteBoundary()
|
||||
}
|
||||
}
|
||||
|
||||
250
vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go
generated
vendored
250
vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go
generated
vendored
@@ -30,19 +30,18 @@ func hashBytesAtOffset(v uint64, offset uint, shift uint, length uint) uint32 {
|
||||
}
|
||||
|
||||
func isMatch1(p1 []byte, p2 []byte, length uint) bool {
|
||||
var i uint
|
||||
for i = 0; i < length && i < 6; i++ {
|
||||
if p1[i] != p2[i] {
|
||||
return false
|
||||
}
|
||||
if binary.LittleEndian.Uint32(p1) != binary.LittleEndian.Uint32(p2) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
if length == 4 {
|
||||
return true
|
||||
}
|
||||
return p1[4] == p2[4] && p1[5] == p2[5]
|
||||
}
|
||||
|
||||
/* Builds a command and distance prefix code (each 64 symbols) into "depth" and
|
||||
"bits" based on "histogram" and stores it into the bit stream. */
|
||||
func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) {
|
||||
func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, bw *bitWriter) {
|
||||
var tree [129]huffmanTree
|
||||
var cmd_depth = [numCommandSymbols]byte{0}
|
||||
/* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */
|
||||
@@ -88,10 +87,10 @@ func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uin
|
||||
cmd_depth[448+8*i] = depth[16+i]
|
||||
}
|
||||
|
||||
storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage)
|
||||
storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], bw)
|
||||
}
|
||||
|
||||
storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage)
|
||||
storeHuffmanTree(depth[64:], 64, tree[:], bw)
|
||||
}
|
||||
|
||||
func emitInsertLen(insertlen uint32, commands *[]uint32) {
|
||||
@@ -198,11 +197,11 @@ func emitDistance(distance uint32, commands *[]uint32) {
|
||||
}
|
||||
|
||||
/* REQUIRES: len <= 1 << 24. */
|
||||
func storeMetaBlockHeader(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) {
|
||||
func storeMetaBlockHeader(len uint, is_uncompressed bool, bw *bitWriter) {
|
||||
var nibbles uint = 6
|
||||
|
||||
/* ISLAST */
|
||||
writeBits(1, 0, storage_ix, storage)
|
||||
bw.writeBits(1, 0)
|
||||
|
||||
if len <= 1<<16 {
|
||||
nibbles = 4
|
||||
@@ -210,11 +209,11 @@ func storeMetaBlockHeader(len uint, is_uncompressed bool, storage_ix *uint, stor
|
||||
nibbles = 5
|
||||
}
|
||||
|
||||
writeBits(2, uint64(nibbles)-4, storage_ix, storage)
|
||||
writeBits(nibbles*4, uint64(len)-1, storage_ix, storage)
|
||||
bw.writeBits(2, uint64(nibbles)-4)
|
||||
bw.writeBits(nibbles*4, uint64(len)-1)
|
||||
|
||||
/* ISUNCOMPRESSED */
|
||||
writeSingleBit(is_uncompressed, storage_ix, storage)
|
||||
bw.writeSingleBit(is_uncompressed)
|
||||
}
|
||||
|
||||
func createCommands(input []byte, block_size uint, input_size uint, base_ip_ptr []byte, table []int, table_bits uint, min_match uint, literals *[]byte, commands *[]uint32) {
|
||||
@@ -441,163 +440,20 @@ emit_remainder:
|
||||
}
|
||||
|
||||
var storeCommands_kNumExtraBits = [128]uint32{
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
1,
|
||||
2,
|
||||
2,
|
||||
3,
|
||||
3,
|
||||
4,
|
||||
4,
|
||||
5,
|
||||
5,
|
||||
6,
|
||||
7,
|
||||
8,
|
||||
9,
|
||||
10,
|
||||
12,
|
||||
14,
|
||||
24,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
1,
|
||||
2,
|
||||
2,
|
||||
3,
|
||||
3,
|
||||
4,
|
||||
4,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
1,
|
||||
2,
|
||||
2,
|
||||
3,
|
||||
3,
|
||||
4,
|
||||
4,
|
||||
5,
|
||||
5,
|
||||
6,
|
||||
7,
|
||||
8,
|
||||
9,
|
||||
10,
|
||||
24,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
1,
|
||||
2,
|
||||
2,
|
||||
3,
|
||||
3,
|
||||
4,
|
||||
4,
|
||||
5,
|
||||
5,
|
||||
6,
|
||||
6,
|
||||
7,
|
||||
7,
|
||||
8,
|
||||
8,
|
||||
9,
|
||||
9,
|
||||
10,
|
||||
10,
|
||||
11,
|
||||
11,
|
||||
12,
|
||||
12,
|
||||
13,
|
||||
13,
|
||||
14,
|
||||
14,
|
||||
15,
|
||||
15,
|
||||
16,
|
||||
16,
|
||||
17,
|
||||
17,
|
||||
18,
|
||||
18,
|
||||
19,
|
||||
19,
|
||||
20,
|
||||
20,
|
||||
21,
|
||||
21,
|
||||
22,
|
||||
22,
|
||||
23,
|
||||
23,
|
||||
24,
|
||||
24,
|
||||
0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 12, 14, 24,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 24,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
|
||||
9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16,
|
||||
17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24,
|
||||
}
|
||||
var storeCommands_kInsertOffset = [24]uint32{
|
||||
0,
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6,
|
||||
8,
|
||||
10,
|
||||
14,
|
||||
18,
|
||||
26,
|
||||
34,
|
||||
50,
|
||||
66,
|
||||
98,
|
||||
130,
|
||||
194,
|
||||
322,
|
||||
578,
|
||||
1090,
|
||||
2114,
|
||||
6210,
|
||||
22594,
|
||||
0, 1, 2, 3, 4, 5, 6, 8, 10, 14, 18, 26, 34, 50, 66, 98, 130, 194, 322, 578,
|
||||
1090, 2114, 6210, 22594,
|
||||
}
|
||||
|
||||
func storeCommands(literals []byte, num_literals uint, commands []uint32, num_commands uint, storage_ix *uint, storage []byte) {
|
||||
func storeCommands(literals []byte, num_literals uint, commands []uint32, num_commands uint, bw *bitWriter) {
|
||||
var lit_depths [256]byte
|
||||
var lit_bits [256]uint16
|
||||
var lit_histo = [256]uint32{0}
|
||||
@@ -610,7 +466,7 @@ func storeCommands(literals []byte, num_literals uint, commands []uint32, num_co
|
||||
}
|
||||
|
||||
buildAndStoreHuffmanTreeFast(lit_histo[:], num_literals, /* max_bits = */
|
||||
8, lit_depths[:], lit_bits[:], storage_ix, storage)
|
||||
8, lit_depths[:], lit_bits[:], bw)
|
||||
|
||||
for i = 0; i < num_commands; i++ {
|
||||
var code uint32 = commands[i] & 0xFF
|
||||
@@ -622,21 +478,21 @@ func storeCommands(literals []byte, num_literals uint, commands []uint32, num_co
|
||||
cmd_histo[2] += 1
|
||||
cmd_histo[64] += 1
|
||||
cmd_histo[84] += 1
|
||||
buildAndStoreCommandPrefixCode(cmd_histo[:], cmd_depths[:], cmd_bits[:], storage_ix, storage)
|
||||
buildAndStoreCommandPrefixCode(cmd_histo[:], cmd_depths[:], cmd_bits[:], bw)
|
||||
|
||||
for i = 0; i < num_commands; i++ {
|
||||
var cmd uint32 = commands[i]
|
||||
var code uint32 = cmd & 0xFF
|
||||
var extra uint32 = cmd >> 8
|
||||
assert(code < 128)
|
||||
writeBits(uint(cmd_depths[code]), uint64(cmd_bits[code]), storage_ix, storage)
|
||||
writeBits(uint(storeCommands_kNumExtraBits[code]), uint64(extra), storage_ix, storage)
|
||||
bw.writeBits(uint(cmd_depths[code]), uint64(cmd_bits[code]))
|
||||
bw.writeBits(uint(storeCommands_kNumExtraBits[code]), uint64(extra))
|
||||
if code < 24 {
|
||||
var insert uint32 = storeCommands_kInsertOffset[code] + extra
|
||||
var j uint32
|
||||
for j = 0; j < insert; j++ {
|
||||
var lit byte = literals[0]
|
||||
writeBits(uint(lit_depths[lit]), uint64(lit_bits[lit]), storage_ix, storage)
|
||||
bw.writeBits(uint(lit_depths[lit]), uint64(lit_bits[lit]))
|
||||
literals = literals[1:]
|
||||
}
|
||||
}
|
||||
@@ -664,22 +520,13 @@ func shouldCompress(input []byte, input_size uint, num_literals uint) bool {
|
||||
}
|
||||
}
|
||||
|
||||
func rewindBitPosition(new_storage_ix uint, storage_ix *uint, storage []byte) {
|
||||
var bitpos uint = new_storage_ix & 7
|
||||
var mask uint = (1 << bitpos) - 1
|
||||
storage[new_storage_ix>>3] &= byte(mask)
|
||||
*storage_ix = new_storage_ix
|
||||
func emitUncompressedMetaBlock(input []byte, input_size uint, bw *bitWriter) {
|
||||
storeMetaBlockHeader(input_size, true, bw)
|
||||
bw.jumpToByteBoundary()
|
||||
bw.writeBytes(input[:input_size])
|
||||
}
|
||||
|
||||
func emitUncompressedMetaBlock(input []byte, input_size uint, storage_ix *uint, storage []byte) {
|
||||
storeMetaBlockHeader(input_size, true, storage_ix, storage)
|
||||
*storage_ix = (*storage_ix + 7) &^ 7
|
||||
copy(storage[*storage_ix>>3:], input[:input_size])
|
||||
*storage_ix += input_size << 3
|
||||
storage[*storage_ix>>3] = 0
|
||||
}
|
||||
|
||||
func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_bits uint, min_match uint, storage_ix *uint, storage []byte) {
|
||||
func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_bits uint, min_match uint, bw *bitWriter) {
|
||||
/* Save the start of the first block for position and distance computations.
|
||||
*/
|
||||
var base_ip []byte = input
|
||||
@@ -693,17 +540,17 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co
|
||||
num_literals = uint(-cap(literals) + cap(literal_buf))
|
||||
if shouldCompress(input, block_size, num_literals) {
|
||||
var num_commands uint = uint(-cap(commands) + cap(command_buf))
|
||||
storeMetaBlockHeader(block_size, false, storage_ix, storage)
|
||||
storeMetaBlockHeader(block_size, false, bw)
|
||||
|
||||
/* No block splits, no contexts. */
|
||||
writeBits(13, 0, storage_ix, storage)
|
||||
bw.writeBits(13, 0)
|
||||
|
||||
storeCommands(literal_buf, num_literals, command_buf, num_commands, storage_ix, storage)
|
||||
storeCommands(literal_buf, num_literals, command_buf, num_commands, bw)
|
||||
} else {
|
||||
/* Since we did not find many backward references and the entropy of
|
||||
the data is close to 8 bits, we can simply emit an uncompressed block.
|
||||
This makes compression speed of uncompressible data about 3x faster. */
|
||||
emitUncompressedMetaBlock(input, block_size, storage_ix, storage)
|
||||
emitUncompressedMetaBlock(input, block_size, bw)
|
||||
}
|
||||
|
||||
input = input[block_size:]
|
||||
@@ -711,8 +558,7 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co
|
||||
}
|
||||
}
|
||||
|
||||
/* Compresses "input" string to the "*storage" buffer as one or more complete
|
||||
meta-blocks, and updates the "*storage_ix" bit position.
|
||||
/* Compresses "input" string to bw as one or more complete meta-blocks.
|
||||
|
||||
If "is_last" is 1, emits an additional empty last meta-block.
|
||||
|
||||
@@ -724,8 +570,8 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co
|
||||
REQUIRES: "table_size" is a power of two
|
||||
OUTPUT: maximal copy distance <= |input_size|
|
||||
OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */
|
||||
func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, storage_ix *uint, storage []byte) {
|
||||
var initial_storage_ix uint = *storage_ix
|
||||
func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, bw *bitWriter) {
|
||||
var initial_storage_ix uint = bw.getPos()
|
||||
var table_bits uint = uint(log2FloorNonZero(table_size))
|
||||
var min_match uint
|
||||
if table_bits <= 15 {
|
||||
@@ -733,17 +579,17 @@ func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, comman
|
||||
} else {
|
||||
min_match = 6
|
||||
}
|
||||
compressFragmentTwoPassImpl(input, input_size, is_last, command_buf, literal_buf, table, table_bits, min_match, storage_ix, storage)
|
||||
compressFragmentTwoPassImpl(input, input_size, is_last, command_buf, literal_buf, table, table_bits, min_match, bw)
|
||||
|
||||
/* If output is larger than single uncompressed block, rewrite it. */
|
||||
if *storage_ix-initial_storage_ix > 31+(input_size<<3) {
|
||||
rewindBitPosition(initial_storage_ix, storage_ix, storage)
|
||||
emitUncompressedMetaBlock(input, input_size, storage_ix, storage)
|
||||
if bw.getPos()-initial_storage_ix > 31+(input_size<<3) {
|
||||
bw.rewind(initial_storage_ix)
|
||||
emitUncompressedMetaBlock(input, input_size, bw)
|
||||
}
|
||||
|
||||
if is_last {
|
||||
writeBits(1, 1, storage_ix, storage) /* islast */
|
||||
writeBits(1, 1, storage_ix, storage) /* isempty */
|
||||
*storage_ix = (*storage_ix + 7) &^ 7
|
||||
bw.writeBits(1, 1) /* islast */
|
||||
bw.writeBits(1, 1) /* isempty */
|
||||
bw.jumpToByteBoundary()
|
||||
}
|
||||
}
|
||||
|
||||
932
vendor/github.com/andybalholm/brotli/encode.go
generated
vendored
932
vendor/github.com/andybalholm/brotli/encode.go
generated
vendored
File diff suppressed because it is too large
Load Diff
13
vendor/github.com/andybalholm/brotli/entropy_encode.go
generated
vendored
13
vendor/github.com/andybalholm/brotli/entropy_encode.go
generated
vendored
@@ -24,7 +24,7 @@ func initHuffmanTree(self *huffmanTree, count uint32, left int16, right int16) {
|
||||
}
|
||||
|
||||
/* Input size optimized Shell sort. */
|
||||
type huffmanTreeComparator func(*huffmanTree, *huffmanTree) bool
|
||||
type huffmanTreeComparator func(huffmanTree, huffmanTree) bool
|
||||
|
||||
var sortHuffmanTreeItems_gaps = []uint{132, 57, 23, 10, 4, 1}
|
||||
|
||||
@@ -36,14 +36,13 @@ func sortHuffmanTreeItems(items []huffmanTree, n uint, comparator huffmanTreeCom
|
||||
var tmp huffmanTree = items[i]
|
||||
var k uint = i
|
||||
var j uint = i - 1
|
||||
for comparator(&tmp, &items[j]) {
|
||||
for comparator(tmp, items[j]) {
|
||||
items[k] = items[j]
|
||||
k = j
|
||||
tmp10 := j
|
||||
j--
|
||||
if tmp10 == 0 {
|
||||
if j == 0 {
|
||||
break
|
||||
}
|
||||
j--
|
||||
}
|
||||
|
||||
items[k] = tmp
|
||||
@@ -63,7 +62,7 @@ func sortHuffmanTreeItems(items []huffmanTree, n uint, comparator huffmanTreeCom
|
||||
for i = gap; i < n; i++ {
|
||||
var j uint = i
|
||||
var tmp huffmanTree = items[i]
|
||||
for ; j >= gap && comparator(&tmp, &items[j-gap]); j -= gap {
|
||||
for ; j >= gap && comparator(tmp, items[j-gap]); j -= gap {
|
||||
items[j] = items[j-gap]
|
||||
}
|
||||
|
||||
@@ -105,7 +104,7 @@ func setDepth(p0 int, pool []huffmanTree, depth []byte, max_depth int) bool {
|
||||
}
|
||||
|
||||
/* Sort the root nodes, least popular first. */
|
||||
func sortHuffmanTree(v0 *huffmanTree, v1 *huffmanTree) bool {
|
||||
func sortHuffmanTree(v0 huffmanTree, v1 huffmanTree) bool {
|
||||
if v0.total_count_ != v1.total_count_ {
|
||||
return v0.total_count_ < v1.total_count_
|
||||
}
|
||||
|
||||
16
vendor/github.com/andybalholm/brotli/entropy_encode_static.go
generated
vendored
16
vendor/github.com/andybalholm/brotli/entropy_encode_static.go
generated
vendored
@@ -778,8 +778,9 @@ var kStaticDistanceCodeDepth = [64]byte{
|
||||
|
||||
var kCodeLengthBits = [18]uint32{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 15, 31, 0, 11, 7}
|
||||
|
||||
func storeStaticCodeLengthCode(storage_ix *uint, storage []byte) {
|
||||
writeBits(40, 0x0000FF55555554, storage_ix, storage)
|
||||
func storeStaticCodeLengthCode(bw *bitWriter) {
|
||||
bw.writeBits(32, 0x55555554)
|
||||
bw.writeBits(8, 0xFF)
|
||||
}
|
||||
|
||||
var kZeroRepsBits = [numCommandSymbols]uint64{
|
||||
@@ -4317,9 +4318,10 @@ var kStaticCommandCodeBits = [numCommandSymbols]uint16{
|
||||
2047,
|
||||
}
|
||||
|
||||
func storeStaticCommandHuffmanTree(storage_ix *uint, storage []byte) {
|
||||
writeBits(56, 0x92624416307003, storage_ix, storage)
|
||||
writeBits(3, 0x00000000, storage_ix, storage)
|
||||
func storeStaticCommandHuffmanTree(bw *bitWriter) {
|
||||
bw.writeBits(32, 0x16307003)
|
||||
bw.writeBits(24, 0x926244)
|
||||
bw.writeBits(3, 0x00000000)
|
||||
}
|
||||
|
||||
var kStaticDistanceCodeBits = [64]uint16{
|
||||
@@ -4389,6 +4391,6 @@ var kStaticDistanceCodeBits = [64]uint16{
|
||||
63,
|
||||
}
|
||||
|
||||
func storeStaticDistanceHuffmanTree(storage_ix *uint, storage []byte) {
|
||||
writeBits(28, 0x0369DC03, storage_ix, storage)
|
||||
func storeStaticDistanceHuffmanTree(bw *bitWriter) {
|
||||
bw.writeBits(28, 0x0369DC03)
|
||||
}
|
||||
|
||||
29
vendor/github.com/andybalholm/brotli/find_match_length.go
generated
vendored
29
vendor/github.com/andybalholm/brotli/find_match_length.go
generated
vendored
@@ -1,5 +1,11 @@
|
||||
package brotli
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math/bits"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
/* Copyright 2010 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
@@ -9,6 +15,29 @@ package brotli
|
||||
/* Function to find maximal matching prefixes of strings. */
|
||||
func findMatchLengthWithLimit(s1 []byte, s2 []byte, limit uint) uint {
|
||||
var matched uint = 0
|
||||
_, _ = s1[limit-1], s2[limit-1] // bounds check
|
||||
switch runtime.GOARCH {
|
||||
case "amd64":
|
||||
// Compare 8 bytes at at time.
|
||||
for matched+8 <= limit {
|
||||
w1 := binary.LittleEndian.Uint64(s1[matched:])
|
||||
w2 := binary.LittleEndian.Uint64(s2[matched:])
|
||||
if w1 != w2 {
|
||||
return matched + uint(bits.TrailingZeros64(w1^w2)>>3)
|
||||
}
|
||||
matched += 8
|
||||
}
|
||||
case "386":
|
||||
// Compare 4 bytes at at time.
|
||||
for matched+4 <= limit {
|
||||
w1 := binary.LittleEndian.Uint32(s1[matched:])
|
||||
w2 := binary.LittleEndian.Uint32(s2[matched:])
|
||||
if w1 != w2 {
|
||||
return matched + uint(bits.TrailingZeros32(w1^w2)>>3)
|
||||
}
|
||||
matched += 4
|
||||
}
|
||||
}
|
||||
for matched < limit && s1[matched] == s2[matched] {
|
||||
matched++
|
||||
}
|
||||
|
||||
7
vendor/github.com/andybalholm/brotli/histogram.go
generated
vendored
7
vendor/github.com/andybalholm/brotli/histogram.go
generated
vendored
@@ -163,7 +163,7 @@ func initBlockSplitIterator(self *blockSplitIterator, split *blockSplit) {
|
||||
self.split_ = split
|
||||
self.idx_ = 0
|
||||
self.type_ = 0
|
||||
if split.lengths != nil {
|
||||
if len(split.lengths) > 0 {
|
||||
self.length_ = uint(split.lengths[0])
|
||||
} else {
|
||||
self.length_ = 0
|
||||
@@ -180,17 +180,16 @@ func blockSplitIteratorNext(self *blockSplitIterator) {
|
||||
self.length_--
|
||||
}
|
||||
|
||||
func buildHistogramsWithContext(cmds []command, num_commands uint, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit, ringbuffer []byte, start_pos uint, mask uint, prev_byte byte, prev_byte2 byte, context_modes []int, literal_histograms []histogramLiteral, insert_and_copy_histograms []histogramCommand, copy_dist_histograms []histogramDistance) {
|
||||
func buildHistogramsWithContext(cmds []command, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit, ringbuffer []byte, start_pos uint, mask uint, prev_byte byte, prev_byte2 byte, context_modes []int, literal_histograms []histogramLiteral, insert_and_copy_histograms []histogramCommand, copy_dist_histograms []histogramDistance) {
|
||||
var pos uint = start_pos
|
||||
var literal_it blockSplitIterator
|
||||
var insert_and_copy_it blockSplitIterator
|
||||
var dist_it blockSplitIterator
|
||||
var i uint
|
||||
|
||||
initBlockSplitIterator(&literal_it, literal_split)
|
||||
initBlockSplitIterator(&insert_and_copy_it, insert_and_copy_split)
|
||||
initBlockSplitIterator(&dist_it, dist_split)
|
||||
for i = 0; i < num_commands; i++ {
|
||||
for i := range cmds {
|
||||
var cmd *command = &cmds[i]
|
||||
var j uint
|
||||
blockSplitIteratorNext(&insert_and_copy_it)
|
||||
|
||||
28
vendor/github.com/andybalholm/brotli/memory.go
generated
vendored
28
vendor/github.com/andybalholm/brotli/memory.go
generated
vendored
@@ -23,12 +23,18 @@ func brotli_ensure_capacity_uint8_t(a *[]byte, c *uint, r uint) {
|
||||
for new_size < r {
|
||||
new_size *= 2
|
||||
}
|
||||
var new_array []byte = make([]byte, new_size)
|
||||
if *c != 0 {
|
||||
copy(new_array, (*a)[:*c])
|
||||
|
||||
if cap(*a) < int(new_size) {
|
||||
var new_array []byte = make([]byte, new_size)
|
||||
if *c != 0 {
|
||||
copy(new_array, (*a)[:*c])
|
||||
}
|
||||
|
||||
*a = new_array
|
||||
} else {
|
||||
*a = (*a)[:new_size]
|
||||
}
|
||||
|
||||
*a = new_array
|
||||
*c = new_size
|
||||
}
|
||||
}
|
||||
@@ -45,12 +51,16 @@ func brotli_ensure_capacity_uint32_t(a *[]uint32, c *uint, r uint) {
|
||||
new_size *= 2
|
||||
}
|
||||
|
||||
new_array = make([]uint32, new_size)
|
||||
if *c != 0 {
|
||||
copy(new_array, (*a)[:*c])
|
||||
}
|
||||
if cap(*a) < int(new_size) {
|
||||
new_array = make([]uint32, new_size)
|
||||
if *c != 0 {
|
||||
copy(new_array, (*a)[:*c])
|
||||
}
|
||||
|
||||
*a = new_array
|
||||
*a = new_array
|
||||
} else {
|
||||
*a = (*a)[:new_size]
|
||||
}
|
||||
*c = new_size
|
||||
}
|
||||
}
|
||||
|
||||
141
vendor/github.com/andybalholm/brotli/metablock.go
generated
vendored
141
vendor/github.com/andybalholm/brotli/metablock.go
generated
vendored
@@ -1,5 +1,9 @@
|
||||
package brotli
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
/* Copyright 2014 Google Inc. All Rights Reserved.
|
||||
|
||||
Distributed under MIT license.
|
||||
@@ -25,31 +29,30 @@ type metaBlockSplit struct {
|
||||
distance_histograms_size uint
|
||||
}
|
||||
|
||||
func initMetaBlockSplit(mb *metaBlockSplit) {
|
||||
initBlockSplit(&mb.literal_split)
|
||||
initBlockSplit(&mb.command_split)
|
||||
initBlockSplit(&mb.distance_split)
|
||||
mb.literal_context_map = nil
|
||||
mb.literal_context_map_size = 0
|
||||
mb.distance_context_map = nil
|
||||
mb.distance_context_map_size = 0
|
||||
mb.literal_histograms = nil
|
||||
mb.literal_histograms_size = 0
|
||||
mb.command_histograms = nil
|
||||
mb.command_histograms_size = 0
|
||||
mb.distance_histograms = nil
|
||||
mb.distance_histograms_size = 0
|
||||
var metaBlockPool sync.Pool
|
||||
|
||||
func getMetaBlockSplit() *metaBlockSplit {
|
||||
mb, _ := metaBlockPool.Get().(*metaBlockSplit)
|
||||
|
||||
if mb == nil {
|
||||
mb = &metaBlockSplit{}
|
||||
} else {
|
||||
initBlockSplit(&mb.literal_split)
|
||||
initBlockSplit(&mb.command_split)
|
||||
initBlockSplit(&mb.distance_split)
|
||||
mb.literal_context_map = mb.literal_context_map[:0]
|
||||
mb.literal_context_map_size = 0
|
||||
mb.distance_context_map = mb.distance_context_map[:0]
|
||||
mb.distance_context_map_size = 0
|
||||
mb.literal_histograms = mb.literal_histograms[:0]
|
||||
mb.command_histograms = mb.command_histograms[:0]
|
||||
mb.distance_histograms = mb.distance_histograms[:0]
|
||||
}
|
||||
return mb
|
||||
}
|
||||
|
||||
func destroyMetaBlockSplit(mb *metaBlockSplit) {
|
||||
destroyBlockSplit(&mb.literal_split)
|
||||
destroyBlockSplit(&mb.command_split)
|
||||
destroyBlockSplit(&mb.distance_split)
|
||||
mb.literal_context_map = nil
|
||||
mb.distance_context_map = nil
|
||||
mb.literal_histograms = nil
|
||||
mb.command_histograms = nil
|
||||
mb.distance_histograms = nil
|
||||
func freeMetaBlockSplit(mb *metaBlockSplit) {
|
||||
metaBlockPool.Put(mb)
|
||||
}
|
||||
|
||||
func initDistanceParams(params *encoderParams, npostfix uint32, ndirect uint32) {
|
||||
@@ -84,14 +87,12 @@ func initDistanceParams(params *encoderParams, npostfix uint32, ndirect uint32)
|
||||
dist_params.max_distance = uint(max_distance)
|
||||
}
|
||||
|
||||
func recomputeDistancePrefixes(cmds []command, num_commands uint, orig_params *distanceParams, new_params *distanceParams) {
|
||||
var i uint
|
||||
|
||||
func recomputeDistancePrefixes(cmds []command, orig_params *distanceParams, new_params *distanceParams) {
|
||||
if orig_params.distance_postfix_bits == new_params.distance_postfix_bits && orig_params.num_direct_distance_codes == new_params.num_direct_distance_codes {
|
||||
return
|
||||
}
|
||||
|
||||
for i = 0; i < num_commands; i++ {
|
||||
for i := range cmds {
|
||||
var cmd *command = &cmds[i]
|
||||
if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 {
|
||||
prefixEncodeCopyDistance(uint(commandRestoreDistanceCode(cmd, orig_params)), uint(new_params.num_direct_distance_codes), uint(new_params.distance_postfix_bits), &cmd.dist_prefix_, &cmd.dist_extra_)
|
||||
@@ -99,8 +100,7 @@ func recomputeDistancePrefixes(cmds []command, num_commands uint, orig_params *d
|
||||
}
|
||||
}
|
||||
|
||||
func computeDistanceCost(cmds []command, num_commands uint, orig_params *distanceParams, new_params *distanceParams, cost *float64) bool {
|
||||
var i uint
|
||||
func computeDistanceCost(cmds []command, orig_params *distanceParams, new_params *distanceParams, cost *float64) bool {
|
||||
var equal_params bool = false
|
||||
var dist_prefix uint16
|
||||
var dist_extra uint32
|
||||
@@ -112,8 +112,8 @@ func computeDistanceCost(cmds []command, num_commands uint, orig_params *distanc
|
||||
equal_params = true
|
||||
}
|
||||
|
||||
for i = 0; i < num_commands; i++ {
|
||||
var cmd *command = &cmds[i]
|
||||
for i := range cmds {
|
||||
cmd := &cmds[i]
|
||||
if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 {
|
||||
if equal_params {
|
||||
dist_prefix = cmd.dist_prefix_
|
||||
@@ -137,7 +137,7 @@ func computeDistanceCost(cmds []command, num_commands uint, orig_params *distanc
|
||||
|
||||
var buildMetaBlock_kMaxNumberOfHistograms uint = 256
|
||||
|
||||
func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParams, prev_byte byte, prev_byte2 byte, cmds []command, num_commands uint, literal_context_mode int, mb *metaBlockSplit) {
|
||||
func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParams, prev_byte byte, prev_byte2 byte, cmds []command, literal_context_mode int, mb *metaBlockSplit) {
|
||||
var distance_histograms []histogramDistance
|
||||
var literal_histograms []histogramLiteral
|
||||
var literal_context_modes []int = nil
|
||||
@@ -164,7 +164,7 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
|
||||
check_orig = false
|
||||
}
|
||||
|
||||
skip = !computeDistanceCost(cmds, num_commands, &orig_params.dist, &new_params.dist, &dist_cost)
|
||||
skip = !computeDistanceCost(cmds, &orig_params.dist, &new_params.dist, &dist_cost)
|
||||
if skip || (dist_cost > best_dist_cost) {
|
||||
break
|
||||
}
|
||||
@@ -181,7 +181,7 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
|
||||
|
||||
if check_orig {
|
||||
var dist_cost float64
|
||||
computeDistanceCost(cmds, num_commands, &orig_params.dist, &orig_params.dist, &dist_cost)
|
||||
computeDistanceCost(cmds, &orig_params.dist, &orig_params.dist, &dist_cost)
|
||||
if dist_cost < best_dist_cost {
|
||||
/* NB: currently unused; uncomment when more param tuning is added. */
|
||||
/* best_dist_cost = dist_cost; */
|
||||
@@ -189,9 +189,9 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
|
||||
}
|
||||
}
|
||||
|
||||
recomputeDistancePrefixes(cmds, num_commands, &orig_params.dist, ¶ms.dist)
|
||||
recomputeDistancePrefixes(cmds, &orig_params.dist, ¶ms.dist)
|
||||
|
||||
splitBlock(cmds, num_commands, ringbuffer, pos, mask, params, &mb.literal_split, &mb.command_split, &mb.distance_split)
|
||||
splitBlock(cmds, ringbuffer, pos, mask, params, &mb.literal_split, &mb.command_split, &mb.distance_split)
|
||||
|
||||
if !params.disable_literal_context_modeling {
|
||||
literal_context_multiplier = 1 << literalContextBits
|
||||
@@ -209,21 +209,30 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
|
||||
distance_histograms = make([]histogramDistance, distance_histograms_size)
|
||||
clearHistogramsDistance(distance_histograms, distance_histograms_size)
|
||||
|
||||
assert(mb.command_histograms == nil)
|
||||
mb.command_histograms_size = mb.command_split.num_types
|
||||
mb.command_histograms = make([]histogramCommand, (mb.command_histograms_size))
|
||||
if cap(mb.command_histograms) < int(mb.command_histograms_size) {
|
||||
mb.command_histograms = make([]histogramCommand, (mb.command_histograms_size))
|
||||
} else {
|
||||
mb.command_histograms = mb.command_histograms[:mb.command_histograms_size]
|
||||
}
|
||||
clearHistogramsCommand(mb.command_histograms, mb.command_histograms_size)
|
||||
|
||||
buildHistogramsWithContext(cmds, num_commands, &mb.literal_split, &mb.command_split, &mb.distance_split, ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_modes, literal_histograms, mb.command_histograms, distance_histograms)
|
||||
buildHistogramsWithContext(cmds, &mb.literal_split, &mb.command_split, &mb.distance_split, ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_modes, literal_histograms, mb.command_histograms, distance_histograms)
|
||||
literal_context_modes = nil
|
||||
|
||||
assert(mb.literal_context_map == nil)
|
||||
mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits
|
||||
mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
|
||||
if cap(mb.literal_context_map) < int(mb.literal_context_map_size) {
|
||||
mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
|
||||
} else {
|
||||
mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size]
|
||||
}
|
||||
|
||||
assert(mb.literal_histograms == nil)
|
||||
mb.literal_histograms_size = mb.literal_context_map_size
|
||||
mb.literal_histograms = make([]histogramLiteral, (mb.literal_histograms_size))
|
||||
if cap(mb.literal_histograms) < int(mb.literal_histograms_size) {
|
||||
mb.literal_histograms = make([]histogramLiteral, (mb.literal_histograms_size))
|
||||
} else {
|
||||
mb.literal_histograms = mb.literal_histograms[:mb.literal_histograms_size]
|
||||
}
|
||||
|
||||
clusterHistogramsLiteral(literal_histograms, literal_histograms_size, buildMetaBlock_kMaxNumberOfHistograms, mb.literal_histograms, &mb.literal_histograms_size, mb.literal_context_map)
|
||||
literal_histograms = nil
|
||||
@@ -239,13 +248,19 @@ func buildMetaBlock(ringbuffer []byte, pos uint, mask uint, params *encoderParam
|
||||
}
|
||||
}
|
||||
|
||||
assert(mb.distance_context_map == nil)
|
||||
mb.distance_context_map_size = mb.distance_split.num_types << distanceContextBits
|
||||
mb.distance_context_map = make([]uint32, (mb.distance_context_map_size))
|
||||
if cap(mb.distance_context_map) < int(mb.distance_context_map_size) {
|
||||
mb.distance_context_map = make([]uint32, (mb.distance_context_map_size))
|
||||
} else {
|
||||
mb.distance_context_map = mb.distance_context_map[:mb.distance_context_map_size]
|
||||
}
|
||||
|
||||
assert(mb.distance_histograms == nil)
|
||||
mb.distance_histograms_size = mb.distance_context_map_size
|
||||
mb.distance_histograms = make([]histogramDistance, (mb.distance_histograms_size))
|
||||
if cap(mb.distance_histograms) < int(mb.distance_histograms_size) {
|
||||
mb.distance_histograms = make([]histogramDistance, (mb.distance_histograms_size))
|
||||
} else {
|
||||
mb.distance_histograms = mb.distance_histograms[:mb.distance_histograms_size]
|
||||
}
|
||||
|
||||
clusterHistogramsDistance(distance_histograms, mb.distance_context_map_size, buildMetaBlock_kMaxNumberOfHistograms, mb.distance_histograms, &mb.distance_histograms_size, mb.distance_context_map)
|
||||
distance_histograms = nil
|
||||
@@ -298,9 +313,12 @@ func initContextBlockSplitter(self *contextBlockSplitter, alphabet_size uint, nu
|
||||
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
|
||||
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
|
||||
split.num_blocks = max_num_blocks
|
||||
assert(*histograms == nil)
|
||||
*histograms_size = max_num_types * num_contexts
|
||||
*histograms = make([]histogramLiteral, (*histograms_size))
|
||||
if histograms == nil || cap(*histograms) < int(*histograms_size) {
|
||||
*histograms = make([]histogramLiteral, (*histograms_size))
|
||||
} else {
|
||||
*histograms = (*histograms)[:*histograms_size]
|
||||
}
|
||||
self.histograms_ = *histograms
|
||||
|
||||
/* Clear only current histogram. */
|
||||
@@ -453,9 +471,12 @@ func contextBlockSplitterAddSymbol(self *contextBlockSplitter, symbol uint, cont
|
||||
|
||||
func mapStaticContexts(num_contexts uint, static_context_map []uint32, mb *metaBlockSplit) {
|
||||
var i uint
|
||||
assert(mb.literal_context_map == nil)
|
||||
mb.literal_context_map_size = mb.literal_split.num_types << literalContextBits
|
||||
mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
|
||||
if cap(mb.literal_context_map) < int(mb.literal_context_map_size) {
|
||||
mb.literal_context_map = make([]uint32, (mb.literal_context_map_size))
|
||||
} else {
|
||||
mb.literal_context_map = mb.literal_context_map[:mb.literal_context_map_size]
|
||||
}
|
||||
|
||||
for i = 0; i < mb.literal_split.num_types; i++ {
|
||||
var offset uint32 = uint32(i * num_contexts)
|
||||
@@ -466,7 +487,7 @@ func mapStaticContexts(num_contexts uint, static_context_map []uint32, mb *metaB
|
||||
}
|
||||
}
|
||||
|
||||
func buildMetaBlockGreedyInternal(ringbuffer []byte, pos uint, mask uint, prev_byte byte, prev_byte2 byte, literal_context_lut contextLUT, num_contexts uint, static_context_map []uint32, commands []command, n_commands uint, mb *metaBlockSplit) {
|
||||
func buildMetaBlockGreedyInternal(ringbuffer []byte, pos uint, mask uint, prev_byte byte, prev_byte2 byte, literal_context_lut contextLUT, num_contexts uint, static_context_map []uint32, commands []command, mb *metaBlockSplit) {
|
||||
var lit_blocks struct {
|
||||
plain blockSplitterLiteral
|
||||
ctx contextBlockSplitter
|
||||
@@ -474,8 +495,7 @@ func buildMetaBlockGreedyInternal(ringbuffer []byte, pos uint, mask uint, prev_b
|
||||
var cmd_blocks blockSplitterCommand
|
||||
var dist_blocks blockSplitterDistance
|
||||
var num_literals uint = 0
|
||||
var i uint
|
||||
for i = 0; i < n_commands; i++ {
|
||||
for i := range commands {
|
||||
num_literals += uint(commands[i].insert_len_)
|
||||
}
|
||||
|
||||
@@ -485,11 +505,10 @@ func buildMetaBlockGreedyInternal(ringbuffer []byte, pos uint, mask uint, prev_b
|
||||
initContextBlockSplitter(&lit_blocks.ctx, 256, num_contexts, 512, 400.0, num_literals, &mb.literal_split, &mb.literal_histograms, &mb.literal_histograms_size)
|
||||
}
|
||||
|
||||
initBlockSplitterCommand(&cmd_blocks, numCommandSymbols, 1024, 500.0, n_commands, &mb.command_split, &mb.command_histograms, &mb.command_histograms_size)
|
||||
initBlockSplitterDistance(&dist_blocks, 64, 512, 100.0, n_commands, &mb.distance_split, &mb.distance_histograms, &mb.distance_histograms_size)
|
||||
initBlockSplitterCommand(&cmd_blocks, numCommandSymbols, 1024, 500.0, uint(len(commands)), &mb.command_split, &mb.command_histograms, &mb.command_histograms_size)
|
||||
initBlockSplitterDistance(&dist_blocks, 64, 512, 100.0, uint(len(commands)), &mb.distance_split, &mb.distance_histograms, &mb.distance_histograms_size)
|
||||
|
||||
for i = 0; i < n_commands; i++ {
|
||||
var cmd command = commands[i]
|
||||
for _, cmd := range commands {
|
||||
var j uint
|
||||
blockSplitterAddSymbolCommand(&cmd_blocks, uint(cmd.cmd_prefix_))
|
||||
for j = uint(cmd.insert_len_); j != 0; j-- {
|
||||
@@ -530,11 +549,11 @@ func buildMetaBlockGreedyInternal(ringbuffer []byte, pos uint, mask uint, prev_b
|
||||
}
|
||||
}
|
||||
|
||||
func buildMetaBlockGreedy(ringbuffer []byte, pos uint, mask uint, prev_byte byte, prev_byte2 byte, literal_context_lut contextLUT, num_contexts uint, static_context_map []uint32, commands []command, n_commands uint, mb *metaBlockSplit) {
|
||||
func buildMetaBlockGreedy(ringbuffer []byte, pos uint, mask uint, prev_byte byte, prev_byte2 byte, literal_context_lut contextLUT, num_contexts uint, static_context_map []uint32, commands []command, mb *metaBlockSplit) {
|
||||
if num_contexts == 1 {
|
||||
buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, 1, nil, commands, n_commands, mb)
|
||||
buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, 1, nil, commands, mb)
|
||||
} else {
|
||||
buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, num_contexts, static_context_map, commands, n_commands, mb)
|
||||
buildMetaBlockGreedyInternal(ringbuffer, pos, mask, prev_byte, prev_byte2, literal_context_lut, num_contexts, static_context_map, commands, mb)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
7
vendor/github.com/andybalholm/brotli/metablock_command.go
generated
vendored
7
vendor/github.com/andybalholm/brotli/metablock_command.go
generated
vendored
@@ -43,9 +43,12 @@ func initBlockSplitterCommand(self *blockSplitterCommand, alphabet_size uint, mi
|
||||
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
|
||||
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
|
||||
self.split_.num_blocks = max_num_blocks
|
||||
assert(*histograms == nil)
|
||||
*histograms_size = max_num_types
|
||||
*histograms = make([]histogramCommand, (*histograms_size))
|
||||
if histograms == nil || cap(*histograms) < int(*histograms_size) {
|
||||
*histograms = make([]histogramCommand, (*histograms_size))
|
||||
} else {
|
||||
*histograms = (*histograms)[:*histograms_size]
|
||||
}
|
||||
self.histograms_ = *histograms
|
||||
|
||||
/* Clear only current histogram. */
|
||||
|
||||
7
vendor/github.com/andybalholm/brotli/metablock_distance.go
generated
vendored
7
vendor/github.com/andybalholm/brotli/metablock_distance.go
generated
vendored
@@ -43,9 +43,12 @@ func initBlockSplitterDistance(self *blockSplitterDistance, alphabet_size uint,
|
||||
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
|
||||
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
|
||||
self.split_.num_blocks = max_num_blocks
|
||||
assert(*histograms == nil)
|
||||
*histograms_size = max_num_types
|
||||
*histograms = make([]histogramDistance, (*histograms_size))
|
||||
if histograms == nil || cap(*histograms) < int(*histograms_size) {
|
||||
*histograms = make([]histogramDistance, *histograms_size)
|
||||
} else {
|
||||
*histograms = (*histograms)[:*histograms_size]
|
||||
}
|
||||
self.histograms_ = *histograms
|
||||
|
||||
/* Clear only current histogram. */
|
||||
|
||||
7
vendor/github.com/andybalholm/brotli/metablock_literal.go
generated
vendored
7
vendor/github.com/andybalholm/brotli/metablock_literal.go
generated
vendored
@@ -43,9 +43,12 @@ func initBlockSplitterLiteral(self *blockSplitterLiteral, alphabet_size uint, mi
|
||||
brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, max_num_blocks)
|
||||
brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, max_num_blocks)
|
||||
self.split_.num_blocks = max_num_blocks
|
||||
assert(*histograms == nil)
|
||||
*histograms_size = max_num_types
|
||||
*histograms = make([]histogramLiteral, (*histograms_size))
|
||||
if histograms == nil || cap(*histograms) < int(*histograms_size) {
|
||||
*histograms = make([]histogramLiteral, *histograms_size)
|
||||
} else {
|
||||
*histograms = (*histograms)[:*histograms_size]
|
||||
}
|
||||
self.histograms_ = *histograms
|
||||
|
||||
/* Clear only current histogram. */
|
||||
|
||||
12
vendor/github.com/andybalholm/brotli/ringbuffer.go
generated
vendored
12
vendor/github.com/andybalholm/brotli/ringbuffer.go
generated
vendored
@@ -27,10 +27,7 @@ type ringBuffer struct {
|
||||
}
|
||||
|
||||
func ringBufferInit(rb *ringBuffer) {
|
||||
rb.cur_size_ = 0
|
||||
rb.pos_ = 0
|
||||
rb.data_ = nil
|
||||
rb.buffer_ = nil
|
||||
}
|
||||
|
||||
func ringBufferSetup(params *encoderParams, rb *ringBuffer) {
|
||||
@@ -47,11 +44,16 @@ const kSlackForEightByteHashingEverywhere uint = 7
|
||||
/* Allocates or re-allocates data_ to the given length + plus some slack
|
||||
region before and after. Fills the slack regions with zeros. */
|
||||
func ringBufferInitBuffer(buflen uint32, rb *ringBuffer) {
|
||||
var new_data []byte = make([]byte, (2 + uint(buflen) + kSlackForEightByteHashingEverywhere))
|
||||
var new_data []byte
|
||||
var i uint
|
||||
size := 2 + int(buflen) + int(kSlackForEightByteHashingEverywhere)
|
||||
if cap(rb.data_) < size {
|
||||
new_data = make([]byte, size)
|
||||
} else {
|
||||
new_data = rb.data_[:size]
|
||||
}
|
||||
if rb.data_ != nil {
|
||||
copy(new_data, rb.data_[:2+rb.cur_size_+uint32(kSlackForEightByteHashingEverywhere)])
|
||||
rb.data_ = nil
|
||||
}
|
||||
|
||||
rb.data_ = new_data
|
||||
|
||||
112
vendor/github.com/andybalholm/brotli/write_bits.go
generated
vendored
112
vendor/github.com/andybalholm/brotli/write_bits.go
generated
vendored
@@ -8,49 +8,87 @@ package brotli
|
||||
|
||||
/* Write bits into a byte array. */
|
||||
|
||||
/* This function writes bits into bytes in increasing addresses, and within
|
||||
a byte least-significant-bit first.
|
||||
type bitWriter struct {
|
||||
dst []byte
|
||||
|
||||
The function can write up to 56 bits in one go with WriteBits
|
||||
Example: let's assume that 3 bits (Rs below) have been written already:
|
||||
|
||||
BYTE-0 BYTE+1 BYTE+2
|
||||
|
||||
0000 0RRR 0000 0000 0000 0000
|
||||
|
||||
Now, we could write 5 or less bits in MSB by just sifting by 3
|
||||
and OR'ing to BYTE-0.
|
||||
|
||||
For n bits, we take the last 5 bits, OR that with high bits in BYTE-0,
|
||||
and locate the rest in BYTE+1, BYTE+2, etc. */
|
||||
func writeBits(n_bits uint, bits uint64, pos *uint, array []byte) {
|
||||
var array_pos []byte = array[*pos>>3:]
|
||||
var bits_reserved_in_first_byte uint = (*pos & 7)
|
||||
/* implicit & 0xFF is assumed for uint8_t arithmetics */
|
||||
|
||||
var bits_left_to_write uint
|
||||
bits <<= bits_reserved_in_first_byte
|
||||
array_pos[0] |= byte(bits)
|
||||
array_pos = array_pos[1:]
|
||||
for bits_left_to_write = n_bits + bits_reserved_in_first_byte; bits_left_to_write >= 9; bits_left_to_write -= 8 {
|
||||
bits >>= 8
|
||||
array_pos[0] = byte(bits)
|
||||
array_pos = array_pos[1:]
|
||||
}
|
||||
|
||||
array_pos[0] = 0
|
||||
*pos += n_bits
|
||||
// Data waiting to be written is the low nbits of bits.
|
||||
bits uint64
|
||||
nbits uint
|
||||
}
|
||||
|
||||
func writeSingleBit(bit bool, pos *uint, array []byte) {
|
||||
func (w *bitWriter) writeBits(nb uint, b uint64) {
|
||||
w.bits |= b << w.nbits
|
||||
w.nbits += nb
|
||||
if w.nbits >= 32 {
|
||||
bits := w.bits
|
||||
w.bits >>= 32
|
||||
w.nbits -= 32
|
||||
w.dst = append(w.dst,
|
||||
byte(bits),
|
||||
byte(bits>>8),
|
||||
byte(bits>>16),
|
||||
byte(bits>>24),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *bitWriter) writeSingleBit(bit bool) {
|
||||
if bit {
|
||||
writeBits(1, 1, pos, array)
|
||||
w.writeBits(1, 1)
|
||||
} else {
|
||||
writeBits(1, 0, pos, array)
|
||||
w.writeBits(1, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func writeBitsPrepareStorage(pos uint, array []byte) {
|
||||
assert(pos&7 == 0)
|
||||
array[pos>>3] = 0
|
||||
func (w *bitWriter) jumpToByteBoundary() {
|
||||
dst := w.dst
|
||||
for w.nbits != 0 {
|
||||
dst = append(dst, byte(w.bits))
|
||||
w.bits >>= 8
|
||||
if w.nbits > 8 { // Avoid underflow
|
||||
w.nbits -= 8
|
||||
} else {
|
||||
w.nbits = 0
|
||||
}
|
||||
}
|
||||
w.bits = 0
|
||||
w.dst = dst
|
||||
}
|
||||
|
||||
func (w *bitWriter) writeBytes(b []byte) {
|
||||
if w.nbits&7 != 0 {
|
||||
panic("writeBytes with unfinished bits")
|
||||
}
|
||||
for w.nbits != 0 {
|
||||
w.dst = append(w.dst, byte(w.bits))
|
||||
w.bits >>= 8
|
||||
w.nbits -= 8
|
||||
}
|
||||
w.dst = append(w.dst, b...)
|
||||
}
|
||||
|
||||
func (w *bitWriter) getPos() uint {
|
||||
return uint(len(w.dst)<<3) + w.nbits
|
||||
}
|
||||
|
||||
func (w *bitWriter) rewind(p uint) {
|
||||
w.bits = uint64(w.dst[p>>3] & byte((1<<(p&7))-1))
|
||||
w.nbits = p & 7
|
||||
w.dst = w.dst[:p>>3]
|
||||
}
|
||||
|
||||
func (w *bitWriter) updateBits(n_bits uint, bits uint32, pos uint) {
|
||||
for n_bits > 0 {
|
||||
var byte_pos uint = pos >> 3
|
||||
var n_unchanged_bits uint = pos & 7
|
||||
var n_changed_bits uint = brotli_min_size_t(n_bits, 8-n_unchanged_bits)
|
||||
var total_bits uint = n_unchanged_bits + n_changed_bits
|
||||
var mask uint32 = (^((1 << total_bits) - 1)) | ((1 << n_unchanged_bits) - 1)
|
||||
var unchanged_bits uint32 = uint32(w.dst[byte_pos]) & mask
|
||||
var changed_bits uint32 = bits & ((1 << n_changed_bits) - 1)
|
||||
w.dst[byte_pos] = byte(changed_bits<<n_unchanged_bits | unchanged_bits)
|
||||
n_bits -= n_changed_bits
|
||||
bits >>= n_changed_bits
|
||||
pos += n_changed_bits
|
||||
}
|
||||
}
|
||||
|
||||
15
vendor/github.com/andybalholm/brotli/writer.go
generated
vendored
15
vendor/github.com/andybalholm/brotli/writer.go
generated
vendored
@@ -67,6 +67,9 @@ func (w *Writer) writeChunk(p []byte, op int) (n int, err error) {
|
||||
if w.dst == nil {
|
||||
return 0, errWriterClosed
|
||||
}
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
for {
|
||||
availableIn := uint(len(p))
|
||||
@@ -79,16 +82,8 @@ func (w *Writer) writeChunk(p []byte, op int) (n int, err error) {
|
||||
return n, errEncode
|
||||
}
|
||||
|
||||
outputData := encoderTakeOutput(w)
|
||||
|
||||
if len(outputData) > 0 {
|
||||
_, err = w.dst.Write(outputData)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
if len(p) == 0 {
|
||||
return n, nil
|
||||
if len(p) == 0 || w.err != nil {
|
||||
return n, w.err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
3
vendor/github.com/araddon/dateparse/.travis.yml
generated
vendored
3
vendor/github.com/araddon/dateparse/.travis.yml
generated
vendored
@@ -1,8 +1,7 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
- 1.13.x
|
||||
|
||||
before_install:
|
||||
- go get -t -v ./...
|
||||
|
||||
16
vendor/github.com/araddon/dateparse/README.md
generated
vendored
16
vendor/github.com/araddon/dateparse/README.md
generated
vendored
@@ -66,7 +66,9 @@ var examples = []string{
|
||||
"Tue, 11 Jul 2017 16:28:13 +0200 (CEST)",
|
||||
"Mon, 02 Jan 2006 15:04:05 -0700",
|
||||
"Thu, 4 Jan 2018 17:53:36 +0000",
|
||||
"Mon 30 Sep 2018 09:09:09 PM UTC",
|
||||
"Mon Aug 10 15:44:11 UTC+0100 2015",
|
||||
"Thu, 4 Jan 2018 17:53:36 +0000",
|
||||
"Fri Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time)",
|
||||
"September 17, 2012 10:09am",
|
||||
"September 17, 2012 at 10:09am PST-08",
|
||||
@@ -106,6 +108,15 @@ var examples = []string{
|
||||
"2014/4/02 03:00:51",
|
||||
"2012/03/19 10:11:59",
|
||||
"2012/03/19 10:11:59.3186369",
|
||||
// yyyy:mm:dd
|
||||
"2014:3:31",
|
||||
"2014:03:31",
|
||||
"2014:4:8 22:05",
|
||||
"2014:04:08 22:05",
|
||||
"2014:04:2 03:00:51",
|
||||
"2014:4:02 03:00:51",
|
||||
"2012:03:19 10:11:59",
|
||||
"2012:03:19 10:11:59.3186369",
|
||||
// Chinese
|
||||
"2014年04月08日",
|
||||
// yyyy-mm-ddThh
|
||||
@@ -199,8 +210,9 @@ func main() {
|
||||
| Mon, 02 Jan 2006 15:04:05 MST | 2006-01-02 15:04:05 +0000 MST |
|
||||
| Tue, 11 Jul 2017 16:28:13 +0200 (CEST) | 2017-07-11 16:28:13 +0200 +0200 |
|
||||
| Mon, 02 Jan 2006 15:04:05 -0700 | 2006-01-02 15:04:05 -0700 -0700 |
|
||||
| Thu, 4 Jan 2018 17:53:36 +0000 | 2018-01-04 17:53:36 +0000 UTC |
|
||||
| Mon 30 Sep 2018 09:09:09 PM UTC | 2018-09-30 21:09:09 +0000 UTC |
|
||||
| Mon Aug 10 15:44:11 UTC+0100 2015 | 2015-08-10 15:44:11 +0000 UTC |
|
||||
| Thu, 4 Jan 2018 17:53:36 +0000 | 2018-01-04 17:53:36 +0000 UTC |
|
||||
| Fri Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time) | 2015-07-03 18:04:07 +0100 GMT |
|
||||
| September 17, 2012 10:09am | 2012-09-17 10:09:00 +0000 UTC |
|
||||
| September 17, 2012 at 10:09am PST-08 | 2012-09-17 10:09:00 -0800 PST |
|
||||
@@ -238,7 +250,7 @@ func main() {
|
||||
| 2014/4/02 03:00:51 | 2014-04-02 03:00:51 +0000 UTC |
|
||||
| 2012/03/19 10:11:59 | 2012-03-19 10:11:59 +0000 UTC |
|
||||
| 2012/03/19 10:11:59.3186369 | 2012-03-19 10:11:59.3186369 +0000 UTC |
|
||||
| 2014年04月08日 | 2014-04-08 00:00:00 +0000 UTC |
|
||||
| 2014年04月08日 | 2014-04-08 00:00:00 +0000 UTC |
|
||||
| 2006-01-02T15:04:05+0000 | 2006-01-02 15:04:05 +0000 UTC |
|
||||
| 2009-08-12T22:15:09-07:00 | 2009-08-12 22:15:09 -0700 -0700 |
|
||||
| 2009-08-12T22:15:09 | 2009-08-12 22:15:09 +0000 UTC |
|
||||
|
||||
9
vendor/github.com/araddon/dateparse/go.mod
generated
vendored
Normal file
9
vendor/github.com/araddon/dateparse/go.mod
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
module github.com/araddon/dateparse
|
||||
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4
|
||||
github.com/stretchr/testify v1.6.1
|
||||
)
|
||||
15
vendor/github.com/araddon/dateparse/go.sum
generated
vendored
Normal file
15
vendor/github.com/araddon/dateparse/go.sum
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4 h1:8qmTC5ByIXO3GP/IzBkxcZ/99VITvnIETDhdFz/om7A=
|
||||
github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
384
vendor/github.com/araddon/dateparse/parseany.go
generated
vendored
384
vendor/github.com/araddon/dateparse/parseany.go
generated
vendored
@@ -17,6 +17,23 @@ import (
|
||||
// gou.SetColorOutput()
|
||||
// }
|
||||
|
||||
var days = []string{
|
||||
"mon",
|
||||
"tue",
|
||||
"wed",
|
||||
"thu",
|
||||
"fri",
|
||||
"sat",
|
||||
"sun",
|
||||
"monday",
|
||||
"tuesday",
|
||||
"wednesday",
|
||||
"thursday",
|
||||
"friday",
|
||||
"saturday",
|
||||
"sunday",
|
||||
}
|
||||
|
||||
var months = []string{
|
||||
"january",
|
||||
"february",
|
||||
@@ -49,22 +66,24 @@ const (
|
||||
dateDigitDot // 10
|
||||
dateDigitDotDot
|
||||
dateDigitSlash
|
||||
dateDigitColon
|
||||
dateDigitChineseYear
|
||||
dateDigitChineseYearWs
|
||||
dateDigitWs // 15
|
||||
dateDigitChineseYearWs // 15
|
||||
dateDigitWs
|
||||
dateDigitWsMoYear
|
||||
dateDigitWsMolong
|
||||
dateAlpha
|
||||
dateAlphaWs
|
||||
dateAlphaWsDigit // 20
|
||||
dateAlphaWs // 20
|
||||
dateAlphaWsDigit
|
||||
dateAlphaWsDigitMore
|
||||
dateAlphaWsDigitMoreWs
|
||||
dateAlphaWsDigitMoreWsYear
|
||||
dateAlphaWsMonth
|
||||
dateAlphaWsMonth // 25
|
||||
dateAlphaWsDigitYearmaybe
|
||||
dateAlphaWsMonthMore
|
||||
dateAlphaWsMonthSuffix
|
||||
dateAlphaWsMore
|
||||
dateAlphaWsAtTime
|
||||
dateAlphaWsAtTime // 30
|
||||
dateAlphaWsAlpha
|
||||
dateAlphaWsAlphaYearmaybe
|
||||
dateAlphaPeriodWsDigit
|
||||
@@ -120,8 +139,8 @@ func unknownErr(datestr string) error {
|
||||
// ParseAny parse an unknown date format, detect the layout.
|
||||
// Normal parse. Equivalent Timezone rules as time.Parse().
|
||||
// NOTE: please see readme on mmdd vs ddmm ambiguous dates.
|
||||
func ParseAny(datestr string) (time.Time, error) {
|
||||
p, err := parseTime(datestr, nil)
|
||||
func ParseAny(datestr string, opts ...ParserOption) (time.Time, error) {
|
||||
p, err := parseTime(datestr, nil, opts...)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
@@ -133,8 +152,8 @@ func ParseAny(datestr string) (time.Time, error) {
|
||||
// datestring, it uses the given location rules for any zone interpretation.
|
||||
// That is, MST means one thing when using America/Denver and something else
|
||||
// in other locations.
|
||||
func ParseIn(datestr string, loc *time.Location) (time.Time, error) {
|
||||
p, err := parseTime(datestr, loc)
|
||||
func ParseIn(datestr string, loc *time.Location, opts ...ParserOption) (time.Time, error) {
|
||||
p, err := parseTime(datestr, loc, opts...)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
@@ -156,8 +175,8 @@ func ParseIn(datestr string, loc *time.Location) (time.Time, error) {
|
||||
//
|
||||
// t, err := dateparse.ParseIn("3/1/2014", denverLoc)
|
||||
//
|
||||
func ParseLocal(datestr string) (time.Time, error) {
|
||||
p, err := parseTime(datestr, time.Local)
|
||||
func ParseLocal(datestr string, opts ...ParserOption) (time.Time, error) {
|
||||
p, err := parseTime(datestr, time.Local, opts...)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
@@ -166,8 +185,8 @@ func ParseLocal(datestr string) (time.Time, error) {
|
||||
|
||||
// MustParse parse a date, and panic if it can't be parsed. Used for testing.
|
||||
// Not recommended for most use-cases.
|
||||
func MustParse(datestr string) time.Time {
|
||||
p, err := parseTime(datestr, nil)
|
||||
func MustParse(datestr string, opts ...ParserOption) time.Time {
|
||||
p, err := parseTime(datestr, nil, opts...)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
@@ -184,8 +203,8 @@ func MustParse(datestr string) time.Time {
|
||||
// layout, err := dateparse.ParseFormat("2013-02-01 00:00:00")
|
||||
// // layout = "2006-01-02 15:04:05"
|
||||
//
|
||||
func ParseFormat(datestr string) (string, error) {
|
||||
p, err := parseTime(datestr, nil)
|
||||
func ParseFormat(datestr string, opts ...ParserOption) (string, error) {
|
||||
p, err := parseTime(datestr, nil, opts...)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -198,8 +217,8 @@ func ParseFormat(datestr string) (string, error) {
|
||||
|
||||
// ParseStrict parse an unknown date format. IF the date is ambigous
|
||||
// mm/dd vs dd/mm then return an error. These return errors: 3.3.2014 , 8/8/71 etc
|
||||
func ParseStrict(datestr string) (time.Time, error) {
|
||||
p, err := parseTime(datestr, nil)
|
||||
func ParseStrict(datestr string, opts ...ParserOption) (time.Time, error) {
|
||||
p, err := parseTime(datestr, nil, opts...)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
@@ -209,9 +228,31 @@ func ParseStrict(datestr string) (time.Time, error) {
|
||||
return p.parse()
|
||||
}
|
||||
|
||||
func parseTime(datestr string, loc *time.Location) (*parser, error) {
|
||||
func parseTime(datestr string, loc *time.Location, opts ...ParserOption) (p *parser, err error) {
|
||||
|
||||
p = newParser(datestr, loc, opts...)
|
||||
if p.retryAmbiguousDateWithSwap {
|
||||
// month out of range signifies that a day/month swap is the correct solution to an ambiguous date
|
||||
// this is because it means that a day is being interpreted as a month and overflowing the valid value for that
|
||||
// by retrying in this case, we can fix a common situation with no assumptions
|
||||
defer func() {
|
||||
if p.ambiguousMD {
|
||||
// if it errors out with the following error, swap before we
|
||||
// get out of this function to reduce scope it needs to be applied on
|
||||
_, err := p.parse()
|
||||
if err != nil && strings.Contains(err.Error(), "month out of range") {
|
||||
// create the option to reverse the preference
|
||||
preferMonthFirst := PreferMonthFirst(!p.preferMonthFirst)
|
||||
// turn off the retry to avoid endless recursion
|
||||
retryAmbiguousDateWithSwap := RetryAmbiguousDateWithSwap(false)
|
||||
modifiedOpts := append(opts, preferMonthFirst, retryAmbiguousDateWithSwap)
|
||||
p, err = parseTime(datestr, time.Local, modifiedOpts...)
|
||||
}
|
||||
}
|
||||
|
||||
}()
|
||||
}
|
||||
|
||||
p := newParser(datestr, loc)
|
||||
i := 0
|
||||
|
||||
// General strategy is to read rune by rune through the date looking for
|
||||
@@ -257,6 +298,31 @@ iterRunes:
|
||||
// 03/31/2005
|
||||
// 2014/02/24
|
||||
p.stateDate = dateDigitSlash
|
||||
if i == 4 {
|
||||
p.yearlen = i
|
||||
p.moi = i + 1
|
||||
p.setYear()
|
||||
} else {
|
||||
p.ambiguousMD = true
|
||||
if p.preferMonthFirst {
|
||||
if p.molen == 0 {
|
||||
p.molen = i
|
||||
p.setMonth()
|
||||
p.dayi = i + 1
|
||||
}
|
||||
} else {
|
||||
if p.daylen == 0 {
|
||||
p.daylen = i
|
||||
p.setDay()
|
||||
p.moi = i + 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case ':':
|
||||
// 03/31/2005
|
||||
// 2014/02/24
|
||||
p.stateDate = dateDigitColon
|
||||
if i == 4 {
|
||||
p.yearlen = i
|
||||
p.moi = i + 1
|
||||
@@ -446,6 +512,51 @@ iterRunes:
|
||||
p.setDay()
|
||||
p.yeari = i + 1
|
||||
}
|
||||
} else {
|
||||
if p.molen == 0 {
|
||||
p.molen = i - p.moi
|
||||
p.setMonth()
|
||||
p.yeari = i + 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case dateDigitColon:
|
||||
// 2014:07:10 06:55:38.156283
|
||||
// 03:19:2012 10:11:59
|
||||
// 04:2:2014 03:00:37
|
||||
// 3:1:2012 10:11:59
|
||||
// 4:8:2014 22:05
|
||||
// 3:1:2014
|
||||
// 10:13:2014
|
||||
// 01:02:2006
|
||||
// 1:2:06
|
||||
|
||||
switch r {
|
||||
case ' ':
|
||||
p.stateTime = timeStart
|
||||
if p.yearlen == 0 {
|
||||
p.yearlen = i - p.yeari
|
||||
p.setYear()
|
||||
} else if p.daylen == 0 {
|
||||
p.daylen = i - p.dayi
|
||||
p.setDay()
|
||||
}
|
||||
break iterRunes
|
||||
case ':':
|
||||
if p.yearlen > 0 {
|
||||
// 2014:07:10 06:55:38.156283
|
||||
if p.molen == 0 {
|
||||
p.molen = i - p.moi
|
||||
p.setMonth()
|
||||
p.dayi = i + 1
|
||||
}
|
||||
} else if p.preferMonthFirst {
|
||||
if p.daylen == 0 {
|
||||
p.daylen = i - p.dayi
|
||||
p.setDay()
|
||||
p.yeari = i + 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -587,9 +698,21 @@ iterRunes:
|
||||
} else {
|
||||
// This is possibly ambiguous? May will parse as either though.
|
||||
// So, it could return in-correct format.
|
||||
// May 05, 2005, 05:05:05
|
||||
// May 05 2005, 05:05:05
|
||||
// Jul 05, 2005, 05:05:05
|
||||
// dateAlphaWs
|
||||
// May 05, 2005, 05:05:05
|
||||
// May 05 2005, 05:05:05
|
||||
// Jul 05, 2005, 05:05:05
|
||||
// May 8 17:57:51 2009
|
||||
// May 8 17:57:51 2009
|
||||
// skip & return to dateStart
|
||||
// Tue 05 May 2020, 05:05:05
|
||||
// Mon Jan 2 15:04:05 2006
|
||||
|
||||
maybeDay := strings.ToLower(datestr[0:i])
|
||||
if isDay(maybeDay) {
|
||||
// using skip throws off indices used by other code; saner to restart
|
||||
return parseTime(datestr[i+1:], loc)
|
||||
}
|
||||
p.stateDate = dateAlphaWs
|
||||
}
|
||||
|
||||
@@ -618,7 +741,7 @@ iterRunes:
|
||||
} else if i == 4 {
|
||||
// gross
|
||||
datestr = datestr[0:i-1] + datestr[i:]
|
||||
return parseTime(datestr, loc)
|
||||
return parseTime(datestr, loc, opts...)
|
||||
} else {
|
||||
return nil, unknownErr(datestr)
|
||||
}
|
||||
@@ -631,11 +754,14 @@ iterRunes:
|
||||
// Mon Jan 02 15:04:05 -0700 2006
|
||||
// Fri Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time)
|
||||
// Mon Aug 10 15:44:11 UTC+0100 2015
|
||||
// dateAlphaWsDigit
|
||||
// May 8, 2009 5:57:51 PM
|
||||
// May 8 2009 5:57:51 PM
|
||||
// oct 1, 1970
|
||||
// oct 7, '70
|
||||
// dateAlphaWsDigit
|
||||
// May 8, 2009 5:57:51 PM
|
||||
// May 8 2009 5:57:51 PM
|
||||
// May 8 17:57:51 2009
|
||||
// May 8 17:57:51 2009
|
||||
// May 08 17:57:51 2009
|
||||
// oct 1, 1970
|
||||
// oct 7, '70
|
||||
switch {
|
||||
case unicode.IsLetter(r):
|
||||
p.set(0, "Mon")
|
||||
@@ -653,6 +779,9 @@ iterRunes:
|
||||
// oct 1, 1970
|
||||
// oct 7, '70
|
||||
// oct. 7, 1970
|
||||
// May 8 17:57:51 2009
|
||||
// May 8 17:57:51 2009
|
||||
// May 08 17:57:51 2009
|
||||
if r == ',' {
|
||||
p.daylen = i - p.dayi
|
||||
p.setDay()
|
||||
@@ -661,11 +790,31 @@ iterRunes:
|
||||
p.daylen = i - p.dayi
|
||||
p.setDay()
|
||||
p.yeari = i + 1
|
||||
p.stateDate = dateAlphaWsDigitMoreWs
|
||||
p.stateDate = dateAlphaWsDigitYearmaybe
|
||||
p.stateTime = timeStart
|
||||
} else if unicode.IsLetter(r) {
|
||||
p.stateDate = dateAlphaWsMonthSuffix
|
||||
i--
|
||||
}
|
||||
case dateAlphaWsDigitYearmaybe:
|
||||
// x
|
||||
// May 8 2009 5:57:51 PM
|
||||
// May 8 17:57:51 2009
|
||||
// May 8 17:57:51 2009
|
||||
// May 08 17:57:51 2009
|
||||
// Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time)
|
||||
if r == ':' {
|
||||
// Guessed wrong; was not a year
|
||||
i = i - 3
|
||||
p.stateDate = dateAlphaWsDigit
|
||||
p.yeari = 0
|
||||
break iterRunes
|
||||
} else if r == ' ' {
|
||||
// must be year format, not 15:04
|
||||
p.yearlen = i - p.yeari
|
||||
p.setYear()
|
||||
break iterRunes
|
||||
}
|
||||
case dateAlphaWsDigitMore:
|
||||
// x
|
||||
// May 8, 2009 5:57:51 PM
|
||||
@@ -698,42 +847,6 @@ iterRunes:
|
||||
break iterRunes
|
||||
}
|
||||
|
||||
case dateAlphaWsAlpha:
|
||||
// Mon Jan _2 15:04:05 2006
|
||||
// Mon Jan 02 15:04:05 -0700 2006
|
||||
// Mon Jan _2 15:04:05 MST 2006
|
||||
// Mon Aug 10 15:44:11 UTC+0100 2015
|
||||
// Fri Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time)
|
||||
if r == ' ' {
|
||||
if p.dayi > 0 {
|
||||
p.daylen = i - p.dayi
|
||||
p.setDay()
|
||||
p.yeari = i + 1
|
||||
p.stateDate = dateAlphaWsAlphaYearmaybe
|
||||
p.stateTime = timeStart
|
||||
}
|
||||
} else if unicode.IsDigit(r) {
|
||||
if p.dayi == 0 {
|
||||
p.dayi = i
|
||||
}
|
||||
}
|
||||
|
||||
case dateAlphaWsAlphaYearmaybe:
|
||||
// x
|
||||
// Mon Jan _2 15:04:05 2006
|
||||
// Fri Jul 03 2015 18:04:07 GMT+0100 (GMT Daylight Time)
|
||||
if r == ':' {
|
||||
i = i - 3
|
||||
p.stateDate = dateAlphaWsAlpha
|
||||
p.yeari = 0
|
||||
break iterRunes
|
||||
} else if r == ' ' {
|
||||
// must be year format, not 15:04
|
||||
p.yearlen = i - p.yeari
|
||||
p.setYear()
|
||||
break iterRunes
|
||||
}
|
||||
|
||||
case dateAlphaWsMonth:
|
||||
// April 8, 2009
|
||||
// April 8 2009
|
||||
@@ -783,25 +896,25 @@ iterRunes:
|
||||
case 't', 'T':
|
||||
if p.nextIs(i, 'h') || p.nextIs(i, 'H') {
|
||||
if len(datestr) > i+2 {
|
||||
return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc)
|
||||
return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc, opts...)
|
||||
}
|
||||
}
|
||||
case 'n', 'N':
|
||||
if p.nextIs(i, 'd') || p.nextIs(i, 'D') {
|
||||
if len(datestr) > i+2 {
|
||||
return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc)
|
||||
return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc, opts...)
|
||||
}
|
||||
}
|
||||
case 's', 'S':
|
||||
if p.nextIs(i, 't') || p.nextIs(i, 'T') {
|
||||
if len(datestr) > i+2 {
|
||||
return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc)
|
||||
return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc, opts...)
|
||||
}
|
||||
}
|
||||
case 'r', 'R':
|
||||
if p.nextIs(i, 'd') || p.nextIs(i, 'D') {
|
||||
if len(datestr) > i+2 {
|
||||
return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc)
|
||||
return parseTime(fmt.Sprintf("%s%s", p.datestr[0:i], p.datestr[i+2:]), loc, opts...)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -975,7 +1088,7 @@ iterRunes:
|
||||
// 2014-05-11 08:20:13,787
|
||||
ds := []byte(p.datestr)
|
||||
ds[i] = '.'
|
||||
return parseTime(string(ds), loc)
|
||||
return parseTime(string(ds), loc, opts...)
|
||||
case '-', '+':
|
||||
// 03:21:51+00:00
|
||||
p.stateTime = timeOffset
|
||||
@@ -997,6 +1110,8 @@ iterRunes:
|
||||
} else {
|
||||
p.seclen = i - p.seci
|
||||
}
|
||||
// (Z)ulu time
|
||||
p.loc = time.UTC
|
||||
case 'a', 'A':
|
||||
if p.nextIs(i, 't') || p.nextIs(i, 'T') {
|
||||
// x
|
||||
@@ -1139,7 +1254,9 @@ iterRunes:
|
||||
switch r {
|
||||
case ' ':
|
||||
p.set(p.offseti, "-0700")
|
||||
p.yeari = i + 1
|
||||
if p.yeari == 0 {
|
||||
p.yeari = i + 1
|
||||
}
|
||||
p.stateTime = timeWsAlphaZoneOffsetWs
|
||||
}
|
||||
case timeWsAlphaZoneOffsetWs:
|
||||
@@ -1630,7 +1747,10 @@ iterRunes:
|
||||
case dateAlphaWsAlpha:
|
||||
return p, nil
|
||||
|
||||
case dateAlphaWsAlphaYearmaybe:
|
||||
case dateAlphaWsDigit:
|
||||
return p, nil
|
||||
|
||||
case dateAlphaWsDigitYearmaybe:
|
||||
return p, nil
|
||||
|
||||
case dateDigitSlash:
|
||||
@@ -1640,6 +1760,13 @@ iterRunes:
|
||||
// 2014/10/13
|
||||
return p, nil
|
||||
|
||||
case dateDigitColon:
|
||||
// 3:1:2014
|
||||
// 10:13:2014
|
||||
// 01:02:2006
|
||||
// 2014:10:13
|
||||
return p, nil
|
||||
|
||||
case dateDigitChineseYear:
|
||||
// dateDigitChineseYear
|
||||
// 2014年04月08日
|
||||
@@ -1667,48 +1794,75 @@ iterRunes:
|
||||
}
|
||||
|
||||
type parser struct {
|
||||
loc *time.Location
|
||||
preferMonthFirst bool
|
||||
ambiguousMD bool
|
||||
stateDate dateState
|
||||
stateTime timeState
|
||||
format []byte
|
||||
datestr string
|
||||
fullMonth string
|
||||
skip int
|
||||
extra int
|
||||
part1Len int
|
||||
yeari int
|
||||
yearlen int
|
||||
moi int
|
||||
molen int
|
||||
dayi int
|
||||
daylen int
|
||||
houri int
|
||||
hourlen int
|
||||
mini int
|
||||
minlen int
|
||||
seci int
|
||||
seclen int
|
||||
msi int
|
||||
mslen int
|
||||
offseti int
|
||||
offsetlen int
|
||||
tzi int
|
||||
tzlen int
|
||||
t *time.Time
|
||||
loc *time.Location
|
||||
preferMonthFirst bool
|
||||
retryAmbiguousDateWithSwap bool
|
||||
ambiguousMD bool
|
||||
stateDate dateState
|
||||
stateTime timeState
|
||||
format []byte
|
||||
datestr string
|
||||
fullMonth string
|
||||
skip int
|
||||
extra int
|
||||
part1Len int
|
||||
yeari int
|
||||
yearlen int
|
||||
moi int
|
||||
molen int
|
||||
dayi int
|
||||
daylen int
|
||||
houri int
|
||||
hourlen int
|
||||
mini int
|
||||
minlen int
|
||||
seci int
|
||||
seclen int
|
||||
msi int
|
||||
mslen int
|
||||
offseti int
|
||||
offsetlen int
|
||||
tzi int
|
||||
tzlen int
|
||||
t *time.Time
|
||||
}
|
||||
|
||||
func newParser(dateStr string, loc *time.Location) *parser {
|
||||
p := parser{
|
||||
stateDate: dateStart,
|
||||
stateTime: timeIgnore,
|
||||
datestr: dateStr,
|
||||
loc: loc,
|
||||
preferMonthFirst: true,
|
||||
// ParserOption defines a function signature implemented by options
|
||||
// Options defined like this accept the parser and operate on the data within
|
||||
type ParserOption func(*parser) error
|
||||
|
||||
// PreferMonthFirst is an option that allows preferMonthFirst to be changed from its default
|
||||
func PreferMonthFirst(preferMonthFirst bool) ParserOption {
|
||||
return func(p *parser) error {
|
||||
p.preferMonthFirst = preferMonthFirst
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RetryAmbiguousDateWithSwap is an option that allows retryAmbiguousDateWithSwap to be changed from its default
|
||||
func RetryAmbiguousDateWithSwap(retryAmbiguousDateWithSwap bool) ParserOption {
|
||||
return func(p *parser) error {
|
||||
p.retryAmbiguousDateWithSwap = retryAmbiguousDateWithSwap
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func newParser(dateStr string, loc *time.Location, opts ...ParserOption) *parser {
|
||||
p := &parser{
|
||||
stateDate: dateStart,
|
||||
stateTime: timeIgnore,
|
||||
datestr: dateStr,
|
||||
loc: loc,
|
||||
preferMonthFirst: true,
|
||||
retryAmbiguousDateWithSwap: false,
|
||||
}
|
||||
p.format = []byte(dateStr)
|
||||
return &p
|
||||
|
||||
// allow the options to mutate the parser fields from their defaults
|
||||
for _, option := range opts {
|
||||
option(p)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *parser) nextIs(i int, b byte) bool {
|
||||
@@ -1854,6 +2008,14 @@ func (p *parser) parse() (time.Time, error) {
|
||||
}
|
||||
return time.ParseInLocation(string(p.format), p.datestr, p.loc)
|
||||
}
|
||||
func isDay(alpha string) bool {
|
||||
for _, day := range days {
|
||||
if alpha == day {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
func isMonthFull(alpha string) bool {
|
||||
for _, month := range months {
|
||||
if alpha == month {
|
||||
|
||||
4
vendor/github.com/armon/go-metrics/metrics.go
generated
vendored
4
vendor/github.com/armon/go-metrics/metrics.go
generated
vendored
@@ -228,12 +228,12 @@ func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) {
|
||||
func (m *Metrics) collectStats() {
|
||||
for {
|
||||
time.Sleep(m.ProfileInterval)
|
||||
m.emitRuntimeStats()
|
||||
m.EmitRuntimeStats()
|
||||
}
|
||||
}
|
||||
|
||||
// Emits various runtime statsitics
|
||||
func (m *Metrics) emitRuntimeStats() {
|
||||
func (m *Metrics) EmitRuntimeStats() {
|
||||
// Export number of Goroutines
|
||||
numRoutines := runtime.NumGoroutine()
|
||||
m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines))
|
||||
|
||||
7
vendor/github.com/armon/go-metrics/start.go
generated
vendored
7
vendor/github.com/armon/go-metrics/start.go
generated
vendored
@@ -6,7 +6,7 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-immutable-radix"
|
||||
iradix "github.com/hashicorp/go-immutable-radix"
|
||||
)
|
||||
|
||||
// Config is used to configure metrics settings
|
||||
@@ -48,6 +48,11 @@ func init() {
|
||||
globalMetrics.Store(&Metrics{sink: &BlackholeSink{}})
|
||||
}
|
||||
|
||||
// Default returns the shared global metrics instance.
|
||||
func Default() *Metrics {
|
||||
return globalMetrics.Load().(*Metrics)
|
||||
}
|
||||
|
||||
// DefaultConfig provides a sane default configuration
|
||||
func DefaultConfig(serviceName string) *Config {
|
||||
c := &Config{
|
||||
|
||||
74
vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
generated
vendored
74
vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
generated
vendored
@@ -50,7 +50,7 @@ package credentials
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
@@ -207,9 +207,10 @@ func (e *Expiry) ExpiresAt() time.Time {
|
||||
// first instance of the credentials Value. All calls to Get() after that
|
||||
// will return the cached credentials Value until IsExpired() returns true.
|
||||
type Credentials struct {
|
||||
creds atomic.Value
|
||||
sf singleflight.Group
|
||||
sf singleflight.Group
|
||||
|
||||
m sync.RWMutex
|
||||
creds Value
|
||||
provider Provider
|
||||
}
|
||||
|
||||
@@ -218,7 +219,6 @@ func NewCredentials(provider Provider) *Credentials {
|
||||
c := &Credentials{
|
||||
provider: provider,
|
||||
}
|
||||
c.creds.Store(Value{})
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -235,8 +235,17 @@ func NewCredentials(provider Provider) *Credentials {
|
||||
//
|
||||
// Passed in Context is equivalent to aws.Context, and context.Context.
|
||||
func (c *Credentials) GetWithContext(ctx Context) (Value, error) {
|
||||
if curCreds := c.creds.Load(); !c.isExpired(curCreds) {
|
||||
return curCreds.(Value), nil
|
||||
// Check if credentials are cached, and not expired.
|
||||
select {
|
||||
case curCreds, ok := <-c.asyncIsExpired():
|
||||
// ok will only be true, of the credentials were not expired. ok will
|
||||
// be false and have no value if the credentials are expired.
|
||||
if ok {
|
||||
return curCreds, nil
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return Value{}, awserr.New("RequestCanceled",
|
||||
"request context canceled", ctx.Err())
|
||||
}
|
||||
|
||||
// Cannot pass context down to the actual retrieve, because the first
|
||||
@@ -254,18 +263,23 @@ func (c *Credentials) GetWithContext(ctx Context) (Value, error) {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Credentials) singleRetrieve(ctx Context) (creds interface{}, err error) {
|
||||
if curCreds := c.creds.Load(); !c.isExpired(curCreds) {
|
||||
return curCreds.(Value), nil
|
||||
func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) {
|
||||
c.m.Lock()
|
||||
defer c.m.Unlock()
|
||||
|
||||
if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
|
||||
return curCreds, nil
|
||||
}
|
||||
|
||||
var creds Value
|
||||
var err error
|
||||
if p, ok := c.provider.(ProviderWithContext); ok {
|
||||
creds, err = p.RetrieveWithContext(ctx)
|
||||
} else {
|
||||
creds, err = c.provider.Retrieve()
|
||||
}
|
||||
if err == nil {
|
||||
c.creds.Store(creds)
|
||||
c.creds = creds
|
||||
}
|
||||
|
||||
return creds, err
|
||||
@@ -290,7 +304,10 @@ func (c *Credentials) Get() (Value, error) {
|
||||
// This will override the Provider's expired state, and force Credentials
|
||||
// to call the Provider's Retrieve().
|
||||
func (c *Credentials) Expire() {
|
||||
c.creds.Store(Value{})
|
||||
c.m.Lock()
|
||||
defer c.m.Unlock()
|
||||
|
||||
c.creds = Value{}
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials are no longer valid, and need
|
||||
@@ -299,11 +316,32 @@ func (c *Credentials) Expire() {
|
||||
// If the Credentials were forced to be expired with Expire() this will
|
||||
// reflect that override.
|
||||
func (c *Credentials) IsExpired() bool {
|
||||
return c.isExpired(c.creds.Load())
|
||||
c.m.RLock()
|
||||
defer c.m.RUnlock()
|
||||
|
||||
return c.isExpiredLocked(c.creds)
|
||||
}
|
||||
|
||||
// isExpired helper method wrapping the definition of expired credentials.
|
||||
func (c *Credentials) isExpired(creds interface{}) bool {
|
||||
// asyncIsExpired returns a channel of credentials Value. If the channel is
|
||||
// closed the credentials are expired and credentials value are not empty.
|
||||
func (c *Credentials) asyncIsExpired() <-chan Value {
|
||||
ch := make(chan Value, 1)
|
||||
go func() {
|
||||
c.m.RLock()
|
||||
defer c.m.RUnlock()
|
||||
|
||||
if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
|
||||
ch <- curCreds
|
||||
}
|
||||
|
||||
close(ch)
|
||||
}()
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
// isExpiredLocked helper method wrapping the definition of expired credentials.
|
||||
func (c *Credentials) isExpiredLocked(creds interface{}) bool {
|
||||
return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired()
|
||||
}
|
||||
|
||||
@@ -311,13 +349,17 @@ func (c *Credentials) isExpired(creds interface{}) bool {
|
||||
// the underlying Provider, if it supports that interface. Otherwise, it returns
|
||||
// an error.
|
||||
func (c *Credentials) ExpiresAt() (time.Time, error) {
|
||||
c.m.RLock()
|
||||
defer c.m.RUnlock()
|
||||
|
||||
expirer, ok := c.provider.(Expirer)
|
||||
if !ok {
|
||||
return time.Time{}, awserr.New("ProviderNotExpirer",
|
||||
fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.Load().(Value).ProviderName),
|
||||
fmt.Sprintf("provider %s does not support ExpiresAt()",
|
||||
c.creds.ProviderName),
|
||||
nil)
|
||||
}
|
||||
if c.creds.Load().(Value) == (Value{}) {
|
||||
if c.creds == (Value{}) {
|
||||
// set expiration time to the distant past
|
||||
return time.Time{}, nil
|
||||
}
|
||||
|
||||
743
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
743
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
File diff suppressed because it is too large
Load Diff
27
vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
generated
vendored
Normal file
27
vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
// +build go1.13
|
||||
|
||||
package session
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Transport that should be used when a custom CA bundle is specified with the
|
||||
// SDK.
|
||||
func getCustomTransport() *http.Transport {
|
||||
return &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
DialContext: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
DualStack: true,
|
||||
}).DialContext,
|
||||
ForceAttemptHTTP2: true,
|
||||
MaxIdleConns: 100,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
ExpectContinueTimeout: 1 * time.Second,
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
// +build go1.7
|
||||
// +build !go1.13,go1.7
|
||||
|
||||
package session
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
|
||||
// Transport that should be used when a custom CA bundle is specified with the
|
||||
// SDK.
|
||||
func getCABundleTransport() *http.Transport {
|
||||
func getCustomTransport() *http.Transport {
|
||||
return &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
DialContext: (&net.Dialer{
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
|
||||
// Transport that should be used when a custom CA bundle is specified with the
|
||||
// SDK.
|
||||
func getCABundleTransport() *http.Transport {
|
||||
func getCustomTransport() *http.Transport {
|
||||
return &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Dial: (&net.Dialer{
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
|
||||
// Transport that should be used when a custom CA bundle is specified with the
|
||||
// SDK.
|
||||
func getCABundleTransport() *http.Transport {
|
||||
func getCustomTransport() *http.Transport {
|
||||
return &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Dial: (&net.Dialer{
|
||||
27
vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
generated
vendored
27
vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
generated
vendored
@@ -208,6 +208,8 @@ env values as well.
|
||||
|
||||
AWS_SDK_LOAD_CONFIG=1
|
||||
|
||||
Custom Shared Config and Credential Files
|
||||
|
||||
Shared credentials file path can be set to instruct the SDK to use an alternative
|
||||
file for the shared credentials. If not set the file will be loaded from
|
||||
$HOME/.aws/credentials on Linux/Unix based systems, and
|
||||
@@ -222,6 +224,8 @@ $HOME/.aws/config on Linux/Unix based systems, and
|
||||
|
||||
AWS_CONFIG_FILE=$HOME/my_shared_config
|
||||
|
||||
Custom CA Bundle
|
||||
|
||||
Path to a custom Credentials Authority (CA) bundle PEM file that the SDK
|
||||
will use instead of the default system's root CA bundle. Use this only
|
||||
if you want to replace the CA bundle the SDK uses for TLS requests.
|
||||
@@ -242,6 +246,29 @@ Setting a custom HTTPClient in the aws.Config options will override this setting
|
||||
To use this option and custom HTTP client, the HTTP client needs to be provided
|
||||
when creating the session. Not the service client.
|
||||
|
||||
Custom Client TLS Certificate
|
||||
|
||||
The SDK supports the environment and session option being configured with
|
||||
Client TLS certificates that are sent as a part of the client's TLS handshake
|
||||
for client authentication. If used, both Cert and Key values are required. If
|
||||
one is missing, or either fail to load the contents of the file an error will
|
||||
be returned.
|
||||
|
||||
HTTP Client's Transport concrete implementation must be a http.Transport
|
||||
or creating the session will fail.
|
||||
|
||||
AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
|
||||
AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
|
||||
|
||||
This can also be configured via the session.Options ClientTLSCert and ClientTLSKey.
|
||||
|
||||
sess, err := session.NewSessionWithOptions(session.Options{
|
||||
ClientTLSCert: myCertFile,
|
||||
ClientTLSKey: myKeyFile,
|
||||
})
|
||||
|
||||
Custom EC2 IMDS Endpoint
|
||||
|
||||
The endpoint of the EC2 IMDS client can be configured via the environment
|
||||
variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
|
||||
Session. See Options.EC2IMDSEndpoint for more details.
|
||||
|
||||
25
vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
generated
vendored
25
vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
generated
vendored
@@ -101,6 +101,18 @@ type envConfig struct {
|
||||
// AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
|
||||
CustomCABundle string
|
||||
|
||||
// Sets the TLC client certificate that should be used by the SDK's HTTP transport
|
||||
// when making requests. The certificate must be paired with a TLS client key file.
|
||||
//
|
||||
// AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
|
||||
ClientTLSCert string
|
||||
|
||||
// Sets the TLC client key that should be used by the SDK's HTTP transport
|
||||
// when making requests. The key must be paired with a TLS client certificate file.
|
||||
//
|
||||
// AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
|
||||
ClientTLSKey string
|
||||
|
||||
csmEnabled string
|
||||
CSMEnabled *bool
|
||||
CSMPort string
|
||||
@@ -219,6 +231,15 @@ var (
|
||||
ec2IMDSEndpointEnvKey = []string{
|
||||
"AWS_EC2_METADATA_SERVICE_ENDPOINT",
|
||||
}
|
||||
useCABundleKey = []string{
|
||||
"AWS_CA_BUNDLE",
|
||||
}
|
||||
useClientTLSCert = []string{
|
||||
"AWS_SDK_GO_CLIENT_TLS_CERT",
|
||||
}
|
||||
useClientTLSKey = []string{
|
||||
"AWS_SDK_GO_CLIENT_TLS_KEY",
|
||||
}
|
||||
)
|
||||
|
||||
// loadEnvConfig retrieves the SDK's environment configuration.
|
||||
@@ -302,7 +323,9 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
|
||||
cfg.SharedConfigFile = defaults.SharedConfigFilename()
|
||||
}
|
||||
|
||||
cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
|
||||
setFromEnvVal(&cfg.CustomCABundle, useCABundleKey)
|
||||
setFromEnvVal(&cfg.ClientTLSCert, useClientTLSCert)
|
||||
setFromEnvVal(&cfg.ClientTLSKey, useClientTLSKey)
|
||||
|
||||
var err error
|
||||
// STS Regional Endpoint variable
|
||||
|
||||
187
vendor/github.com/aws/aws-sdk-go/aws/session/session.go
generated
vendored
187
vendor/github.com/aws/aws-sdk-go/aws/session/session.go
generated
vendored
@@ -25,6 +25,13 @@ const (
|
||||
// ErrCodeSharedConfig represents an error that occurs in the shared
|
||||
// configuration logic
|
||||
ErrCodeSharedConfig = "SharedConfigErr"
|
||||
|
||||
// ErrCodeLoadCustomCABundle error code for unable to load custom CA bundle.
|
||||
ErrCodeLoadCustomCABundle = "LoadCustomCABundleError"
|
||||
|
||||
// ErrCodeLoadClientTLSCert error code for unable to load client TLS
|
||||
// certificate or key
|
||||
ErrCodeLoadClientTLSCert = "LoadClientTLSCertError"
|
||||
)
|
||||
|
||||
// ErrSharedConfigSourceCollision will be returned if a section contains both
|
||||
@@ -229,17 +236,46 @@ type Options struct {
|
||||
// the SDK will use instead of the default system's root CA bundle. Use this
|
||||
// only if you want to replace the CA bundle the SDK uses for TLS requests.
|
||||
//
|
||||
// Enabling this option will attempt to merge the Transport into the SDK's HTTP
|
||||
// client. If the client's Transport is not a http.Transport an error will be
|
||||
// returned. If the Transport's TLS config is set this option will cause the SDK
|
||||
// HTTP Client's Transport concrete implementation must be a http.Transport
|
||||
// or creating the session will fail.
|
||||
//
|
||||
// If the Transport's TLS config is set this option will cause the SDK
|
||||
// to overwrite the Transport's TLS config's RootCAs value. If the CA
|
||||
// bundle reader contains multiple certificates all of them will be loaded.
|
||||
//
|
||||
// The Session option CustomCABundle is also available when creating sessions
|
||||
// to also enable this feature. CustomCABundle session option field has priority
|
||||
// over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
|
||||
// Can also be specified via the environment variable:
|
||||
//
|
||||
// AWS_CA_BUNDLE=$HOME/ca_bundle
|
||||
//
|
||||
// Can also be specified via the shared config field:
|
||||
//
|
||||
// ca_bundle = $HOME/ca_bundle
|
||||
CustomCABundle io.Reader
|
||||
|
||||
// Reader for the TLC client certificate that should be used by the SDK's
|
||||
// HTTP transport when making requests. The certificate must be paired with
|
||||
// a TLS client key file. Will be ignored if both are not provided.
|
||||
//
|
||||
// HTTP Client's Transport concrete implementation must be a http.Transport
|
||||
// or creating the session will fail.
|
||||
//
|
||||
// Can also be specified via the environment variable:
|
||||
//
|
||||
// AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
|
||||
ClientTLSCert io.Reader
|
||||
|
||||
// Reader for the TLC client key that should be used by the SDK's HTTP
|
||||
// transport when making requests. The key must be paired with a TLS client
|
||||
// certificate file. Will be ignored if both are not provided.
|
||||
//
|
||||
// HTTP Client's Transport concrete implementation must be a http.Transport
|
||||
// or creating the session will fail.
|
||||
//
|
||||
// Can also be specified via the environment variable:
|
||||
//
|
||||
// AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
|
||||
ClientTLSKey io.Reader
|
||||
|
||||
// The handlers that the session and all API clients will be created with.
|
||||
// This must be a complete set of handlers. Use the defaults.Handlers()
|
||||
// function to initialize this value before changing the handlers to be
|
||||
@@ -319,17 +355,6 @@ func NewSessionWithOptions(opts Options) (*Session, error) {
|
||||
envCfg.EnableSharedConfig = true
|
||||
}
|
||||
|
||||
// Only use AWS_CA_BUNDLE if session option is not provided.
|
||||
if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
|
||||
f, err := os.Open(envCfg.CustomCABundle)
|
||||
if err != nil {
|
||||
return nil, awserr.New("LoadCustomCABundleError",
|
||||
"failed to open custom CA bundle PEM file", err)
|
||||
}
|
||||
defer f.Close()
|
||||
opts.CustomCABundle = f
|
||||
}
|
||||
|
||||
return newSession(opts, envCfg, &opts.Config)
|
||||
}
|
||||
|
||||
@@ -460,6 +485,10 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := setTLSOptions(&opts, cfg, envCfg, sharedCfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := &Session{
|
||||
Config: cfg,
|
||||
Handlers: handlers,
|
||||
@@ -479,13 +508,6 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
|
||||
}
|
||||
}
|
||||
|
||||
// Setup HTTP client with custom cert bundle if enabled
|
||||
if opts.CustomCABundle != nil {
|
||||
if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
@@ -529,22 +551,83 @@ func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) {
|
||||
return csmConfig{}, nil
|
||||
}
|
||||
|
||||
func loadCustomCABundle(s *Session, bundle io.Reader) error {
|
||||
func setTLSOptions(opts *Options, cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error {
|
||||
// CA Bundle can be specified in both environment variable shared config file.
|
||||
var caBundleFilename = envCfg.CustomCABundle
|
||||
if len(caBundleFilename) == 0 {
|
||||
caBundleFilename = sharedCfg.CustomCABundle
|
||||
}
|
||||
|
||||
// Only use environment value if session option is not provided.
|
||||
customTLSOptions := map[string]struct {
|
||||
filename string
|
||||
field *io.Reader
|
||||
errCode string
|
||||
}{
|
||||
"custom CA bundle PEM": {filename: caBundleFilename, field: &opts.CustomCABundle, errCode: ErrCodeLoadCustomCABundle},
|
||||
"custom client TLS cert": {filename: envCfg.ClientTLSCert, field: &opts.ClientTLSCert, errCode: ErrCodeLoadClientTLSCert},
|
||||
"custom client TLS key": {filename: envCfg.ClientTLSKey, field: &opts.ClientTLSKey, errCode: ErrCodeLoadClientTLSCert},
|
||||
}
|
||||
for name, v := range customTLSOptions {
|
||||
if len(v.filename) != 0 && *v.field == nil {
|
||||
f, err := os.Open(v.filename)
|
||||
if err != nil {
|
||||
return awserr.New(v.errCode, fmt.Sprintf("failed to open %s file", name), err)
|
||||
}
|
||||
defer f.Close()
|
||||
*v.field = f
|
||||
}
|
||||
}
|
||||
|
||||
// Setup HTTP client with custom cert bundle if enabled
|
||||
if opts.CustomCABundle != nil {
|
||||
if err := loadCustomCABundle(cfg.HTTPClient, opts.CustomCABundle); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Setup HTTP client TLS certificate and key for client TLS authentication.
|
||||
if opts.ClientTLSCert != nil && opts.ClientTLSKey != nil {
|
||||
if err := loadClientTLSCert(cfg.HTTPClient, opts.ClientTLSCert, opts.ClientTLSKey); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if opts.ClientTLSCert == nil && opts.ClientTLSKey == nil {
|
||||
// Do nothing if neither values are available.
|
||||
|
||||
} else {
|
||||
return awserr.New(ErrCodeLoadClientTLSCert,
|
||||
fmt.Sprintf("client TLS cert(%t) and key(%t) must both be provided",
|
||||
opts.ClientTLSCert != nil, opts.ClientTLSKey != nil), nil)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getHTTPTransport(client *http.Client) (*http.Transport, error) {
|
||||
var t *http.Transport
|
||||
switch v := s.Config.HTTPClient.Transport.(type) {
|
||||
switch v := client.Transport.(type) {
|
||||
case *http.Transport:
|
||||
t = v
|
||||
default:
|
||||
if s.Config.HTTPClient.Transport != nil {
|
||||
return awserr.New("LoadCustomCABundleError",
|
||||
"unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
|
||||
if client.Transport != nil {
|
||||
return nil, fmt.Errorf("unsupported transport, %T", client.Transport)
|
||||
}
|
||||
}
|
||||
if t == nil {
|
||||
// Nil transport implies `http.DefaultTransport` should be used. Since
|
||||
// the SDK cannot modify, nor copy the `DefaultTransport` specifying
|
||||
// the values the next closest behavior.
|
||||
t = getCABundleTransport()
|
||||
t = getCustomTransport()
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func loadCustomCABundle(client *http.Client, bundle io.Reader) error {
|
||||
t, err := getHTTPTransport(client)
|
||||
if err != nil {
|
||||
return awserr.New(ErrCodeLoadCustomCABundle,
|
||||
"unable to load custom CA bundle, HTTPClient's transport unsupported type", err)
|
||||
}
|
||||
|
||||
p, err := loadCertPool(bundle)
|
||||
@@ -556,7 +639,7 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error {
|
||||
}
|
||||
t.TLSClientConfig.RootCAs = p
|
||||
|
||||
s.Config.HTTPClient.Transport = t
|
||||
client.Transport = t
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -564,19 +647,57 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error {
|
||||
func loadCertPool(r io.Reader) (*x509.CertPool, error) {
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, awserr.New("LoadCustomCABundleError",
|
||||
return nil, awserr.New(ErrCodeLoadCustomCABundle,
|
||||
"failed to read custom CA bundle PEM file", err)
|
||||
}
|
||||
|
||||
p := x509.NewCertPool()
|
||||
if !p.AppendCertsFromPEM(b) {
|
||||
return nil, awserr.New("LoadCustomCABundleError",
|
||||
return nil, awserr.New(ErrCodeLoadCustomCABundle,
|
||||
"failed to load custom CA bundle PEM file", err)
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func loadClientTLSCert(client *http.Client, certFile, keyFile io.Reader) error {
|
||||
t, err := getHTTPTransport(client)
|
||||
if err != nil {
|
||||
return awserr.New(ErrCodeLoadClientTLSCert,
|
||||
"unable to get usable HTTP transport from client", err)
|
||||
}
|
||||
|
||||
cert, err := ioutil.ReadAll(certFile)
|
||||
if err != nil {
|
||||
return awserr.New(ErrCodeLoadClientTLSCert,
|
||||
"unable to get read client TLS cert file", err)
|
||||
}
|
||||
|
||||
key, err := ioutil.ReadAll(keyFile)
|
||||
if err != nil {
|
||||
return awserr.New(ErrCodeLoadClientTLSCert,
|
||||
"unable to get read client TLS key file", err)
|
||||
}
|
||||
|
||||
clientCert, err := tls.X509KeyPair(cert, key)
|
||||
if err != nil {
|
||||
return awserr.New(ErrCodeLoadClientTLSCert,
|
||||
"unable to load x509 key pair from client cert", err)
|
||||
}
|
||||
|
||||
tlsCfg := t.TLSClientConfig
|
||||
if tlsCfg == nil {
|
||||
tlsCfg = &tls.Config{}
|
||||
}
|
||||
|
||||
tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert)
|
||||
|
||||
t.TLSClientConfig = tlsCfg
|
||||
client.Transport = t
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func mergeConfigSrcs(cfg, userCfg *aws.Config,
|
||||
envCfg envConfig, sharedCfg sharedConfig,
|
||||
handlers request.Handlers,
|
||||
|
||||
13
vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
generated
vendored
13
vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
generated
vendored
@@ -34,6 +34,9 @@ const (
|
||||
// Additional Config fields
|
||||
regionKey = `region`
|
||||
|
||||
// custom CA Bundle filename
|
||||
customCABundleKey = `ca_bundle`
|
||||
|
||||
// endpoint discovery group
|
||||
enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
|
||||
|
||||
@@ -90,6 +93,15 @@ type sharedConfig struct {
|
||||
// region
|
||||
Region string
|
||||
|
||||
// CustomCABundle is the file path to a PEM file the SDK will read and
|
||||
// use to configure the HTTP transport with additional CA certs that are
|
||||
// not present in the platforms default CA store.
|
||||
//
|
||||
// This value will be ignored if the file does not exist.
|
||||
//
|
||||
// ca_bundle
|
||||
CustomCABundle string
|
||||
|
||||
// EnableEndpointDiscovery can be enabled in the shared config by setting
|
||||
// endpoint_discovery_enabled to true
|
||||
//
|
||||
@@ -276,6 +288,7 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
|
||||
updateString(&cfg.SourceProfileName, section, sourceProfileKey)
|
||||
updateString(&cfg.CredentialSource, section, credentialSourceKey)
|
||||
updateString(&cfg.Region, section, regionKey)
|
||||
updateString(&cfg.CustomCABundle, section, customCABundleKey)
|
||||
|
||||
if section.Has(roleDurationSecondsKey) {
|
||||
d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
|
||||
|
||||
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
@@ -5,4 +5,4 @@ package aws
|
||||
const SDKName = "aws-sdk-go"
|
||||
|
||||
// SDKVersion is the version of this SDK
|
||||
const SDKVersion = "1.35.5"
|
||||
const SDKVersion = "1.36.15"
|
||||
|
||||
7
vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
generated
vendored
7
vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
generated
vendored
@@ -63,9 +63,10 @@ var parseTable = map[ASTKind]map[TokenType]int{
|
||||
TokenNone: MarkCompleteState,
|
||||
},
|
||||
ASTKindEqualExpr: map[TokenType]int{
|
||||
TokenLit: ValueState,
|
||||
TokenWS: SkipTokenState,
|
||||
TokenNL: SkipState,
|
||||
TokenLit: ValueState,
|
||||
TokenWS: SkipTokenState,
|
||||
TokenNL: SkipState,
|
||||
TokenNone: SkipState,
|
||||
},
|
||||
ASTKindStatement: map[TokenType]int{
|
||||
TokenLit: SectionState,
|
||||
|
||||
311
vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/api.go
generated
vendored
311
vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/api.go
generated
vendored
@@ -67,6 +67,12 @@ func (c *MarketplaceMetering) BatchMeterUsageRequest(input *BatchMeterUsageInput
|
||||
//
|
||||
// BatchMeterUsage can process up to 25 UsageRecords at a time.
|
||||
//
|
||||
// A UsageRecord can optionally include multiple usage allocations, to provide
|
||||
// customers with usagedata split into buckets by tags that you define (or allow
|
||||
// the customer to define).
|
||||
//
|
||||
// BatchMeterUsage requests must be less than 1MB in size.
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
// the error.
|
||||
@@ -87,6 +93,13 @@ func (c *MarketplaceMetering) BatchMeterUsageRequest(input *BatchMeterUsageInput
|
||||
// The usage dimension does not match one of the UsageDimensions associated
|
||||
// with products.
|
||||
//
|
||||
// * InvalidTagException
|
||||
// The tag is invalid, or the number of tags is greater than 5.
|
||||
//
|
||||
// * InvalidUsageAllocationsException
|
||||
// The usage allocation objects are invalid, or the number of allocations is
|
||||
// greater than 500 for a single usage record.
|
||||
//
|
||||
// * InvalidCustomerIdentifierException
|
||||
// You have metered usage for a CustomerIdentifier that does not exist.
|
||||
//
|
||||
@@ -171,6 +184,10 @@ func (c *MarketplaceMetering) MeterUsageRequest(input *MeterUsageInput) (req *re
|
||||
// MeterUsage is authenticated on the buyer's AWS account using credentials
|
||||
// from the EC2 instance, ECS task, or EKS pod.
|
||||
//
|
||||
// MeterUsage can optionally include multiple usage allocations, to provide
|
||||
// customers with usage data split into buckets by tags that you define (or
|
||||
// allow the customer to define).
|
||||
//
|
||||
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
||||
// with awserr.Error's Code and Message methods to get detailed information about
|
||||
// the error.
|
||||
@@ -191,6 +208,13 @@ func (c *MarketplaceMetering) MeterUsageRequest(input *MeterUsageInput) (req *re
|
||||
// The usage dimension does not match one of the UsageDimensions associated
|
||||
// with products.
|
||||
//
|
||||
// * InvalidTagException
|
||||
// The tag is invalid, or the number of tags is greater than 5.
|
||||
//
|
||||
// * InvalidUsageAllocationsException
|
||||
// The usage allocation objects are invalid, or the number of allocations is
|
||||
// greater than 500 for a single usage record.
|
||||
//
|
||||
// * InvalidEndpointRegionException
|
||||
// The endpoint being called is in a AWS Region different from your EC2 instance,
|
||||
// ECS task, or EKS pod. The Region of the Metering Service endpoint and the
|
||||
@@ -1148,6 +1172,62 @@ func (s *InvalidRegionException) RequestID() string {
|
||||
return s.RespMetadata.RequestID
|
||||
}
|
||||
|
||||
// The tag is invalid, or the number of tags is greater than 5.
|
||||
type InvalidTagException struct {
|
||||
_ struct{} `type:"structure"`
|
||||
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
|
||||
|
||||
Message_ *string `locationName:"message" type:"string"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s InvalidTagException) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s InvalidTagException) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func newErrorInvalidTagException(v protocol.ResponseMetadata) error {
|
||||
return &InvalidTagException{
|
||||
RespMetadata: v,
|
||||
}
|
||||
}
|
||||
|
||||
// Code returns the exception type name.
|
||||
func (s *InvalidTagException) Code() string {
|
||||
return "InvalidTagException"
|
||||
}
|
||||
|
||||
// Message returns the exception's message.
|
||||
func (s *InvalidTagException) Message() string {
|
||||
if s.Message_ != nil {
|
||||
return *s.Message_
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// OrigErr always returns nil, satisfies awserr.Error interface.
|
||||
func (s *InvalidTagException) OrigErr() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *InvalidTagException) Error() string {
|
||||
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
|
||||
}
|
||||
|
||||
// Status code returns the HTTP status code for the request's response error.
|
||||
func (s *InvalidTagException) StatusCode() int {
|
||||
return s.RespMetadata.StatusCode
|
||||
}
|
||||
|
||||
// RequestID returns the service's response RequestID for request.
|
||||
func (s *InvalidTagException) RequestID() string {
|
||||
return s.RespMetadata.RequestID
|
||||
}
|
||||
|
||||
// Registration token is invalid.
|
||||
type InvalidTokenException struct {
|
||||
_ struct{} `type:"structure"`
|
||||
@@ -1204,6 +1284,63 @@ func (s *InvalidTokenException) RequestID() string {
|
||||
return s.RespMetadata.RequestID
|
||||
}
|
||||
|
||||
// The usage allocation objects are invalid, or the number of allocations is
|
||||
// greater than 500 for a single usage record.
|
||||
type InvalidUsageAllocationsException struct {
|
||||
_ struct{} `type:"structure"`
|
||||
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
|
||||
|
||||
Message_ *string `locationName:"message" type:"string"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s InvalidUsageAllocationsException) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s InvalidUsageAllocationsException) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func newErrorInvalidUsageAllocationsException(v protocol.ResponseMetadata) error {
|
||||
return &InvalidUsageAllocationsException{
|
||||
RespMetadata: v,
|
||||
}
|
||||
}
|
||||
|
||||
// Code returns the exception type name.
|
||||
func (s *InvalidUsageAllocationsException) Code() string {
|
||||
return "InvalidUsageAllocationsException"
|
||||
}
|
||||
|
||||
// Message returns the exception's message.
|
||||
func (s *InvalidUsageAllocationsException) Message() string {
|
||||
if s.Message_ != nil {
|
||||
return *s.Message_
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// OrigErr always returns nil, satisfies awserr.Error interface.
|
||||
func (s *InvalidUsageAllocationsException) OrigErr() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *InvalidUsageAllocationsException) Error() string {
|
||||
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
|
||||
}
|
||||
|
||||
// Status code returns the HTTP status code for the request's response error.
|
||||
func (s *InvalidUsageAllocationsException) StatusCode() int {
|
||||
return s.RespMetadata.StatusCode
|
||||
}
|
||||
|
||||
// RequestID returns the service's response RequestID for request.
|
||||
func (s *InvalidUsageAllocationsException) RequestID() string {
|
||||
return s.RespMetadata.RequestID
|
||||
}
|
||||
|
||||
// The usage dimension does not match one of the UsageDimensions associated
|
||||
// with products.
|
||||
type InvalidUsageDimensionException struct {
|
||||
@@ -1283,6 +1420,13 @@ type MeterUsageInput struct {
|
||||
// Timestamp is a required field
|
||||
Timestamp *time.Time `type:"timestamp" required:"true"`
|
||||
|
||||
// The set of UsageAllocations to submit.
|
||||
//
|
||||
// The sum of all UsageAllocation quantities must equal the UsageQuantity of
|
||||
// the MeterUsage request, and each UsageAllocation must have a unique set of
|
||||
// tags (include no tags).
|
||||
UsageAllocations []*UsageAllocation `min:"1" type:"list"`
|
||||
|
||||
// It will be one of the fcp dimension name provided during the publishing of
|
||||
// the product.
|
||||
//
|
||||
@@ -1315,12 +1459,25 @@ func (s *MeterUsageInput) Validate() error {
|
||||
if s.Timestamp == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("Timestamp"))
|
||||
}
|
||||
if s.UsageAllocations != nil && len(s.UsageAllocations) < 1 {
|
||||
invalidParams.Add(request.NewErrParamMinLen("UsageAllocations", 1))
|
||||
}
|
||||
if s.UsageDimension == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("UsageDimension"))
|
||||
}
|
||||
if s.UsageDimension != nil && len(*s.UsageDimension) < 1 {
|
||||
invalidParams.Add(request.NewErrParamMinLen("UsageDimension", 1))
|
||||
}
|
||||
if s.UsageAllocations != nil {
|
||||
for i, v := range s.UsageAllocations {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
if err := v.Validate(); err != nil {
|
||||
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "UsageAllocations", i), err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
@@ -1346,6 +1503,12 @@ func (s *MeterUsageInput) SetTimestamp(v time.Time) *MeterUsageInput {
|
||||
return s
|
||||
}
|
||||
|
||||
// SetUsageAllocations sets the UsageAllocations field's value.
|
||||
func (s *MeterUsageInput) SetUsageAllocations(v []*UsageAllocation) *MeterUsageInput {
|
||||
s.UsageAllocations = v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetUsageDimension sets the UsageDimension field's value.
|
||||
func (s *MeterUsageInput) SetUsageDimension(v string) *MeterUsageInput {
|
||||
s.UsageDimension = &v
|
||||
@@ -1619,6 +1782,67 @@ func (s *ResolveCustomerOutput) SetProductCode(v string) *ResolveCustomerOutput
|
||||
return s
|
||||
}
|
||||
|
||||
// Metadata assigned to an allocation. Each tag is made up of a key and a value.
|
||||
type Tag struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// One part of a key-value pair that makes up a tag. A key is a label that acts
|
||||
// like a category for the specific tag values.
|
||||
//
|
||||
// Key is a required field
|
||||
Key *string `min:"1" type:"string" required:"true"`
|
||||
|
||||
// One part of a key-value pair that makes up a tag. A value acts as a descriptor
|
||||
// within a tag category (key). The value can be empty or null.
|
||||
//
|
||||
// Value is a required field
|
||||
Value *string `min:"1" type:"string" required:"true"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s Tag) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s Tag) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Validate inspects the fields of the type to determine if they are valid.
|
||||
func (s *Tag) Validate() error {
|
||||
invalidParams := request.ErrInvalidParams{Context: "Tag"}
|
||||
if s.Key == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("Key"))
|
||||
}
|
||||
if s.Key != nil && len(*s.Key) < 1 {
|
||||
invalidParams.Add(request.NewErrParamMinLen("Key", 1))
|
||||
}
|
||||
if s.Value == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("Value"))
|
||||
}
|
||||
if s.Value != nil && len(*s.Value) < 1 {
|
||||
invalidParams.Add(request.NewErrParamMinLen("Value", 1))
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetKey sets the Key field's value.
|
||||
func (s *Tag) SetKey(v string) *Tag {
|
||||
s.Key = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetValue sets the Value field's value.
|
||||
func (s *Tag) SetValue(v string) *Tag {
|
||||
s.Value = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// The calls to the API are throttled.
|
||||
type ThrottlingException struct {
|
||||
_ struct{} `type:"structure"`
|
||||
@@ -1731,6 +1955,70 @@ func (s *TimestampOutOfBoundsException) RequestID() string {
|
||||
return s.RespMetadata.RequestID
|
||||
}
|
||||
|
||||
// Usage allocations allow you to split usage into buckets by tags.
|
||||
//
|
||||
// Each UsageAllocation indicates the usage quantity for a specific set of tags.
|
||||
type UsageAllocation struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The total quantity allocated to this bucket of usage.
|
||||
//
|
||||
// AllocatedUsageQuantity is a required field
|
||||
AllocatedUsageQuantity *int64 `type:"integer" required:"true"`
|
||||
|
||||
// The set of tags that define the bucket of usage. For the bucket of items
|
||||
// with no tags, this parameter can be left out.
|
||||
Tags []*Tag `min:"1" type:"list"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
func (s UsageAllocation) String() string {
|
||||
return awsutil.Prettify(s)
|
||||
}
|
||||
|
||||
// GoString returns the string representation
|
||||
func (s UsageAllocation) GoString() string {
|
||||
return s.String()
|
||||
}
|
||||
|
||||
// Validate inspects the fields of the type to determine if they are valid.
|
||||
func (s *UsageAllocation) Validate() error {
|
||||
invalidParams := request.ErrInvalidParams{Context: "UsageAllocation"}
|
||||
if s.AllocatedUsageQuantity == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("AllocatedUsageQuantity"))
|
||||
}
|
||||
if s.Tags != nil && len(s.Tags) < 1 {
|
||||
invalidParams.Add(request.NewErrParamMinLen("Tags", 1))
|
||||
}
|
||||
if s.Tags != nil {
|
||||
for i, v := range s.Tags {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
if err := v.Validate(); err != nil {
|
||||
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetAllocatedUsageQuantity sets the AllocatedUsageQuantity field's value.
|
||||
func (s *UsageAllocation) SetAllocatedUsageQuantity(v int64) *UsageAllocation {
|
||||
s.AllocatedUsageQuantity = &v
|
||||
return s
|
||||
}
|
||||
|
||||
// SetTags sets the Tags field's value.
|
||||
func (s *UsageAllocation) SetTags(v []*Tag) *UsageAllocation {
|
||||
s.Tags = v
|
||||
return s
|
||||
}
|
||||
|
||||
// A UsageRecord indicates a quantity of usage for a given product, customer,
|
||||
// dimension and time.
|
||||
//
|
||||
@@ -1763,6 +2051,10 @@ type UsageRecord struct {
|
||||
//
|
||||
// Timestamp is a required field
|
||||
Timestamp *time.Time `type:"timestamp" required:"true"`
|
||||
|
||||
// The set of UsageAllocations to submit. The sum of all UsageAllocation quantities
|
||||
// must equal the Quantity of the UsageRecord.
|
||||
UsageAllocations []*UsageAllocation `min:"1" type:"list"`
|
||||
}
|
||||
|
||||
// String returns the string representation
|
||||
@@ -1793,6 +2085,19 @@ func (s *UsageRecord) Validate() error {
|
||||
if s.Timestamp == nil {
|
||||
invalidParams.Add(request.NewErrParamRequired("Timestamp"))
|
||||
}
|
||||
if s.UsageAllocations != nil && len(s.UsageAllocations) < 1 {
|
||||
invalidParams.Add(request.NewErrParamMinLen("UsageAllocations", 1))
|
||||
}
|
||||
if s.UsageAllocations != nil {
|
||||
for i, v := range s.UsageAllocations {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
if err := v.Validate(); err != nil {
|
||||
invalidParams.AddNested(fmt.Sprintf("%s[%v]", "UsageAllocations", i), err.(request.ErrInvalidParams))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if invalidParams.Len() > 0 {
|
||||
return invalidParams
|
||||
@@ -1824,6 +2129,12 @@ func (s *UsageRecord) SetTimestamp(v time.Time) *UsageRecord {
|
||||
return s
|
||||
}
|
||||
|
||||
// SetUsageAllocations sets the UsageAllocations field's value.
|
||||
func (s *UsageRecord) SetUsageAllocations(v []*UsageAllocation) *UsageRecord {
|
||||
s.UsageAllocations = v
|
||||
return s
|
||||
}
|
||||
|
||||
// A UsageRecordResult indicates the status of a given UsageRecord processed
|
||||
// by BatchMeterUsage.
|
||||
type UsageRecordResult struct {
|
||||
|
||||
15
vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/errors.go
generated
vendored
15
vendor/github.com/aws/aws-sdk-go/service/marketplacemetering/errors.go
generated
vendored
@@ -81,12 +81,25 @@ const (
|
||||
// when calling RegisterUsage.
|
||||
ErrCodeInvalidRegionException = "InvalidRegionException"
|
||||
|
||||
// ErrCodeInvalidTagException for service response error code
|
||||
// "InvalidTagException".
|
||||
//
|
||||
// The tag is invalid, or the number of tags is greater than 5.
|
||||
ErrCodeInvalidTagException = "InvalidTagException"
|
||||
|
||||
// ErrCodeInvalidTokenException for service response error code
|
||||
// "InvalidTokenException".
|
||||
//
|
||||
// Registration token is invalid.
|
||||
ErrCodeInvalidTokenException = "InvalidTokenException"
|
||||
|
||||
// ErrCodeInvalidUsageAllocationsException for service response error code
|
||||
// "InvalidUsageAllocationsException".
|
||||
//
|
||||
// The usage allocation objects are invalid, or the number of allocations is
|
||||
// greater than 500 for a single usage record.
|
||||
ErrCodeInvalidUsageAllocationsException = "InvalidUsageAllocationsException"
|
||||
|
||||
// ErrCodeInvalidUsageDimensionException for service response error code
|
||||
// "InvalidUsageDimensionException".
|
||||
//
|
||||
@@ -125,7 +138,9 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
|
||||
"InvalidProductCodeException": newErrorInvalidProductCodeException,
|
||||
"InvalidPublicKeyVersionException": newErrorInvalidPublicKeyVersionException,
|
||||
"InvalidRegionException": newErrorInvalidRegionException,
|
||||
"InvalidTagException": newErrorInvalidTagException,
|
||||
"InvalidTokenException": newErrorInvalidTokenException,
|
||||
"InvalidUsageAllocationsException": newErrorInvalidUsageAllocationsException,
|
||||
"InvalidUsageDimensionException": newErrorInvalidUsageDimensionException,
|
||||
"PlatformNotSupportedException": newErrorPlatformNotSupportedException,
|
||||
"ThrottlingException": newErrorThrottlingException,
|
||||
|
||||
12
vendor/github.com/blevesearch/bleve/go.mod
generated
vendored
12
vendor/github.com/blevesearch/bleve/go.mod
generated
vendored
@@ -4,15 +4,15 @@ go 1.13
|
||||
|
||||
require (
|
||||
github.com/RoaringBitmap/roaring v0.4.23
|
||||
github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040
|
||||
github.com/blevesearch/blevex v1.0.0
|
||||
github.com/blevesearch/go-porterstemmer v1.0.3
|
||||
github.com/blevesearch/segment v0.9.0
|
||||
github.com/blevesearch/snowballstem v0.9.0
|
||||
github.com/blevesearch/zap/v11 v11.0.12
|
||||
github.com/blevesearch/zap/v12 v12.0.12
|
||||
github.com/blevesearch/zap/v13 v13.0.4
|
||||
github.com/blevesearch/zap/v14 v14.0.3
|
||||
github.com/blevesearch/zap/v15 v15.0.1
|
||||
github.com/blevesearch/zap/v11 v11.0.14
|
||||
github.com/blevesearch/zap/v12 v12.0.14
|
||||
github.com/blevesearch/zap/v13 v13.0.6
|
||||
github.com/blevesearch/zap/v14 v14.0.5
|
||||
github.com/blevesearch/zap/v15 v15.0.3
|
||||
github.com/couchbase/moss v0.1.0
|
||||
github.com/couchbase/vellum v1.0.2
|
||||
github.com/golang/protobuf v1.3.2
|
||||
|
||||
41
vendor/github.com/blevesearch/bleve/mapping/document.go
generated
vendored
41
vendor/github.com/blevesearch/bleve/mapping/document.go
generated
vendored
@@ -106,28 +106,31 @@ func (dm *DocumentMapping) fieldDescribedByPath(path string) *FieldMapping {
|
||||
return subDocMapping.fieldDescribedByPath(encodePath(pathElements[1:]))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// just 1 path elememnt
|
||||
// first look for property name with empty field
|
||||
for propName, subDocMapping := range dm.Properties {
|
||||
if propName == pathElements[0] {
|
||||
// found property name match, now look at its fields
|
||||
for _, field := range subDocMapping.Fields {
|
||||
if field.Name == "" || field.Name == pathElements[0] {
|
||||
// match
|
||||
return field
|
||||
}
|
||||
}
|
||||
|
||||
// either the path just had one element
|
||||
// or it had multiple, but no match for the first element at this level
|
||||
// look for match with full path
|
||||
|
||||
// first look for property name with empty field
|
||||
for propName, subDocMapping := range dm.Properties {
|
||||
if propName == path {
|
||||
// found property name match, now look at its fields
|
||||
for _, field := range subDocMapping.Fields {
|
||||
if field.Name == "" || field.Name == path {
|
||||
// match
|
||||
return field
|
||||
}
|
||||
}
|
||||
}
|
||||
// next, walk the properties again, looking for field overriding the name
|
||||
for propName, subDocMapping := range dm.Properties {
|
||||
if propName != pathElements[0] {
|
||||
// property name isn't a match, but field name could override it
|
||||
for _, field := range subDocMapping.Fields {
|
||||
if field.Name == pathElements[0] {
|
||||
return field
|
||||
}
|
||||
}
|
||||
// next, walk the properties again, looking for field overriding the name
|
||||
for propName, subDocMapping := range dm.Properties {
|
||||
if propName != path {
|
||||
// property name isn't a match, but field name could override it
|
||||
for _, field := range subDocMapping.Fields {
|
||||
if field.Name == path {
|
||||
return field
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
2
vendor/github.com/blevesearch/bleve/mapping/index.go
generated
vendored
2
vendor/github.com/blevesearch/bleve/mapping/index.go
generated
vendored
@@ -50,7 +50,7 @@ type IndexMappingImpl struct {
|
||||
DefaultField string `json:"default_field"`
|
||||
StoreDynamic bool `json:"store_dynamic"`
|
||||
IndexDynamic bool `json:"index_dynamic"`
|
||||
DocValuesDynamic bool `json:"docvalues_dynamic,omitempty"`
|
||||
DocValuesDynamic bool `json:"docvalues_dynamic"`
|
||||
CustomAnalysis *customAnalysis `json:"analysis,omitempty"`
|
||||
cache *registry.Cache
|
||||
}
|
||||
|
||||
2
vendor/github.com/blevesearch/zap/v11/go.mod
generated
vendored
2
vendor/github.com/blevesearch/zap/v11/go.mod
generated
vendored
@@ -4,7 +4,7 @@ go 1.12
|
||||
|
||||
require (
|
||||
github.com/RoaringBitmap/roaring v0.4.23
|
||||
github.com/blevesearch/bleve v1.0.12
|
||||
github.com/blevesearch/bleve v1.0.14
|
||||
github.com/blevesearch/mmap-go v1.0.2
|
||||
github.com/couchbase/vellum v1.0.2
|
||||
github.com/golang/snappy v0.0.1
|
||||
|
||||
2
vendor/github.com/blevesearch/zap/v12/go.mod
generated
vendored
2
vendor/github.com/blevesearch/zap/v12/go.mod
generated
vendored
@@ -4,7 +4,7 @@ go 1.12
|
||||
|
||||
require (
|
||||
github.com/RoaringBitmap/roaring v0.4.23
|
||||
github.com/blevesearch/bleve v1.0.12
|
||||
github.com/blevesearch/bleve v1.0.14
|
||||
github.com/blevesearch/mmap-go v1.0.2
|
||||
github.com/couchbase/vellum v1.0.2
|
||||
github.com/golang/snappy v0.0.1
|
||||
|
||||
2
vendor/github.com/blevesearch/zap/v13/go.mod
generated
vendored
2
vendor/github.com/blevesearch/zap/v13/go.mod
generated
vendored
@@ -4,7 +4,7 @@ go 1.12
|
||||
|
||||
require (
|
||||
github.com/RoaringBitmap/roaring v0.4.23
|
||||
github.com/blevesearch/bleve v1.0.12
|
||||
github.com/blevesearch/bleve v1.0.14
|
||||
github.com/blevesearch/mmap-go v1.0.2
|
||||
github.com/couchbase/vellum v1.0.2
|
||||
github.com/golang/snappy v0.0.1
|
||||
|
||||
2
vendor/github.com/blevesearch/zap/v14/go.mod
generated
vendored
2
vendor/github.com/blevesearch/zap/v14/go.mod
generated
vendored
@@ -4,7 +4,7 @@ go 1.12
|
||||
|
||||
require (
|
||||
github.com/RoaringBitmap/roaring v0.4.23
|
||||
github.com/blevesearch/bleve v1.0.12
|
||||
github.com/blevesearch/bleve v1.0.14
|
||||
github.com/blevesearch/mmap-go v1.0.2
|
||||
github.com/couchbase/vellum v1.0.2
|
||||
github.com/golang/snappy v0.0.1
|
||||
|
||||
2
vendor/github.com/blevesearch/zap/v15/go.mod
generated
vendored
2
vendor/github.com/blevesearch/zap/v15/go.mod
generated
vendored
@@ -4,7 +4,7 @@ go 1.12
|
||||
|
||||
require (
|
||||
github.com/RoaringBitmap/roaring v0.4.23
|
||||
github.com/blevesearch/bleve v1.0.12
|
||||
github.com/blevesearch/bleve v1.0.14
|
||||
github.com/blevesearch/mmap-go v1.0.2
|
||||
github.com/couchbase/vellum v1.0.2
|
||||
github.com/golang/snappy v0.0.1
|
||||
|
||||
8
vendor/github.com/blevesearch/zap/v15/intDecoder.go
generated
vendored
8
vendor/github.com/blevesearch/zap/v15/intDecoder.go
generated
vendored
@@ -105,6 +105,10 @@ func (d *chunkedIntDecoder) readUvarint() (uint64, error) {
|
||||
return d.r.ReadUvarint()
|
||||
}
|
||||
|
||||
func (d *chunkedIntDecoder) readBytes(start, end int) []byte {
|
||||
return d.curChunkBytes[start:end]
|
||||
}
|
||||
|
||||
func (d *chunkedIntDecoder) SkipUvarint() {
|
||||
d.r.SkipUvarint()
|
||||
}
|
||||
@@ -116,3 +120,7 @@ func (d *chunkedIntDecoder) SkipBytes(count int) {
|
||||
func (d *chunkedIntDecoder) Len() int {
|
||||
return d.r.Len()
|
||||
}
|
||||
|
||||
func (d *chunkedIntDecoder) remainingLen() int {
|
||||
return len(d.curChunkBytes) - d.r.Len()
|
||||
}
|
||||
|
||||
51
vendor/github.com/blevesearch/zap/v15/merge.go
generated
vendored
51
vendor/github.com/blevesearch/zap/v15/merge.go
generated
vendored
@@ -341,11 +341,16 @@ func persistMergedRest(segments []*SegmentBase, dropsIn []*roaring.Bitmap,
|
||||
|
||||
postItr = postings.iterator(true, true, true, postItr)
|
||||
|
||||
// can no longer optimize by copying, since chunk factor could have changed
|
||||
lastDocNum, lastFreq, lastNorm, bufLoc, err = mergeTermFreqNormLocs(
|
||||
fieldsMap, term, postItr, newDocNums[itrI], newRoaring,
|
||||
tfEncoder, locEncoder, bufLoc)
|
||||
|
||||
if fieldsSame {
|
||||
// can optimize by copying freq/norm/loc bytes directly
|
||||
lastDocNum, lastFreq, lastNorm, err = mergeTermFreqNormLocsByCopying(
|
||||
term, postItr, newDocNums[itrI], newRoaring,
|
||||
tfEncoder, locEncoder)
|
||||
} else {
|
||||
lastDocNum, lastFreq, lastNorm, bufLoc, err = mergeTermFreqNormLocs(
|
||||
fieldsMap, term, postItr, newDocNums[itrI], newRoaring,
|
||||
tfEncoder, locEncoder, bufLoc)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
@@ -473,6 +478,42 @@ func persistMergedRest(segments []*SegmentBase, dropsIn []*roaring.Bitmap,
|
||||
return rv, fieldDvLocsOffset, nil
|
||||
}
|
||||
|
||||
func mergeTermFreqNormLocsByCopying(term []byte, postItr *PostingsIterator,
|
||||
newDocNums []uint64, newRoaring *roaring.Bitmap,
|
||||
tfEncoder *chunkedIntCoder, locEncoder *chunkedIntCoder) (
|
||||
lastDocNum uint64, lastFreq uint64, lastNorm uint64, err error) {
|
||||
nextDocNum, nextFreq, nextNorm, nextFreqNormBytes, nextLocBytes, err :=
|
||||
postItr.nextBytes()
|
||||
for err == nil && len(nextFreqNormBytes) > 0 {
|
||||
hitNewDocNum := newDocNums[nextDocNum]
|
||||
if hitNewDocNum == docDropped {
|
||||
return 0, 0, 0, fmt.Errorf("see hit with dropped doc num")
|
||||
}
|
||||
|
||||
newRoaring.Add(uint32(hitNewDocNum))
|
||||
err = tfEncoder.AddBytes(hitNewDocNum, nextFreqNormBytes)
|
||||
if err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
|
||||
if len(nextLocBytes) > 0 {
|
||||
err = locEncoder.AddBytes(hitNewDocNum, nextLocBytes)
|
||||
if err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
}
|
||||
|
||||
lastDocNum = hitNewDocNum
|
||||
lastFreq = nextFreq
|
||||
lastNorm = nextNorm
|
||||
|
||||
nextDocNum, nextFreq, nextNorm, nextFreqNormBytes, nextLocBytes, err =
|
||||
postItr.nextBytes()
|
||||
}
|
||||
|
||||
return lastDocNum, lastFreq, lastNorm, err
|
||||
}
|
||||
|
||||
func mergeTermFreqNormLocs(fieldsMap map[string]uint16, term []byte, postItr *PostingsIterator,
|
||||
newDocNums []uint64, newRoaring *roaring.Bitmap,
|
||||
tfEncoder *chunkedIntCoder, locEncoder *chunkedIntCoder, bufLoc []uint64) (
|
||||
|
||||
52
vendor/github.com/blevesearch/zap/v15/posting.go
generated
vendored
52
vendor/github.com/blevesearch/zap/v15/posting.go
generated
vendored
@@ -588,6 +588,58 @@ func (i *PostingsIterator) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool,
|
||||
return uint64(n), true, nil
|
||||
}
|
||||
|
||||
var freqHasLocs1Hit = encodeFreqHasLocs(1, false)
|
||||
|
||||
// nextBytes returns the docNum and the encoded freq & loc bytes for
|
||||
// the next posting
|
||||
func (i *PostingsIterator) nextBytes() (
|
||||
docNumOut uint64, freq uint64, normBits uint64,
|
||||
bytesFreqNorm []byte, bytesLoc []byte, err error) {
|
||||
docNum, exists, err := i.nextDocNumAtOrAfter(0)
|
||||
if err != nil || !exists {
|
||||
return 0, 0, 0, nil, nil, err
|
||||
}
|
||||
|
||||
if i.normBits1Hit != 0 {
|
||||
if i.buf == nil {
|
||||
i.buf = make([]byte, binary.MaxVarintLen64*2)
|
||||
}
|
||||
n := binary.PutUvarint(i.buf, freqHasLocs1Hit)
|
||||
n += binary.PutUvarint(i.buf[n:], i.normBits1Hit)
|
||||
return docNum, uint64(1), i.normBits1Hit, i.buf[:n], nil, nil
|
||||
}
|
||||
|
||||
startFreqNorm := i.freqNormReader.remainingLen()
|
||||
|
||||
var hasLocs bool
|
||||
|
||||
freq, normBits, hasLocs, err = i.readFreqNormHasLocs()
|
||||
if err != nil {
|
||||
return 0, 0, 0, nil, nil, err
|
||||
}
|
||||
|
||||
endFreqNorm := i.freqNormReader.remainingLen()
|
||||
bytesFreqNorm = i.freqNormReader.readBytes(startFreqNorm, endFreqNorm)
|
||||
|
||||
if hasLocs {
|
||||
startLoc := i.locReader.remainingLen()
|
||||
|
||||
numLocsBytes, err := i.locReader.readUvarint()
|
||||
if err != nil {
|
||||
return 0, 0, 0, nil, nil,
|
||||
fmt.Errorf("error reading location nextBytes numLocs: %v", err)
|
||||
}
|
||||
|
||||
// skip over all the location bytes
|
||||
i.locReader.SkipBytes(int(numLocsBytes))
|
||||
|
||||
endLoc := i.locReader.remainingLen()
|
||||
bytesLoc = i.locReader.readBytes(startLoc, endLoc)
|
||||
}
|
||||
|
||||
return docNum, freq, normBits, bytesFreqNorm, bytesLoc, nil
|
||||
}
|
||||
|
||||
// optimization when the postings list is "clean" (e.g., no updates &
|
||||
// no deletions) where the all bitmap is the same as the actual bitmap
|
||||
func (i *PostingsIterator) nextDocNumAtOrAfterClean(
|
||||
|
||||
2
vendor/github.com/blevesearch/zap/v15/segment.go
generated
vendored
2
vendor/github.com/blevesearch/zap/v15/segment.go
generated
vendored
@@ -194,7 +194,7 @@ func (s *Segment) loadConfig() error {
|
||||
verOffset := crcOffset - 4
|
||||
s.version = binary.BigEndian.Uint32(s.mm[verOffset : verOffset+4])
|
||||
if s.version != Version {
|
||||
return fmt.Errorf("unsupported version %d", s.version)
|
||||
return fmt.Errorf("unsupported version %d != %d", s.version, Version)
|
||||
}
|
||||
|
||||
chunkOffset := verOffset - 4
|
||||
|
||||
13
vendor/github.com/fatih/color/README.md
generated
vendored
13
vendor/github.com/fatih/color/README.md
generated
vendored
@@ -1,20 +1,11 @@
|
||||
# Archived project. No maintenance.
|
||||
|
||||
This project is not maintained anymore and is archived. Feel free to fork and
|
||||
make your own changes if needed. For more detail read my blog post: [Taking an indefinite sabbatical from my projects](https://arslan.io/2018/10/09/taking-an-indefinite-sabbatical-from-my-projects/)
|
||||
|
||||
Thanks to everyone for their valuable feedback and contributions.
|
||||
|
||||
|
||||
# Color [](https://godoc.org/github.com/fatih/color)
|
||||
# color [](https://github.com/fatih/color/actions) [](https://pkg.go.dev/github.com/fatih/color)
|
||||
|
||||
Color lets you use colorized outputs in terms of [ANSI Escape
|
||||
Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
|
||||
has support for Windows too! The API can be used in several ways, pick one that
|
||||
suits you.
|
||||
|
||||
|
||||

|
||||

|
||||
|
||||
|
||||
## Install
|
||||
|
||||
4
vendor/github.com/fatih/color/go.mod
generated
vendored
4
vendor/github.com/fatih/color/go.mod
generated
vendored
@@ -3,6 +3,6 @@ module github.com/fatih/color
|
||||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/mattn/go-colorable v0.1.4
|
||||
github.com/mattn/go-isatty v0.0.11
|
||||
github.com/mattn/go-colorable v0.1.8
|
||||
github.com/mattn/go-isatty v0.0.12
|
||||
)
|
||||
|
||||
15
vendor/github.com/fatih/color/go.sum
generated
vendored
15
vendor/github.com/fatih/color/go.sum
generated
vendored
@@ -1,8 +1,7 @@
|
||||
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
|
||||
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
|
||||
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
||||
8
vendor/github.com/getsentry/sentry-go/.golangci.yml
generated
vendored
8
vendor/github.com/getsentry/sentry-go/.golangci.yml
generated
vendored
@@ -39,6 +39,14 @@ issues:
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- prealloc
|
||||
- path: _test\.go
|
||||
text: "G306:"
|
||||
linters:
|
||||
- gosec
|
||||
- path: errors_test\.go
|
||||
linters:
|
||||
- unused
|
||||
- path: http/example_test\.go
|
||||
linters:
|
||||
- errcheck
|
||||
- bodyclose
|
||||
|
||||
11
vendor/github.com/getsentry/sentry-go/.travis.yml
generated
vendored
11
vendor/github.com/getsentry/sentry-go/.travis.yml
generated
vendored
@@ -1,9 +1,9 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.12.x
|
||||
- 1.13.x
|
||||
- 1.14.x
|
||||
- 1.15.x
|
||||
- master
|
||||
|
||||
env:
|
||||
@@ -33,6 +33,15 @@ before_install:
|
||||
# branch, since Travis clones only the target branch.
|
||||
- git fetch origin master:remotes/origin/master
|
||||
|
||||
install: |
|
||||
if [[ $GO111MODULE == off ]]; then
|
||||
# Iris is not supported in legacy GOPATH mode. We delete the source code
|
||||
# because otherwise lint, build, and test steps would fail.
|
||||
rm -vrf ./iris/ ./example/iris/
|
||||
go get -v -t ./...
|
||||
fi
|
||||
# go get is not required in Module mode
|
||||
|
||||
script:
|
||||
- golangci-lint run --new-from-rev=$(git merge-base origin/master HEAD)
|
||||
- go build ./...
|
||||
|
||||
35
vendor/github.com/getsentry/sentry-go/CHANGELOG.md
generated
vendored
35
vendor/github.com/getsentry/sentry-go/CHANGELOG.md
generated
vendored
@@ -1,5 +1,38 @@
|
||||
# Changelog
|
||||
|
||||
## v0.9.0
|
||||
|
||||
- feat: Initial tracing and performance monitoring support (#285)
|
||||
- doc: Revamp sentryhttp documentation (#304)
|
||||
- fix: Hub.PopScope never empties the scope stack (#300)
|
||||
- ref: Report Event.Timestamp in local time (#299)
|
||||
- ref: Report Breadcrumb.Timestamp in local time (#299)
|
||||
|
||||
_NOTE:_
|
||||
This version introduces support for [Sentry's Performance Monitoring](https://docs.sentry.io/platforms/go/performance/).
|
||||
The new tracing capabilities are beta, and we plan to expand them on future versions. Feedback is welcome, please open new issues on GitHub.
|
||||
The `sentryhttp` package got better API docs, an [updated usage example](https://github.com/getsentry/sentry-go/tree/master/example/http) and support for creating automatic transactions as part of Performance Monitoring.
|
||||
|
||||
## v0.8.0
|
||||
|
||||
- build: Bump required version of Iris (#296)
|
||||
- fix: avoid unnecessary allocation in Client.processEvent (#293)
|
||||
- doc: Remove deprecation of sentryhttp.HandleFunc (#284)
|
||||
- ref: Update sentryhttp example (#283)
|
||||
- doc: Improve documentation of sentryhttp package (#282)
|
||||
- doc: Clarify SampleRate documentation (#279)
|
||||
- fix: Remove RawStacktrace (#278)
|
||||
- docs: Add example of custom HTTP transport
|
||||
- ci: Test against go1.15, drop go1.12 support (#271)
|
||||
|
||||
_NOTE:_
|
||||
This version comes with a few updates. Some examples and documentation have been
|
||||
improved. We've bumped the supported version of the Iris framework to avoid
|
||||
LGPL-licensed modules in the module dependency graph.
|
||||
The `Exception.RawStacktrace` and `Thread.RawStacktrace` fields have been
|
||||
removed to conform to Sentry's ingestion protocol, only `Exception.Stacktrace`
|
||||
and `Thread.Stacktrace` should appear in user code.
|
||||
|
||||
## v0.7.0
|
||||
|
||||
- feat: Include original error when event cannot be encoded as JSON (#258)
|
||||
@@ -131,7 +164,7 @@ Please verify the usage of `sentry.Flush` in your code base.
|
||||
|
||||
## v0.3.0
|
||||
|
||||
- feat: Retry event marshalling without contextual data if the first pass fails
|
||||
- feat: Retry event marshaling without contextual data if the first pass fails
|
||||
- fix: Include `url.Parse` error in `DsnParseError`
|
||||
- fix: Make more `Scope` methods safe for concurrency
|
||||
- fix: Synchronize concurrent access to `Hub.client`
|
||||
|
||||
118
vendor/github.com/getsentry/sentry-go/README.md
generated
vendored
118
vendor/github.com/getsentry/sentry-go/README.md
generated
vendored
@@ -52,118 +52,39 @@ Check out the [list of released versions](https://pkg.go.dev/github.com/getsentr
|
||||
## Configuration
|
||||
|
||||
To use `sentry-go`, you’ll need to import the `sentry-go` package and initialize
|
||||
it with your DSN and other [options](https://godoc.org/github.com/getsentry/sentry-go#ClientOptions).
|
||||
it with your DSN and other [options](https://pkg.go.dev/github.com/getsentry/sentry-go#ClientOptions).
|
||||
|
||||
If not specified in the SDK initialization, the
|
||||
[DSN](https://docs.sentry.io/error-reporting/configuration/?platform=go#dsn),
|
||||
[Release](https://docs.sentry.io/workflow/releases/?platform=go) and
|
||||
[Environment](https://docs.sentry.io/enriching-error-data/environments/?platform=go)
|
||||
[DSN](https://docs.sentry.io/product/sentry-basics/dsn-explainer/),
|
||||
[Release](https://docs.sentry.io/product/releases/) and
|
||||
[Environment](https://docs.sentry.io/product/sentry-basics/environments/)
|
||||
are read from the environment variables `SENTRY_DSN`, `SENTRY_RELEASE` and
|
||||
`SENTRY_ENVIRONMENT`, respectively.
|
||||
|
||||
More on this in the [Configuration](https://docs.sentry.io/platforms/go/config/)
|
||||
section of the official Sentry documentation.
|
||||
More on this in the [Configuration section of the official Sentry Go SDK documentation](https://docs.sentry.io/platforms/go/configuration/).
|
||||
|
||||
## Usage
|
||||
|
||||
The SDK must be initialized with a call to `sentry.Init`. The default transport
|
||||
is asynchronous and thus most programs should call `sentry.Flush` to wait until
|
||||
buffered events are sent to Sentry right before the program terminates.
|
||||
The SDK supports reporting errors and tracking application performance.
|
||||
|
||||
Typically, `sentry.Init` is called in the beginning of `func main` and
|
||||
`sentry.Flush` is [deferred](https://golang.org/ref/spec#Defer_statements) right
|
||||
after.
|
||||
To get started, have a look at one of our [examples](example/):
|
||||
- [Basic error instrumentation](example/basic/main.go)
|
||||
- [Error and tracing for HTTP servers](example/http/main.go)
|
||||
|
||||
> Note that if the program terminates with a call to
|
||||
> [`os.Exit`](https://golang.org/pkg/os/#Exit), either directly or indirectly
|
||||
> via another function like `log.Fatal`, deferred functions are not run.
|
||||
>
|
||||
> In that case, and if it is important for you to report outstanding events
|
||||
> before terminating the program, arrange for `sentry.Flush` to be called before
|
||||
> the program terminates.
|
||||
|
||||
Example:
|
||||
|
||||
```go
|
||||
// This is an example program that makes an HTTP request and prints response
|
||||
// headers. Whenever a request fails, the error is reported to Sentry.
|
||||
//
|
||||
// Try it by running:
|
||||
//
|
||||
// go run main.go
|
||||
// go run main.go https://sentry.io
|
||||
// go run main.go bad-url
|
||||
//
|
||||
// To actually report events to Sentry, set the DSN either by editing the
|
||||
// appropriate line below or setting the environment variable SENTRY_DSN to
|
||||
// match the DSN of your Sentry project.
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/getsentry/sentry-go"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if len(os.Args) < 2 {
|
||||
log.Fatalf("usage: %s URL", os.Args[0])
|
||||
}
|
||||
|
||||
err := sentry.Init(sentry.ClientOptions{
|
||||
// Either set your DSN here or set the SENTRY_DSN environment variable.
|
||||
Dsn: "",
|
||||
// Enable printing of SDK debug messages.
|
||||
// Useful when getting started or trying to figure something out.
|
||||
Debug: true,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("sentry.Init: %s", err)
|
||||
}
|
||||
// Flush buffered events before the program terminates.
|
||||
// Set the timeout to the maximum duration the program can afford to wait.
|
||||
defer sentry.Flush(2 * time.Second)
|
||||
|
||||
resp, err := http.Get(os.Args[1])
|
||||
if err != nil {
|
||||
sentry.CaptureException(err)
|
||||
log.Printf("reported to Sentry: %s", err)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
for header, values := range resp.Header {
|
||||
for _, value := range values {
|
||||
fmt.Printf("%s=%s\n", header, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
For your convenience, this example is available at
|
||||
[`example/basic/main.go`](example/basic/main.go).
|
||||
There are also more examples in the
|
||||
[example](example) directory.
|
||||
We also provide a [complete API reference](https://pkg.go.dev/github.com/getsentry/sentry-go).
|
||||
|
||||
For more detailed information about how to get the most out of `sentry-go`,
|
||||
checkout the official documentation:
|
||||
|
||||
- [Configuration](https://docs.sentry.io/platforms/go/config)
|
||||
- [Error Reporting](https://docs.sentry.io/error-reporting/quickstart?platform=go)
|
||||
- [Enriching Error Data](https://docs.sentry.io/enriching-error-data/additional-data/?platform=go)
|
||||
- [Transports](https://docs.sentry.io/platforms/go/transports)
|
||||
- [Integrations](https://docs.sentry.io/platforms/go/integrations)
|
||||
- [net/http](https://docs.sentry.io/platforms/go/http)
|
||||
- [echo](https://docs.sentry.io/platforms/go/echo)
|
||||
- [fasthttp](https://docs.sentry.io/platforms/go/fasthttp)
|
||||
- [gin](https://docs.sentry.io/platforms/go/gin)
|
||||
- [iris](https://docs.sentry.io/platforms/go/iris)
|
||||
- [martini](https://docs.sentry.io/platforms/go/martini)
|
||||
- [negroni](https://docs.sentry.io/platforms/go/negroni)
|
||||
- [Sentry Go SDK documentation](https://docs.sentry.io/platforms/go/)
|
||||
- Guides:
|
||||
- [net/http](https://docs.sentry.io/platforms/go/guides/http/)
|
||||
- [echo](https://docs.sentry.io/platforms/go/guides/echo/)
|
||||
- [fasthttp](https://docs.sentry.io/platforms/go/guides/fasthttp/)
|
||||
- [gin](https://docs.sentry.io/platforms/go/guides/gin/)
|
||||
- [iris](https://docs.sentry.io/platforms/go/guides/iris/)
|
||||
- [martini](https://docs.sentry.io/platforms/go/guides/martini/)
|
||||
- [negroni](https://docs.sentry.io/platforms/go/guides/negroni/)
|
||||
|
||||
## Resources
|
||||
|
||||
@@ -177,7 +98,6 @@ checkout the official documentation:
|
||||
- [](http://stackoverflow.com/questions/tagged/sentry)
|
||||
- [](https://twitter.com/intent/follow?screen_name=getsentry)
|
||||
|
||||
|
||||
## License
|
||||
|
||||
Licensed under
|
||||
|
||||
116
vendor/github.com/getsentry/sentry-go/client.go
generated
vendored
116
vendor/github.com/getsentry/sentry-go/client.go
generated
vendored
@@ -3,6 +3,7 @@ package sentry
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -14,6 +15,8 @@ import (
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/getsentry/sentry-go/internal/debug"
|
||||
)
|
||||
|
||||
// maxErrorDepth is the maximum number of errors reported in a chain of errors.
|
||||
@@ -88,7 +91,9 @@ var globalEventProcessors []EventProcessor
|
||||
// AddGlobalEventProcessor adds processor to the global list of event
|
||||
// processors. Global event processors apply to all events.
|
||||
//
|
||||
// Deprecated: Use Scope.AddEventProcessor or Client.AddEventProcessor instead.
|
||||
// AddGlobalEventProcessor is deprecated. Most users will prefer to initialize
|
||||
// the SDK with Init and provide a ClientOptions.BeforeSend function or use
|
||||
// Scope.AddEventProcessor instead.
|
||||
func AddGlobalEventProcessor(processor EventProcessor) {
|
||||
globalEventProcessors = append(globalEventProcessors, processor)
|
||||
}
|
||||
@@ -110,8 +115,15 @@ type ClientOptions struct {
|
||||
// Configures whether SDK should generate and attach stacktraces to pure
|
||||
// capture message calls.
|
||||
AttachStacktrace bool
|
||||
// The sample rate for event submission (0.0 - 1.0, defaults to 1.0).
|
||||
// The sample rate for event submission in the range [0.0, 1.0]. By default,
|
||||
// all events are sent. Thus, as a historical special case, the sample rate
|
||||
// 0.0 is treated as if it was 1.0. To drop all events, set the DSN to the
|
||||
// empty string.
|
||||
SampleRate float64
|
||||
// The sample rate for sampling traces in the range [0.0, 1.0].
|
||||
TracesSampleRate float64
|
||||
// Used to customize the sampling of traces, overrides TracesSampleRate.
|
||||
TracesSampler TracesSampler
|
||||
// List of regexp strings that will be used to match against event's message
|
||||
// and if applicable, caught errors type and value.
|
||||
// If the match is found, then a whole event will be dropped.
|
||||
@@ -157,17 +169,29 @@ type ClientOptions struct {
|
||||
}
|
||||
|
||||
// Client is the underlying processor that is used by the main API and Hub
|
||||
// instances.
|
||||
// instances. It must be created with NewClient.
|
||||
type Client struct {
|
||||
options ClientOptions
|
||||
dsn *Dsn
|
||||
eventProcessors []EventProcessor
|
||||
integrations []Integration
|
||||
Transport Transport
|
||||
// Transport is read-only. Replacing the transport of an existing client is
|
||||
// not supported, create a new client instead.
|
||||
Transport Transport
|
||||
}
|
||||
|
||||
// NewClient creates and returns an instance of Client configured using ClientOptions.
|
||||
// NewClient creates and returns an instance of Client configured using
|
||||
// ClientOptions.
|
||||
//
|
||||
// Most users will not create clients directly. Instead, initialize the SDK with
|
||||
// Init and use the package-level functions (for simple programs that run on a
|
||||
// single goroutine) or hub methods (for concurrent programs, for example web
|
||||
// servers).
|
||||
func NewClient(options ClientOptions) (*Client, error) {
|
||||
if options.TracesSampleRate != 0.0 && options.TracesSampler != nil {
|
||||
return nil, errors.New("TracesSampleRate and TracesSampler are mutually exclusive")
|
||||
}
|
||||
|
||||
if options.Debug {
|
||||
debugWriter := options.DebugWriter
|
||||
if debugWriter == nil {
|
||||
@@ -188,6 +212,13 @@ func NewClient(options ClientOptions) (*Client, error) {
|
||||
options.Environment = os.Getenv("SENTRY_ENVIRONMENT")
|
||||
}
|
||||
|
||||
if env := os.Getenv("SENTRYGODEBUG"); env == "dumphttp=1" {
|
||||
options.HTTPTransport = &debug.Transport{
|
||||
RoundTripper: http.DefaultTransport,
|
||||
Output: os.Stderr,
|
||||
}
|
||||
}
|
||||
|
||||
var dsn *Dsn
|
||||
if options.Dsn != "" {
|
||||
var err error
|
||||
@@ -209,17 +240,26 @@ func NewClient(options ClientOptions) (*Client, error) {
|
||||
}
|
||||
|
||||
func (client *Client) setupTransport() {
|
||||
transport := client.options.Transport
|
||||
opts := client.options
|
||||
transport := opts.Transport
|
||||
|
||||
if transport == nil {
|
||||
if client.options.Dsn == "" {
|
||||
if opts.Dsn == "" {
|
||||
transport = new(noopTransport)
|
||||
} else {
|
||||
transport = NewHTTPTransport()
|
||||
httpTransport := NewHTTPTransport()
|
||||
// When tracing is enabled, use larger buffer to
|
||||
// accommodate more concurrent events.
|
||||
// TODO(tracing): consider using separate buffers per
|
||||
// event type.
|
||||
if opts.TracesSampleRate != 0 || opts.TracesSampler != nil {
|
||||
httpTransport.BufferSize = 1000
|
||||
}
|
||||
transport = httpTransport
|
||||
}
|
||||
}
|
||||
|
||||
transport.Configure(client.options)
|
||||
transport.Configure(opts)
|
||||
client.Transport = transport
|
||||
}
|
||||
|
||||
@@ -246,7 +286,13 @@ func (client *Client) setupIntegrations() {
|
||||
}
|
||||
}
|
||||
|
||||
// AddEventProcessor adds an event processor to the client.
|
||||
// AddEventProcessor adds an event processor to the client. It must not be
|
||||
// called from concurrent goroutines. Most users will prefer to use
|
||||
// ClientOptions.BeforeSend or Scope.AddEventProcessor instead.
|
||||
//
|
||||
// Note that typical programs have only a single client created by Init and the
|
||||
// client is shared among multiple hubs, one per goroutine, such that adding an
|
||||
// event processor to the client affects all hubs that share the client.
|
||||
func (client *Client) AddEventProcessor(processor EventProcessor) {
|
||||
client.eventProcessors = append(client.eventProcessors, processor)
|
||||
}
|
||||
@@ -418,16 +464,31 @@ func (client *Client) processEvent(event *Event, hint *EventHint, scope EventMod
|
||||
|
||||
options := client.Options()
|
||||
|
||||
// TODO: Reconsider if its worth going away from default implementation
|
||||
// of other SDKs. In Go zero value (default) for float32 is 0.0,
|
||||
// which means that if someone uses ClientOptions{} struct directly
|
||||
// and we would not check for 0 here, we'd skip all events by default
|
||||
if options.SampleRate != 0.0 {
|
||||
randomFloat := rng.Float64()
|
||||
if randomFloat > options.SampleRate {
|
||||
Logger.Println("Event dropped due to SampleRate hit.")
|
||||
return nil
|
||||
}
|
||||
// The default error event sample rate for all SDKs is 1.0 (send all).
|
||||
//
|
||||
// In Go, the zero value (default) for float64 is 0.0, which means that
|
||||
// constructing a client with NewClient(ClientOptions{}), or, equivalently,
|
||||
// initializing the SDK with Init(ClientOptions{}) without an explicit
|
||||
// SampleRate would drop all events.
|
||||
//
|
||||
// To retain the desired default behavior, we exceptionally flip SampleRate
|
||||
// from 0.0 to 1.0 here. Setting the sample rate to 0.0 is not very useful
|
||||
// anyway, and the same end result can be achieved in many other ways like
|
||||
// not initializing the SDK, setting the DSN to the empty string or using an
|
||||
// event processor that always returns nil.
|
||||
//
|
||||
// An alternative API could be such that default options don't need to be
|
||||
// the same as Go's zero values, for example using the Functional Options
|
||||
// pattern. That would either require a breaking change if we want to reuse
|
||||
// the obvious NewClient name, or a new function as an alternative
|
||||
// constructor.
|
||||
if options.SampleRate == 0.0 {
|
||||
options.SampleRate = 1.0
|
||||
}
|
||||
|
||||
if !sample(options.SampleRate) {
|
||||
Logger.Println("Event dropped due to SampleRate hit.")
|
||||
return nil
|
||||
}
|
||||
|
||||
if event = client.prepareEvent(event, hint, scope); event == nil {
|
||||
@@ -436,11 +497,10 @@ func (client *Client) processEvent(event *Event, hint *EventHint, scope EventMod
|
||||
|
||||
// As per spec, transactions do not go through BeforeSend.
|
||||
if event.Type != transactionType && options.BeforeSend != nil {
|
||||
h := &EventHint{}
|
||||
if hint != nil {
|
||||
h = hint
|
||||
if hint == nil {
|
||||
hint = &EventHint{}
|
||||
}
|
||||
if event = options.BeforeSend(event, h); event == nil {
|
||||
if event = options.BeforeSend(event, hint); event == nil {
|
||||
Logger.Println("Event dropped due to BeforeSend callback.")
|
||||
return nil
|
||||
}
|
||||
@@ -457,7 +517,7 @@ func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventMod
|
||||
}
|
||||
|
||||
if event.Timestamp.IsZero() {
|
||||
event.Timestamp = time.Now().UTC()
|
||||
event.Timestamp = time.Now()
|
||||
}
|
||||
|
||||
if event.Level == "" {
|
||||
@@ -540,3 +600,9 @@ func (client Client) integrationAlreadyInstalled(name string) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// sample returns true with the given probability, which must be in the range
|
||||
// [0.0, 1.0].
|
||||
func sample(probability float64) bool {
|
||||
return rng.Float64() < probability
|
||||
}
|
||||
|
||||
25
vendor/github.com/getsentry/sentry-go/doc.go
generated
vendored
25
vendor/github.com/getsentry/sentry-go/doc.go
generated
vendored
@@ -1,6 +1,9 @@
|
||||
/*
|
||||
Package sentry is the official Sentry SDK for Go.
|
||||
|
||||
Use it to report errors and track application performance through distributed
|
||||
tracing.
|
||||
|
||||
For more information about Sentry and SDK features please have a look at the
|
||||
documentation site https://docs.sentry.io/platforms/go/.
|
||||
|
||||
@@ -17,6 +20,28 @@ Sentry project. This step is accomplished through a call to sentry.Init.
|
||||
A more detailed yet simple example is available at
|
||||
https://github.com/getsentry/sentry-go/blob/master/example/basic/main.go.
|
||||
|
||||
Error Reporting
|
||||
|
||||
The Capture* functions report messages and errors to Sentry.
|
||||
|
||||
sentry.CaptureMessage(...)
|
||||
sentry.CaptureException(...)
|
||||
sentry.CaptureEvent(...)
|
||||
|
||||
Use similarly named functions in the Hub for concurrent programs like web
|
||||
servers.
|
||||
|
||||
Performance Monitoring
|
||||
|
||||
You can use Sentry to monitor your application's performance. More information
|
||||
on the product page https://docs.sentry.io/product/performance/.
|
||||
|
||||
The StartSpan function creates new spans.
|
||||
|
||||
span := sentry.StartSpan(ctx, "operation")
|
||||
...
|
||||
span.Finish()
|
||||
|
||||
Integrations
|
||||
|
||||
The SDK has support for several Go frameworks, available as subpackages.
|
||||
|
||||
5
vendor/github.com/getsentry/sentry-go/dsn.go
generated
vendored
5
vendor/github.com/getsentry/sentry-go/dsn.go
generated
vendored
@@ -48,8 +48,9 @@ type Dsn struct {
|
||||
projectID int
|
||||
}
|
||||
|
||||
// NewDsn creates an instance of Dsn by parsing provided url in a string format.
|
||||
// If Dsn is not set the client is effectively disabled.
|
||||
// NewDsn creates a Dsn by parsing rawURL. Most users will never call this
|
||||
// function directly. It is provided for use in custom Transport
|
||||
// implementations.
|
||||
func NewDsn(rawURL string) (*Dsn, error) {
|
||||
// Parse
|
||||
parsedURL, err := url.Parse(rawURL)
|
||||
|
||||
5
vendor/github.com/getsentry/sentry-go/go.mod
generated
vendored
5
vendor/github.com/getsentry/sentry-go/go.mod
generated
vendored
@@ -1,6 +1,6 @@
|
||||
module github.com/getsentry/sentry-go
|
||||
|
||||
go 1.12
|
||||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/ajg/form v1.5.1 // indirect
|
||||
@@ -13,7 +13,7 @@ require (
|
||||
github.com/google/go-querystring v1.0.0 // indirect
|
||||
github.com/imkira/go-interpol v1.1.0 // indirect
|
||||
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 // indirect
|
||||
github.com/kataras/iris/v12 v12.0.1
|
||||
github.com/kataras/iris/v12 v12.1.8
|
||||
github.com/labstack/echo/v4 v4.1.11
|
||||
github.com/moul/http2curl v1.0.0 // indirect
|
||||
github.com/onsi/ginkgo v1.10.3 // indirect
|
||||
@@ -21,6 +21,7 @@ require (
|
||||
github.com/pingcap/errors v0.11.4
|
||||
github.com/pkg/errors v0.8.1
|
||||
github.com/sergi/go-diff v1.0.0 // indirect
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
|
||||
github.com/smartystreets/goconvey v1.6.4 // indirect
|
||||
github.com/ugorji/go v1.1.7 // indirect
|
||||
github.com/urfave/negroni v1.0.0
|
||||
|
||||
75
vendor/github.com/getsentry/sentry-go/go.sum
generated
vendored
75
vendor/github.com/getsentry/sentry-go/go.sum
generated
vendored
@@ -1,14 +1,12 @@
|
||||
github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a h1:3SgJcK9l5uPdBC/X17wanyJAMxM33+4ZhEIV96MIH8U=
|
||||
github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw=
|
||||
github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible h1:rZgFj+Gtf3NMi/U5FvCvhzaxzW/TaPYgUYx3bAPz9DE=
|
||||
github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w=
|
||||
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53 h1:sR+/8Yb4slttB4vD+b9btVEnWgL3Q00OBTzVT8B9C0c=
|
||||
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
|
||||
github.com/CloudyKit/jet/v3 v3.0.0 h1:1PwO5w5VCtlUUl+KTOBsTGZlhjWkcybsGaAau52tOy8=
|
||||
github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo=
|
||||
github.com/Joker/hpp v1.0.0 h1:65+iuJYdRXv/XyN62C1uEmmOx3432rNG/rKlX6V7Kkc=
|
||||
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
|
||||
github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7 h1:mreN1m/5VJ/Zc3b4pzj9qU6D9SRQ6Vm+3KfI328t3S8=
|
||||
github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
|
||||
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398 h1:WDC6ySpJzbxGWFh4aMxFFC28wwGp5pEuoTtvA4q/qQ4=
|
||||
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
|
||||
github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=
|
||||
@@ -37,8 +35,6 @@ github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072 h1:Dddq
|
||||
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
|
||||
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4 h1:GY1+t5Dr9OKADM64SYnQjw/w99HMYvQ0A8/JoUkxVmc=
|
||||
github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8=
|
||||
@@ -64,10 +60,11 @@ github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
|
||||
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
@@ -78,31 +75,32 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
|
||||
github.com/iris-contrib/blackfriday v2.0.0+incompatible h1:o5sHQHHm0ToHUlAJSTjW9UWicjJSDDauOOQ2AHuIVp4=
|
||||
github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
|
||||
github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
|
||||
github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI=
|
||||
github.com/iris-contrib/jade v1.1.3 h1:p7J/50I0cjo0wq/VWVCDFd8taPJbuFC+bq23SniRFX0=
|
||||
github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
|
||||
github.com/iris-contrib/pongo2 v0.0.1 h1:zGP7pW51oi5eQZMIlGA3I+FHY9/HOQWDB+572yin0to=
|
||||
github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g=
|
||||
github.com/iris-contrib/schema v0.0.1 h1:10g/WnoRR+U+XXHWKBHeNy/+tZmM2kcAVGLOsz+yaDA=
|
||||
github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
|
||||
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 h1:rhqTjzJlm7EbkELJDKMTU7udov+Se0xZkWmugr6zGok=
|
||||
github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
|
||||
github.com/juju/loggo v0.0.0-20180524022052-584905176618 h1:MK144iBQF9hTSwBW/9eJm034bVoG30IshVm688T2hi8=
|
||||
github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
|
||||
github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073 h1:WQM1NildKThwdP7qWrNAFGzp4ijNLw8RlgENkaI4MJs=
|
||||
github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
|
||||
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM=
|
||||
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
|
||||
github.com/kataras/golog v0.0.9 h1:J7Dl82843nbKQDrQM/abbNJZvQjS6PfmkkffhOTXEpM=
|
||||
github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk=
|
||||
github.com/kataras/iris/v12 v12.0.1 h1:Wo5S7GMWv5OAzJmvFTvss/C4TS1W0uo6LkDlSymT4rM=
|
||||
github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U=
|
||||
github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw=
|
||||
github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d h1:V5Rs9ztEWdp58oayPq/ulmlqJJZeJP6pP79uP3qjcao=
|
||||
github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0=
|
||||
github.com/kataras/golog v0.0.10 h1:vRDRUmwacco/pmBAm8geLn8rHEdc+9Z4NAr5Sh7TG/4=
|
||||
github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8=
|
||||
github.com/kataras/iris/v12 v12.1.8 h1:O3gJasjm7ZxpxwTH8tApZsvf274scSGQAUpNe47c37U=
|
||||
github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE=
|
||||
github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE=
|
||||
github.com/kataras/pio v0.0.2 h1:6NAi+uPJ/Zuid6mrAKlgpbI11/zK/lV4B2rxWaJN98Y=
|
||||
github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro=
|
||||
github.com/kataras/sitemap v0.0.5 h1:4HCONX5RLgVy6G4RkYOV3vKNcma9p236LdGOipJsaFE=
|
||||
github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8=
|
||||
github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.9.0 h1:GhthINjveNZAdFUD8QoQYfjxnOONZgztK/Yr6M23UTY=
|
||||
github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.9.7 h1:hYW1gP94JUmAhBtJ+LNz5My+gBobDxPR1iVuKug26aA=
|
||||
github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
@@ -121,20 +119,22 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
|
||||
github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
|
||||
github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
|
||||
github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ=
|
||||
github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8=
|
||||
github.com/microcosm-cc/bluemonday v1.0.2 h1:5lPfLTTAvAbtS0VqT+94yOtFnGfUWYyx0+iToC3Os3s=
|
||||
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs=
|
||||
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
|
||||
github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM=
|
||||
github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
|
||||
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
|
||||
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
|
||||
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY=
|
||||
@@ -151,6 +151,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s=
|
||||
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/schollz/closestmatch v2.1.0+incompatible h1:Uel2GXEpJqOWBrlyI+oY9LTiyyjYS17cCYRqP13/SHk=
|
||||
github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g=
|
||||
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
|
||||
@@ -205,6 +207,8 @@ golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnf
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876 h1:sKJQZMuxjOAR/Uo2LBfU90onWEf1dF4C+0hPJCc9Mpc=
|
||||
golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
@@ -214,7 +218,10 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -225,9 +232,13 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
@@ -238,6 +249,8 @@ gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXa
|
||||
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
|
||||
gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ=
|
||||
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
|
||||
gopkg.in/ini.v1 v1.51.1 h1:GyboHr4UqMiLUybYjd22ZjQIKEJEpgtLXtuGbR21Oho=
|
||||
gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=
|
||||
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
@@ -246,3 +259,5 @@ gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2 h1:XZx7nhd5GMaZpmDaEHFVafUZC7ya0fuo7cSJ3UCKYmM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
90
vendor/github.com/getsentry/sentry-go/http/sentryhttp.go
generated
vendored
90
vendor/github.com/getsentry/sentry-go/http/sentryhttp.go
generated
vendored
@@ -1,75 +1,107 @@
|
||||
// Package sentryhttp provides Sentry integration for servers based on the
|
||||
// net/http package.
|
||||
package sentryhttp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/getsentry/sentry-go"
|
||||
)
|
||||
|
||||
// A Handler is an HTTP middleware factory that provides integration with
|
||||
// Sentry.
|
||||
type Handler struct {
|
||||
repanic bool
|
||||
waitForDelivery bool
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// Options configure a Handler.
|
||||
type Options struct {
|
||||
// Repanic configures whether Sentry should repanic after recovery
|
||||
// Repanic configures whether to panic again after recovering from a panic.
|
||||
// Use this option if you have other panic handlers or want the default
|
||||
// behavior from Go's http package, as documented in
|
||||
// https://golang.org/pkg/net/http/#Handler.
|
||||
Repanic bool
|
||||
// WaitForDelivery indicates whether to wait until panic details have been
|
||||
// sent to Sentry before panicking or proceeding with a request.
|
||||
// WaitForDelivery indicates, in case of a panic, whether to block the
|
||||
// current goroutine and wait until the panic event has been reported to
|
||||
// Sentry before repanicking or resuming normal execution.
|
||||
//
|
||||
// This option is normally not needed. Unless you need different behaviors
|
||||
// for different HTTP handlers, configure the SDK to use the
|
||||
// HTTPSyncTransport instead.
|
||||
//
|
||||
// Waiting (or using HTTPSyncTransport) is useful when the web server runs
|
||||
// in an environment that interrupts execution at the end of a request flow,
|
||||
// like modern serverless platforms.
|
||||
WaitForDelivery bool
|
||||
// Timeout for the event delivery requests.
|
||||
// Timeout for the delivery of panic events. Defaults to 2s. Only relevant
|
||||
// when WaitForDelivery is true.
|
||||
//
|
||||
// If the timeout is reached, the current goroutine is no longer blocked
|
||||
// waiting, but the delivery is not canceled.
|
||||
Timeout time.Duration
|
||||
}
|
||||
|
||||
// New returns a struct that provides Handle and HandleFunc methods
|
||||
// that satisfy http.Handler and http.HandlerFunc interfaces.
|
||||
// New returns a new Handler. Use the Handle and HandleFunc methods to wrap
|
||||
// existing HTTP handlers.
|
||||
func New(options Options) *Handler {
|
||||
handler := Handler{
|
||||
repanic: false,
|
||||
timeout: time.Second * 2,
|
||||
waitForDelivery: false,
|
||||
timeout := options.Timeout
|
||||
if timeout == 0 {
|
||||
timeout = 2 * time.Second
|
||||
}
|
||||
|
||||
if options.Repanic {
|
||||
handler.repanic = true
|
||||
return &Handler{
|
||||
repanic: options.Repanic,
|
||||
timeout: timeout,
|
||||
waitForDelivery: options.WaitForDelivery,
|
||||
}
|
||||
|
||||
if options.Timeout != 0 {
|
||||
handler.timeout = options.Timeout
|
||||
}
|
||||
|
||||
if options.WaitForDelivery {
|
||||
handler.waitForDelivery = true
|
||||
}
|
||||
|
||||
return &handler
|
||||
}
|
||||
|
||||
// Handle wraps http.Handler and recovers from caught panics.
|
||||
// Handle works as a middleware that wraps an existing http.Handler. A wrapped
|
||||
// handler will recover from and report panics to Sentry, and provide access to
|
||||
// a request-specific hub to report messages and errors.
|
||||
func (h *Handler) Handle(handler http.Handler) http.Handler {
|
||||
return h.handle(handler)
|
||||
}
|
||||
|
||||
// Deprecated: Use the Handle method instead.
|
||||
// HandleFunc is like Handle, but with a handler function parameter for cases
|
||||
// where that is convenient. In particular, use it to wrap a handler function
|
||||
// literal.
|
||||
//
|
||||
// http.Handle(pattern, h.HandleFunc(func (w http.ResponseWriter, r *http.Request) {
|
||||
// // handler code here
|
||||
// }))
|
||||
func (h *Handler) HandleFunc(handler http.HandlerFunc) http.HandlerFunc {
|
||||
return h.handle(handler)
|
||||
}
|
||||
|
||||
func (h *Handler) handle(handler http.Handler) http.HandlerFunc {
|
||||
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
hub := sentry.GetHubFromContext(ctx)
|
||||
if hub == nil {
|
||||
hub = sentry.CurrentHub().Clone()
|
||||
ctx = sentry.SetHubOnContext(ctx, hub)
|
||||
}
|
||||
span := sentry.StartSpan(ctx, "http.server",
|
||||
sentry.TransactionName(fmt.Sprintf("%s %s", r.Method, r.URL.Path)),
|
||||
sentry.ContinueFromRequest(r),
|
||||
)
|
||||
defer span.Finish()
|
||||
// TODO(tracing): if the next handler.ServeHTTP panics, store
|
||||
// information on the transaction accordingly (status, tag,
|
||||
// level?, ...).
|
||||
r = r.WithContext(span.Context())
|
||||
hub.Scope().SetRequest(r)
|
||||
ctx = sentry.SetHubOnContext(ctx, hub)
|
||||
defer h.recoverWithSentry(hub, r)
|
||||
handler.ServeHTTP(rw, r.WithContext(ctx))
|
||||
})
|
||||
// TODO(tracing): use custom response writer to intercept
|
||||
// response. Use HTTP status to add tag to transaction; set span
|
||||
// status.
|
||||
handler.ServeHTTP(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Handler) recoverWithSentry(hub *sentry.Hub, r *http.Request) {
|
||||
|
||||
53
vendor/github.com/getsentry/sentry-go/hub.go
generated
vendored
53
vendor/github.com/getsentry/sentry-go/hub.go
generated
vendored
@@ -92,30 +92,21 @@ func (hub *Hub) LastEventID() EventID {
|
||||
return hub.lastEventID
|
||||
}
|
||||
|
||||
// stackTop returns the top layer of the hub stack. Valid hubs always have at
|
||||
// least one layer, therefore stackTop always return a non-nil pointer.
|
||||
func (hub *Hub) stackTop() *layer {
|
||||
hub.mu.RLock()
|
||||
defer hub.mu.RUnlock()
|
||||
|
||||
stack := hub.stack
|
||||
if stack == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
stackLen := len(*stack)
|
||||
if stackLen == 0 {
|
||||
return nil
|
||||
}
|
||||
top := (*stack)[stackLen-1]
|
||||
|
||||
return top
|
||||
}
|
||||
|
||||
// Clone returns a copy of the current Hub with top-most scope and client copied over.
|
||||
func (hub *Hub) Clone() *Hub {
|
||||
top := hub.stackTop()
|
||||
if top == nil {
|
||||
return nil
|
||||
}
|
||||
scope := top.scope
|
||||
if scope != nil {
|
||||
scope = scope.Clone()
|
||||
@@ -126,18 +117,12 @@ func (hub *Hub) Clone() *Hub {
|
||||
// Scope returns top-level Scope of the current Hub or nil if no Scope is bound.
|
||||
func (hub *Hub) Scope() *Scope {
|
||||
top := hub.stackTop()
|
||||
if top == nil {
|
||||
return nil
|
||||
}
|
||||
return top.scope
|
||||
}
|
||||
|
||||
// Client returns top-level Client of the current Hub or nil if no Client is bound.
|
||||
func (hub *Hub) Client() *Client {
|
||||
top := hub.stackTop()
|
||||
if top == nil {
|
||||
return nil
|
||||
}
|
||||
return top.Client()
|
||||
}
|
||||
|
||||
@@ -145,13 +130,8 @@ func (hub *Hub) Client() *Client {
|
||||
func (hub *Hub) PushScope() *Scope {
|
||||
top := hub.stackTop()
|
||||
|
||||
var client *Client
|
||||
if top != nil {
|
||||
client = top.Client()
|
||||
}
|
||||
|
||||
var scope *Scope
|
||||
if top != nil && top.scope != nil {
|
||||
if top.scope != nil {
|
||||
scope = top.scope.Clone()
|
||||
} else {
|
||||
scope = NewScope()
|
||||
@@ -161,21 +141,29 @@ func (hub *Hub) PushScope() *Scope {
|
||||
defer hub.mu.Unlock()
|
||||
|
||||
*hub.stack = append(*hub.stack, &layer{
|
||||
client: client,
|
||||
client: top.Client(),
|
||||
scope: scope,
|
||||
})
|
||||
|
||||
return scope
|
||||
}
|
||||
|
||||
// PopScope pops the most recent scope for the current Hub.
|
||||
// PopScope drops the most recent scope.
|
||||
//
|
||||
// Calls to PopScope must be coordinated with PushScope. For most cases, using
|
||||
// WithScope should be more convenient.
|
||||
//
|
||||
// Calls to PopScope that do not match previous calls to PushScope are silently
|
||||
// ignored.
|
||||
func (hub *Hub) PopScope() {
|
||||
hub.mu.Lock()
|
||||
defer hub.mu.Unlock()
|
||||
|
||||
stack := *hub.stack
|
||||
stackLen := len(stack)
|
||||
if stackLen > 0 {
|
||||
if stackLen > 1 {
|
||||
// Never pop the last item off the stack, the stack should always have
|
||||
// at least one item.
|
||||
*hub.stack = stack[0 : stackLen-1]
|
||||
}
|
||||
}
|
||||
@@ -183,9 +171,7 @@ func (hub *Hub) PopScope() {
|
||||
// BindClient binds a new Client for the current Hub.
|
||||
func (hub *Hub) BindClient(client *Client) {
|
||||
top := hub.stackTop()
|
||||
if top != nil {
|
||||
top.SetClient(client)
|
||||
}
|
||||
top.SetClient(client)
|
||||
}
|
||||
|
||||
// WithScope runs f in an isolated temporary scope.
|
||||
@@ -381,6 +367,15 @@ func GetHubFromContext(ctx context.Context) *Hub {
|
||||
return nil
|
||||
}
|
||||
|
||||
// hubFromContext returns either a hub stored in the context or the current hub.
|
||||
// The return value is guaranteed to be non-nil, unlike GetHubFromContext.
|
||||
func hubFromContext(ctx context.Context) *Hub {
|
||||
if hub, ok := ctx.Value(HubContextKey).(*Hub); ok {
|
||||
return hub
|
||||
}
|
||||
return currentHub
|
||||
}
|
||||
|
||||
// SetHubOnContext stores given Hub instance on the Context struct and returns a new Context.
|
||||
func SetHubOnContext(ctx context.Context, hub *Hub) context.Context {
|
||||
return context.WithValue(ctx, HubContextKey, hub)
|
||||
|
||||
196
vendor/github.com/getsentry/sentry-go/interfaces.go
generated
vendored
196
vendor/github.com/getsentry/sentry-go/interfaces.go
generated
vendored
@@ -52,31 +52,42 @@ type BreadcrumbHint map[string]interface{}
|
||||
// Breadcrumb specifies an application event that occurred before a Sentry event.
|
||||
// An event may contain one or more breadcrumbs.
|
||||
type Breadcrumb struct {
|
||||
Type string `json:"type,omitempty"`
|
||||
Category string `json:"category,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
Data map[string]interface{} `json:"data,omitempty"`
|
||||
Level Level `json:"level,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
Type string `json:"type,omitempty"`
|
||||
}
|
||||
|
||||
// TODO: provide constants for known breadcrumb types.
|
||||
// See https://develop.sentry.dev/sdk/event-payloads/breadcrumbs/#breadcrumb-types.
|
||||
|
||||
// MarshalJSON converts the Breadcrumb struct to JSON.
|
||||
func (b *Breadcrumb) MarshalJSON() ([]byte, error) {
|
||||
type alias Breadcrumb
|
||||
// encoding/json doesn't support the "omitempty" option for struct types.
|
||||
// See https://golang.org/issues/11939.
|
||||
// This implementation of MarshalJSON shadows the original Timestamp field
|
||||
// forcing it to be omitted when the Timestamp is the zero value of
|
||||
// time.Time.
|
||||
// We want to omit time.Time zero values, otherwise the server will try to
|
||||
// interpret dates too far in the past. However, encoding/json doesn't
|
||||
// support the "omitempty" option for struct types. See
|
||||
// https://golang.org/issues/11939.
|
||||
//
|
||||
// We overcome the limitation and achieve what we want by shadowing fields
|
||||
// and a few type tricks.
|
||||
|
||||
// breadcrumb aliases Breadcrumb to allow calling json.Marshal without an
|
||||
// infinite loop. It preserves all fields while none of the attached
|
||||
// methods.
|
||||
type breadcrumb Breadcrumb
|
||||
|
||||
if b.Timestamp.IsZero() {
|
||||
return json.Marshal(&struct {
|
||||
*alias
|
||||
return json.Marshal(struct {
|
||||
// Embed all of the fields of Breadcrumb.
|
||||
*breadcrumb
|
||||
// Timestamp shadows the original Timestamp field and is meant to
|
||||
// remain nil, triggering the omitempty behavior.
|
||||
Timestamp json.RawMessage `json:"timestamp,omitempty"`
|
||||
}{
|
||||
alias: (*alias)(b),
|
||||
})
|
||||
}{breadcrumb: (*breadcrumb)(b)})
|
||||
}
|
||||
return json.Marshal((*alias)(b))
|
||||
return json.Marshal((*breadcrumb)(b))
|
||||
}
|
||||
|
||||
// User describes the user associated with an Event. If this is used, at least
|
||||
@@ -139,12 +150,11 @@ func NewRequest(r *http.Request) *Request {
|
||||
|
||||
// Exception specifies an error that occurred.
|
||||
type Exception struct {
|
||||
Type string `json:"type,omitempty"`
|
||||
Value string `json:"value,omitempty"`
|
||||
Module string `json:"module,omitempty"`
|
||||
ThreadID string `json:"thread_id,omitempty"`
|
||||
Stacktrace *Stacktrace `json:"stacktrace,omitempty"`
|
||||
RawStacktrace *Stacktrace `json:"raw_stacktrace,omitempty"`
|
||||
Type string `json:"type,omitempty"`
|
||||
Value string `json:"value,omitempty"`
|
||||
Module string `json:"module,omitempty"`
|
||||
ThreadID string `json:"thread_id,omitempty"`
|
||||
Stacktrace *Stacktrace `json:"stacktrace,omitempty"`
|
||||
}
|
||||
|
||||
// EventID is a hexadecimal string representing a unique uuid4 for an Event.
|
||||
@@ -176,51 +186,99 @@ type Event struct {
|
||||
Request *Request `json:"request,omitempty"`
|
||||
Exception []Exception `json:"exception,omitempty"`
|
||||
|
||||
// Experimental: This is part of a beta feature of the SDK. The fields below
|
||||
// are only relevant for transactions.
|
||||
Type string `json:"type,omitempty"`
|
||||
StartTimestamp time.Time `json:"start_timestamp"`
|
||||
Spans []*Span `json:"spans,omitempty"`
|
||||
// The fields below are only relevant for transactions.
|
||||
|
||||
Type string `json:"type,omitempty"`
|
||||
StartTime time.Time `json:"start_timestamp"`
|
||||
Spans []*Span `json:"spans,omitempty"`
|
||||
}
|
||||
|
||||
// TODO: Event.Contexts map[string]interface{} => map[string]EventContext,
|
||||
// to prevent accidentally storing T when we mean *T.
|
||||
// For example, the TraceContext must be stored as *TraceContext to pick up the
|
||||
// MarshalJSON method (and avoid copying).
|
||||
// type EventContext interface{ EventContext() }
|
||||
|
||||
// MarshalJSON converts the Event struct to JSON.
|
||||
func (e *Event) MarshalJSON() ([]byte, error) {
|
||||
// We want to omit time.Time zero values, otherwise the server will try to
|
||||
// interpret dates too far in the past. However, encoding/json doesn't
|
||||
// support the "omitempty" option for struct types. See
|
||||
// https://golang.org/issues/11939.
|
||||
//
|
||||
// We overcome the limitation and achieve what we want by shadowing fields
|
||||
// and a few type tricks.
|
||||
if e.Type == transactionType {
|
||||
return e.transactionMarshalJSON()
|
||||
}
|
||||
return e.defaultMarshalJSON()
|
||||
}
|
||||
|
||||
func (e *Event) defaultMarshalJSON() ([]byte, error) {
|
||||
// event aliases Event to allow calling json.Marshal without an infinite
|
||||
// loop. It preserves all fields of Event while none of the attached
|
||||
// methods.
|
||||
// loop. It preserves all fields while none of the attached methods.
|
||||
type event Event
|
||||
|
||||
// Transactions are marshaled in the standard way how json.Marshal works.
|
||||
if e.Type == transactionType {
|
||||
return json.Marshal((*event)(e))
|
||||
}
|
||||
|
||||
// errorEvent is like Event with some shadowed fields for customizing the
|
||||
// JSON serialization of regular "error events".
|
||||
// errorEvent is like Event with shadowed fields for customizing JSON
|
||||
// marshaling.
|
||||
type errorEvent struct {
|
||||
*event
|
||||
|
||||
// encoding/json doesn't support the omitempty option for struct types.
|
||||
// See https://golang.org/issues/11939.
|
||||
// We shadow the original Event.Timestamp field with a json.RawMessage.
|
||||
// This allows us to include the timestamp when non-zero and omit it
|
||||
// otherwise.
|
||||
// Timestamp shadows the original Timestamp field. It allows us to
|
||||
// include the timestamp when non-zero and omit it otherwise.
|
||||
Timestamp json.RawMessage `json:"timestamp,omitempty"`
|
||||
|
||||
// The fields below are not part of the regular "error events" and only
|
||||
// make sense to be sent for transactions. They shadow the respective
|
||||
// fields in Event and are meant to remain nil, triggering the omitempty
|
||||
// behavior.
|
||||
Type json.RawMessage `json:"type,omitempty"`
|
||||
StartTimestamp json.RawMessage `json:"start_timestamp,omitempty"`
|
||||
Spans json.RawMessage `json:"spans,omitempty"`
|
||||
// The fields below are not part of error events and only make sense to
|
||||
// be sent for transactions. They shadow the respective fields in Event
|
||||
// and are meant to remain nil, triggering the omitempty behavior.
|
||||
|
||||
Type json.RawMessage `json:"type,omitempty"`
|
||||
StartTime json.RawMessage `json:"start_timestamp,omitempty"`
|
||||
Spans json.RawMessage `json:"spans,omitempty"`
|
||||
}
|
||||
|
||||
x := &errorEvent{event: (*event)(e)}
|
||||
x := errorEvent{event: (*event)(e)}
|
||||
if !e.Timestamp.IsZero() {
|
||||
x.Timestamp = append(x.Timestamp, '"')
|
||||
x.Timestamp = e.Timestamp.UTC().AppendFormat(x.Timestamp, time.RFC3339Nano)
|
||||
x.Timestamp = append(x.Timestamp, '"')
|
||||
b, err := e.Timestamp.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x.Timestamp = b
|
||||
}
|
||||
return json.Marshal(x)
|
||||
}
|
||||
|
||||
func (e *Event) transactionMarshalJSON() ([]byte, error) {
|
||||
// event aliases Event to allow calling json.Marshal without an infinite
|
||||
// loop. It preserves all fields while none of the attached methods.
|
||||
type event Event
|
||||
|
||||
// transactionEvent is like Event with shadowed fields for customizing JSON
|
||||
// marshaling.
|
||||
type transactionEvent struct {
|
||||
*event
|
||||
|
||||
// The fields below shadow the respective fields in Event. They allow us
|
||||
// to include timestamps when non-zero and omit them otherwise.
|
||||
|
||||
StartTime json.RawMessage `json:"start_timestamp,omitempty"`
|
||||
Timestamp json.RawMessage `json:"timestamp,omitempty"`
|
||||
}
|
||||
|
||||
x := transactionEvent{event: (*event)(e)}
|
||||
if !e.Timestamp.IsZero() {
|
||||
b, err := e.Timestamp.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x.Timestamp = b
|
||||
}
|
||||
if !e.StartTime.IsZero() {
|
||||
b, err := e.StartTime.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x.StartTime = b
|
||||
}
|
||||
return json.Marshal(x)
|
||||
}
|
||||
@@ -238,12 +296,11 @@ func NewEvent() *Event {
|
||||
|
||||
// Thread specifies threads that were running at the time of an event.
|
||||
type Thread struct {
|
||||
ID string `json:"id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Stacktrace *Stacktrace `json:"stacktrace,omitempty"`
|
||||
RawStacktrace *Stacktrace `json:"raw_stacktrace,omitempty"`
|
||||
Crashed bool `json:"crashed,omitempty"`
|
||||
Current bool `json:"current,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Stacktrace *Stacktrace `json:"stacktrace,omitempty"`
|
||||
Crashed bool `json:"crashed,omitempty"`
|
||||
Current bool `json:"current,omitempty"`
|
||||
}
|
||||
|
||||
// EventHint contains information that can be associated with an Event.
|
||||
@@ -256,30 +313,3 @@ type EventHint struct {
|
||||
Request *http.Request
|
||||
Response *http.Response
|
||||
}
|
||||
|
||||
// TraceContext describes the context of the trace.
|
||||
//
|
||||
// Experimental: This is part of a beta feature of the SDK.
|
||||
type TraceContext struct {
|
||||
TraceID string `json:"trace_id"`
|
||||
SpanID string `json:"span_id"`
|
||||
Op string `json:"op,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
Status string `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// Span describes a timed unit of work in a trace.
|
||||
//
|
||||
// Experimental: This is part of a beta feature of the SDK.
|
||||
type Span struct {
|
||||
TraceID string `json:"trace_id"`
|
||||
SpanID string `json:"span_id"`
|
||||
ParentSpanID string `json:"parent_span_id,omitempty"`
|
||||
Op string `json:"op,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
Status string `json:"status,omitempty"`
|
||||
Tags map[string]string `json:"tags,omitempty"`
|
||||
StartTimestamp time.Time `json:"start_timestamp"`
|
||||
EndTimestamp time.Time `json:"timestamp"`
|
||||
Data map[string]interface{} `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
23
vendor/github.com/getsentry/sentry-go/internal/crypto/randutil/randutil.go
generated
vendored
Normal file
23
vendor/github.com/getsentry/sentry-go/internal/crypto/randutil/randutil.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
package randutil
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
const (
|
||||
floatMax = 1 << 53
|
||||
floatMask = floatMax - 1
|
||||
)
|
||||
|
||||
// Float64 returns a cryptographically secure random number in [0.0, 1.0).
|
||||
func Float64() float64 {
|
||||
// The implementation is, in essence:
|
||||
// return float64(rand.Int63n(1<<53)) / (1<<53)
|
||||
b := make([]byte, 8)
|
||||
_, err := rand.Read(b)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return float64(binary.LittleEndian.Uint64(b)&floatMask) / floatMax
|
||||
}
|
||||
45
vendor/github.com/getsentry/sentry-go/internal/debug/transport.go
generated
vendored
Normal file
45
vendor/github.com/getsentry/sentry-go/internal/debug/transport.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
package debug
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
)
|
||||
|
||||
// Transport implements http.RoundTripper and can be used to wrap other HTTP
|
||||
// transports to dump request and responses for debugging.
|
||||
type Transport struct {
|
||||
http.RoundTripper
|
||||
Output io.Writer
|
||||
}
|
||||
|
||||
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
b, err := httputil.DumpRequestOut(req, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = t.Output.Write(ensureTrailingNewline(b))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := t.RoundTripper.RoundTrip(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b, err = httputil.DumpResponse(resp, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = t.Output.Write(ensureTrailingNewline(b))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func ensureTrailingNewline(b []byte) []byte {
|
||||
if len(b) > 0 && b[len(b)-1] != '\n' {
|
||||
b = append(b, '\n')
|
||||
}
|
||||
return b
|
||||
}
|
||||
16
vendor/github.com/getsentry/sentry-go/scope.go
generated
vendored
16
vendor/github.com/getsentry/sentry-go/scope.go
generated
vendored
@@ -63,7 +63,7 @@ func NewScope() *Scope {
|
||||
// and optionally throws the old one if limit is reached.
|
||||
func (scope *Scope) AddBreadcrumb(breadcrumb *Breadcrumb, limit int) {
|
||||
if breadcrumb.Timestamp.IsZero() {
|
||||
breadcrumb.Timestamp = time.Now().UTC()
|
||||
breadcrumb.Timestamp = time.Now()
|
||||
}
|
||||
|
||||
scope.mu.Lock()
|
||||
@@ -278,12 +278,20 @@ func (scope *Scope) SetLevel(level Level) {
|
||||
scope.level = level
|
||||
}
|
||||
|
||||
// SetTransaction sets new transaction name for the current transaction.
|
||||
func (scope *Scope) SetTransaction(transactionName string) {
|
||||
// SetTransaction sets the transaction name for the current transaction.
|
||||
func (scope *Scope) SetTransaction(name string) {
|
||||
scope.mu.Lock()
|
||||
defer scope.mu.Unlock()
|
||||
|
||||
scope.transaction = transactionName
|
||||
scope.transaction = name
|
||||
}
|
||||
|
||||
// Transaction returns the transaction name for the current transaction.
|
||||
func (scope *Scope) Transaction() (name string) {
|
||||
scope.mu.RLock()
|
||||
defer scope.mu.RUnlock()
|
||||
|
||||
return scope.transaction
|
||||
}
|
||||
|
||||
// Clone returns a copy of the current scope with all data copied over.
|
||||
|
||||
4
vendor/github.com/getsentry/sentry-go/sentry.go
generated
vendored
4
vendor/github.com/getsentry/sentry-go/sentry.go
generated
vendored
@@ -6,7 +6,7 @@ import (
|
||||
)
|
||||
|
||||
// Version is the version of the SDK.
|
||||
const Version = "0.7.0"
|
||||
const Version = "0.9.0"
|
||||
|
||||
// apiVersion is the minimum version of the Sentry API compatible with the
|
||||
// sentry-go SDK.
|
||||
@@ -92,7 +92,7 @@ func ConfigureScope(f func(scope *Scope)) {
|
||||
hub.ConfigureScope(f)
|
||||
}
|
||||
|
||||
// PushScope is a shorthand for CurrentHub().PushPushScope.
|
||||
// PushScope is a shorthand for CurrentHub().PushScope.
|
||||
func PushScope() {
|
||||
hub := CurrentHub()
|
||||
hub.PushScope()
|
||||
|
||||
57
vendor/github.com/getsentry/sentry-go/span_recorder.go
generated
vendored
Normal file
57
vendor/github.com/getsentry/sentry-go/span_recorder.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
package sentry
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// maxSpans limits the number of recorded spans per transaction. The limit is
|
||||
// meant to bound memory usage and prevent too large transaction events that
|
||||
// would be rejected by Sentry.
|
||||
const maxSpans = 1000
|
||||
|
||||
// A spanRecorder stores a span tree that makes up a transaction. Safe for
|
||||
// concurrent use. It is okay to add child spans from multiple goroutines.
|
||||
type spanRecorder struct {
|
||||
mu sync.Mutex
|
||||
spans []*Span
|
||||
overflowOnce sync.Once
|
||||
}
|
||||
|
||||
// record stores a span. The first stored span is assumed to be the root of a
|
||||
// span tree.
|
||||
func (r *spanRecorder) record(s *Span) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
if len(r.spans) >= maxSpans {
|
||||
r.overflowOnce.Do(func() {
|
||||
root := r.spans[0]
|
||||
Logger.Printf("Too many spans: dropping spans from transaction with TraceID=%s SpanID=%s limit=%d",
|
||||
root.TraceID, root.SpanID, maxSpans)
|
||||
})
|
||||
// TODO(tracing): mark the transaction event in some way to
|
||||
// communicate that spans were dropped.
|
||||
return
|
||||
}
|
||||
r.spans = append(r.spans, s)
|
||||
}
|
||||
|
||||
// root returns the first recorded span. Returns nil if none have been recorded.
|
||||
func (r *spanRecorder) root() *Span {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
if len(r.spans) == 0 {
|
||||
return nil
|
||||
}
|
||||
return r.spans[0]
|
||||
}
|
||||
|
||||
// children returns a list of all recorded spans, except the root. Returns nil
|
||||
// if there are no children.
|
||||
func (r *spanRecorder) children() []*Span {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
if len(r.spans) < 2 {
|
||||
return nil
|
||||
}
|
||||
return r.spans[1:]
|
||||
}
|
||||
171
vendor/github.com/getsentry/sentry-go/traces_sampler.go
generated
vendored
Normal file
171
vendor/github.com/getsentry/sentry-go/traces_sampler.go
generated
vendored
Normal file
@@ -0,0 +1,171 @@
|
||||
package sentry
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/getsentry/sentry-go/internal/crypto/randutil"
|
||||
)
|
||||
|
||||
// A TracesSampler makes sampling decisions for spans.
|
||||
//
|
||||
// In addition to the sampling context passed to the Sample method,
|
||||
// implementations may keep and use internal state to make decisions.
|
||||
//
|
||||
// Sampling is one of the last steps when starting a new span, such that the
|
||||
// sampler can inspect most of the state of the span to make a decision.
|
||||
//
|
||||
// Implementations must be safe for concurrent use by multiple goroutines.
|
||||
type TracesSampler interface {
|
||||
Sample(ctx SamplingContext) Sampled
|
||||
}
|
||||
|
||||
// Implementation note:
|
||||
//
|
||||
// TracesSampler.Sample return type is Sampled (instead of bool or float64), so
|
||||
// that we can compose samplers by letting a sampler return SampledUndefined to
|
||||
// defer the decision to the next sampler.
|
||||
//
|
||||
// For example, a hypothetical InheritFromParentSampler would return
|
||||
// SampledUndefined if there is no parent span in the SamplingContext, deferring
|
||||
// the sampling decision to another sampler, like a UniformSampler.
|
||||
//
|
||||
// var _ TracesSampler = sentry.TracesSamplers{
|
||||
// sentry.InheritFromParentSampler,
|
||||
// sentry.UniformTracesSampler(0.1),
|
||||
// }
|
||||
//
|
||||
// Another example, we can provide a sampler that returns SampledFalse if the
|
||||
// SamplingContext matches some condition, and SampledUndefined otherwise:
|
||||
//
|
||||
// var _ TracesSampler = sentry.TracesSamplers{
|
||||
// sentry.IgnoreTransaction(regexp.MustCompile(`^\w+ /(favicon.ico|healthz)`),
|
||||
// sentry.InheritFromParentSampler,
|
||||
// sentry.UniformTracesSampler(0.1),
|
||||
// }
|
||||
//
|
||||
// If after running all samplers the decision is still undefined, the
|
||||
// span/transaction is not sampled.
|
||||
|
||||
// A SamplingContext is passed to a TracesSampler to determine a sampling
// decision.
//
// It intentionally carries only the span tree state; request data and other
// custom values must be retrieved through Span.Context (see the discussion
// below about possible future extensions).
type SamplingContext struct {
	Span   *Span // The current span, always non-nil.
	Parent *Span // The parent span, may be nil.
}
|
||||
|
||||
// TODO(tracing): possibly expand SamplingContext to include custom /
|
||||
// user-provided data.
|
||||
//
|
||||
// Unlike in other SDKs, the current http.Request is not part of the
|
||||
// SamplingContext to avoid bloating it with possibly unnecessary values that
|
||||
// could confuse people or have negative performance consequences.
|
||||
//
|
||||
// For the request to be provided in a SamplingContext, a request pointer would
|
||||
// most likely need to be stored in the span context and it would open precedent
|
||||
// for more arbitrary data like fasthttp.Request.
|
||||
//
|
||||
// Users wanting to influence the sampling decision based on the request can
|
||||
// still do so, either by updating the transaction directly on their HTTP
|
||||
// handler:
|
||||
//
|
||||
// func(w http.ResponseWriter, r *http.Request) {
|
||||
// transaction := sentry.TransactionFromContext(r.Context())
|
||||
// if r.Header.Get("X-Custom-Sampling") == "yes" {
|
||||
// transaction.Sampled = sentry.SampledTrue
|
||||
// } else {
|
||||
// transaction.Sampled = sentry.SampledFalse
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Or by having their own middleware that stores arbitrary data in the request
|
||||
// context (a pointer to the request itself included):
|
||||
//
|
||||
// type myContextKey struct{}
|
||||
// type myContextData struct {
|
||||
// request *http.Request
|
||||
// // ...
|
||||
// }
|
||||
//
|
||||
// func middleware(h http.Handler) http.Handler {
|
||||
// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// data := &myContextData{
|
||||
// request: r,
|
||||
// }
|
||||
// ctx := context.WithValue(r.Context(), myContextKey{}, data)
|
||||
// h.ServeHTTP(w, r.WithContext(ctx))
|
||||
// })
|
||||
// }
|
||||
//
|
||||
// func main() {
|
||||
// err := sentry.Init(sentry.ClientOptions{
|
||||
// // A custom TracesSampler can access data from the span's context:
|
||||
// TracesSampler: sentry.TracesSamplerFunc(func(ctx sentry.SamplingContext) bool {
|
||||
// data, ok := ctx.Span.Context().Value(myContextKey{}).(*myContextData)
|
||||
// if !ok {
|
||||
// return false
|
||||
// }
|
||||
// return data.request.URL.Hostname() == "example.com"
|
||||
// }),
|
||||
// })
|
||||
// // ...
|
||||
// }
|
||||
//
|
||||
// Note, however, that for the middleware to be effective, it would have to run
|
||||
// before sentryhttp's own middleware, meaning the middleware itself is not
|
||||
// instrumented to send panics to Sentry and it is not part of the timed
|
||||
// transaction.
|
||||
//
|
||||
// If neither of those prove to be sufficient, we can consider including a
|
||||
// (possibly nil) *http.Request field to SamplingContext. In that case, the SDK
|
||||
// would need to track the request either in the Scope or the Span.Context.
|
||||
//
|
||||
// Alternatively, add a map-like type or simply a generic interface{} similar to
|
||||
// the CustomSamplingContext type in the Java SDK:
|
||||
//
|
||||
// type SamplingContext struct {
|
||||
// Span *Span // The current span, always non-nil.
|
||||
// Parent *Span // The parent span, may be nil.
|
||||
// CustomData interface{}
|
||||
// }
|
||||
//
|
||||
// func CustomSamplingContext(data interface{}) SpanOption {
|
||||
// return func(s *Span) {
|
||||
// s.customSamplingContext = data
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// func main() {
|
||||
// // ...
|
||||
// span := sentry.StartSpan(ctx, "op", CustomSamplingContext(data))
|
||||
// // ...
|
||||
// }
|
||||
|
||||
// The TracesSamplerFunc type is an adapter to allow the use of ordinary
// functions as a TracesSampler.
type TracesSamplerFunc func(ctx SamplingContext) Sampled

// Compile-time check that TracesSamplerFunc implements TracesSampler.
var _ TracesSampler = TracesSamplerFunc(nil)

// Sample calls f(ctx), implementing the TracesSampler interface.
func (f TracesSamplerFunc) Sample(ctx SamplingContext) Sampled {
	return f(ctx)
}
|
||||
|
||||
// UniformTracesSampler is a TracesSampler that samples root spans randomly at a
// uniform rate. The value is the sampling probability and must be in
// [0.0, 1.0].
type UniformTracesSampler float64

// Compile-time check that UniformTracesSampler implements TracesSampler.
var _ TracesSampler = UniformTracesSampler(0)

// Sample returns SampledTrue with probability s and SampledFalse otherwise.
// It panics if s is outside [0.0, 1.0].
func (s UniformTracesSampler) Sample(ctx SamplingContext) Sampled {
	if s < 0.0 || s > 1.0 {
		panic(fmt.Errorf("sampling rate out of range [0.0, 1.0]: %f", s))
	}
	if randutil.Float64() < float64(s) {
		return SampledTrue
	}
	return SampledFalse
}
|
||||
|
||||
// TODO(tracing): implement and export basic TracesSampler implementations:
|
||||
// parent-based, span ID / trace ID based, etc. It should be possible to compose
|
||||
// parent-based with other samplers.
|
||||
594
vendor/github.com/getsentry/sentry-go/tracing.go
generated
vendored
Normal file
594
vendor/github.com/getsentry/sentry-go/tracing.go
generated
vendored
Normal file
@@ -0,0 +1,594 @@
|
||||
package sentry
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A Span is the building block of a Sentry transaction. Spans build up a tree
// structure of timed operations. The span tree makes up a transaction event
// that is sent to Sentry when the root span is finished.
//
// Spans must be started with either StartSpan or Span.StartChild.
type Span struct { //nolint: maligned // prefer readability over optimal memory layout (see note below *)
	TraceID      TraceID                `json:"trace_id"`
	SpanID       SpanID                 `json:"span_id"`
	ParentSpanID SpanID                 `json:"parent_span_id"`
	Op           string                 `json:"op,omitempty"`
	Description  string                 `json:"description,omitempty"`
	Status       SpanStatus             `json:"status,omitempty"`
	Tags         map[string]string      `json:"tags,omitempty"`
	StartTime    time.Time              `json:"start_timestamp"`
	EndTime      time.Time              `json:"timestamp"`
	Data         map[string]interface{} `json:"data,omitempty"`

	// Sampled is the sampling decision for this span. Not serialized; the
	// decision only controls whether the span is sent at all.
	Sampled Sampled `json:"-"`

	// ctx is the context where the span was started. Always non-nil.
	ctx context.Context

	// parent refers to the immediate local parent span. A remote parent span is
	// only referenced by setting ParentSpanID.
	parent *Span

	// isTransaction is true only for the root span of a local span tree. The
	// root span is the first span started in a context. Note that a local root
	// span may have a remote parent belonging to the same trace, therefore
	// isTransaction depends on ctx and not on parent.
	isTransaction bool

	// recorder stores all spans in a transaction. Guaranteed to be non-nil.
	recorder *spanRecorder
}
|
||||
|
||||
// (*) Note on maligned:
|
||||
//
|
||||
// We prefer readability over optimal memory layout. If we ever decide to
|
||||
// reorder fields, we can use a tool:
|
||||
//
|
||||
// go run honnef.co/go/tools/cmd/structlayout -json . Span | go run honnef.co/go/tools/cmd/structlayout-optimize
|
||||
//
|
||||
// Other structs would deserve reordering as well, for example Event.
|
||||
|
||||
// TODO: make Span.Tags and Span.Data opaque types (struct{unexported []slice}).
|
||||
// An opaque type allows us to add methods and make it more convenient to use
|
||||
// than maps, because maps require careful nil checks to use properly or rely on
|
||||
// explicit initialization for every span, even when there might be no
|
||||
// tags/data. For Span.Data, must gracefully handle values that cannot be
|
||||
// marshaled into JSON (see transport.go:getRequestBodyFromEvent).
|
||||
|
||||
// StartSpan starts a new span to describe an operation. The new span will be a
// child of the last span stored in ctx, if any.
//
// One or more options can be used to modify the span properties. Typically one
// option as a function literal is enough. Combining multiple options can be
// useful to define and reuse specific properties with named functions.
//
// Caller should call the Finish method on the span to mark its end. Finishing a
// root span sends the span and all of its children, recursively, as a
// transaction to Sentry.
func StartSpan(ctx context.Context, operation string, options ...SpanOption) *Span {
	parent, hasParent := ctx.Value(spanContextKey{}).(*Span)
	// span is declared first and then assigned so that the composite literal
	// can store a self-reference (&span) inside its own ctx field.
	var span Span
	span = Span{
		// defaults
		Op:        operation,
		StartTime: time.Now(),

		ctx:           context.WithValue(ctx, spanContextKey{}, &span),
		parent:        parent,
		isTransaction: !hasParent,
	}
	if hasParent {
		// Child spans share the trace of their parent.
		span.TraceID = parent.TraceID
	} else {
		// Implementation note: crypto/rand is used instead of math/rand.
		// While math/rand is ~2x faster, crypto/rand is fast enough and a
		// safer choice: a math/rand generator seeded with an int64 can only
		// produce 2^64 distinct first TraceIDs, a much greater collision
		// chance than the 128 random bits read here (compare UUID v4's 122
		// random bits).
		//
		// See:
		// https://security.stackexchange.com/a/120365/246345
		// https://github.com/golang/go/issues/11871#issuecomment-126333686
		// https://en.wikipedia.org/wiki/Universally_unique_identifier#Collisions
		_, err := rand.Read(span.TraceID[:])
		if err != nil {
			// rand.Read failing means the OS entropy source is broken;
			// nothing sensible to do but crash.
			panic(err)
		}
	}
	_, err := rand.Read(span.SpanID[:])
	if err != nil {
		panic(err)
	}
	if hasParent {
		span.ParentSpanID = parent.SpanID
	}

	// Apply options to override defaults.
	for _, option := range options {
		option(&span)
	}

	// Sampling runs after options so that an explicit Sampled option wins.
	span.Sampled = span.sample()

	// All spans of a transaction share the parent's recorder; the root span
	// creates a fresh one.
	if hasParent {
		span.recorder = parent.spanRecorder()
	} else {
		span.recorder = &spanRecorder{}
	}
	span.recorder.record(&span)

	// Update scope so that all events include a trace context, allowing
	// Sentry to correlate errors to transactions/spans.
	hubFromContext(ctx).Scope().SetContext("trace", span.traceContext())

	return &span
}
|
||||
|
||||
// Finish sets the span's end time, unless already set. If the span is the root
// of a span tree, Finish sends the span tree to Sentry as a transaction.
//
// Unsampled spans and non-root spans are finished locally without sending
// anything.
func (s *Span) Finish() {
	// TODO(tracing): maybe make Finish run at most once, such that
	// (incorrectly) calling it twice never double sends to Sentry.

	if s.EndTime.IsZero() {
		s.EndTime = monotonicTimeSince(s.StartTime)
	}
	if !s.Sampled.Bool() {
		return
	}
	// toEvent returns nil for non-root spans; only transactions become events.
	event := s.toEvent()
	if event == nil {
		return
	}

	// TODO(tracing): add breadcrumbs
	// (see https://github.com/getsentry/sentry-python/blob/f6f3525f8812f609/sentry_sdk/tracing.py#L372)

	hub := hubFromContext(s.ctx)
	if hub.Scope().Transaction() == "" {
		Logger.Printf("Missing transaction name for span with op = %q", s.Op)
	}
	hub.CaptureEvent(event)
}
|
||||
|
||||
// Context returns the context containing the span.
func (s *Span) Context() context.Context { return s.ctx }
|
||||
|
||||
// StartChild starts a new child span.
//
// The call span.StartChild(operation, options...) is a shortcut for
// StartSpan(span.Context(), operation, options...).
func (s *Span) StartChild(operation string, options ...SpanOption) *Span {
	return StartSpan(s.Context(), operation, options...)
}
|
||||
|
||||
// SetTag sets a tag on the span. It is recommended to use SetTag instead of
// accessing the tags map directly as SetTag takes care of initializing the map
// when necessary.
func (s *Span) SetTag(name, value string) {
	if s.Tags == nil {
		s.Tags = make(map[string]string)
	}
	s.Tags[name] = value
}
|
||||
|
||||
// TODO(tracing): maybe add shortcuts to get/set transaction name. Right now the
|
||||
// transaction name is in the Scope, as it has existed there historically, prior
|
||||
// to tracing.
|
||||
//
|
||||
// See Scope.Transaction() and Scope.SetTransaction().
|
||||
//
|
||||
// func (s *Span) TransactionName() string
|
||||
// func (s *Span) SetTransactionName(name string)
|
||||
|
||||
// ToSentryTrace returns the trace propagation value used with the sentry-trace
|
||||
// HTTP header.
|
||||
func (s *Span) ToSentryTrace() string {
|
||||
// TODO(tracing): add instrumentation for outgoing HTTP requests using
|
||||
// ToSentryTrace.
|
||||
var b strings.Builder
|
||||
fmt.Fprintf(&b, "%s-%s", s.TraceID.Hex(), s.SpanID.Hex())
|
||||
switch s.Sampled {
|
||||
case SampledTrue:
|
||||
b.WriteString("-1")
|
||||
case SampledFalse:
|
||||
b.WriteString("-0")
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// sentryTracePattern matches either
//
//	TRACE_ID - SPAN_ID
//	[[:xdigit:]]{32}-[[:xdigit:]]{16}
//
// or
//
//	TRACE_ID - SPAN_ID - SAMPLED
//	[[:xdigit:]]{32}-[[:xdigit:]]{16}-[01]
//
// Compiled once at package scope so parsing headers does not recompile it.
var sentryTracePattern = regexp.MustCompile(`^([[:xdigit:]]{32})-([[:xdigit:]]{16})(?:-([01]))?$`)
|
||||
|
||||
// updateFromSentryTrace parses a sentry-trace HTTP header (as returned by
// ToSentryTrace) and updates fields of the span. If the header cannot be
// recognized as valid, the span is left unchanged.
func (s *Span) updateFromSentryTrace(header []byte) {
	m := sentryTracePattern.FindSubmatch(header)
	if m == nil {
		// no match
		return
	}
	// Decode errors are deliberately ignored: the pattern guarantees the
	// submatches are hex strings of exactly the expected length.
	_, _ = hex.Decode(s.TraceID[:], m[1])
	_, _ = hex.Decode(s.ParentSpanID[:], m[2])
	if len(m[3]) != 0 {
		// Optional sampled flag; the pattern only admits '0' or '1'.
		switch m[3][0] {
		case '0':
			s.Sampled = SampledFalse
		case '1':
			s.Sampled = SampledTrue
		}
	}
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler. It serializes the span like the
// default encoding would, except that a zero ParentSpanID is omitted instead
// of being rendered as a string of zeros.
func (s *Span) MarshalJSON() ([]byte, error) {
	// span aliases Span to allow calling json.Marshal without an infinite loop.
	// It preserves all fields while none of the attached methods.
	type span Span
	var parentSpanID string
	if s.ParentSpanID != zeroSpanID {
		parentSpanID = s.ParentSpanID.String()
	}
	// The embedded *span provides all fields; the shadowing ParentSpanID
	// string applies omitempty to the zero value.
	return json.Marshal(struct {
		*span
		ParentSpanID string `json:"parent_span_id,omitempty"`
	}{
		span:         (*span)(s),
		ParentSpanID: parentSpanID,
	})
}
|
||||
|
||||
func (s *Span) sample() Sampled {
|
||||
// https://develop.sentry.dev/sdk/unified-api/tracing/#sampling
|
||||
// #1 explicit sampling decision via StartSpan options.
|
||||
if s.Sampled != SampledUndefined {
|
||||
return s.Sampled
|
||||
}
|
||||
hub := hubFromContext(s.ctx)
|
||||
var clientOptions ClientOptions
|
||||
client := hub.Client()
|
||||
if client != nil {
|
||||
clientOptions = hub.Client().Options()
|
||||
}
|
||||
samplingContext := SamplingContext{Span: s, Parent: s.parent}
|
||||
// Variant for non-transaction spans: they inherit the parent decision.
|
||||
// TracesSampler only runs for the root span.
|
||||
// Note: non-transaction should always have a parent, but we check both
|
||||
// conditions anyway -- the first for semantic meaning, the second to
|
||||
// avoid a nil pointer dereference.
|
||||
if !s.isTransaction && s.parent != nil {
|
||||
return s.parent.Sampled
|
||||
}
|
||||
// #2 use TracesSampler from ClientOptions.
|
||||
sampler := clientOptions.TracesSampler
|
||||
if sampler != nil {
|
||||
return sampler.Sample(samplingContext)
|
||||
}
|
||||
// #3 inherit parent decision.
|
||||
if s.parent != nil {
|
||||
return s.parent.Sampled
|
||||
}
|
||||
// #4 uniform sampling using TracesSampleRate.
|
||||
sampler = UniformTracesSampler(clientOptions.TracesSampleRate)
|
||||
return sampler.Sample(samplingContext)
|
||||
}
|
||||
|
||||
// toEvent converts the span tree rooted at s into a transaction Event ready
// for sending. It returns nil if s is not a local root span — only
// transactions can be transformed into events.
//
// Unfinished child spans (zero EndTime) are dropped with a log message.
func (s *Span) toEvent() *Event {
	if !s.isTransaction {
		return nil // only transactions can be transformed into events
	}
	hub := hubFromContext(s.ctx)

	children := s.recorder.children()
	finished := make([]*Span, 0, len(children))
	for _, child := range children {
		if child.EndTime.IsZero() {
			Logger.Printf("Dropped unfinished span: Op=%q TraceID=%s SpanID=%s", child.Op, child.TraceID, child.SpanID)
			continue
		}
		finished = append(finished, child)
	}

	return &Event{
		Type:        transactionType,
		Transaction: hub.Scope().Transaction(),
		Contexts: map[string]interface{}{
			"trace": s.traceContext(),
		},
		Tags:      s.Tags,
		Timestamp: s.EndTime,
		StartTime: s.StartTime,
		Spans:     finished,
	}
}
|
||||
|
||||
// traceContext returns the TraceContext corresponding to the span, suitable
// for storing in Event.Contexts under the "trace" key.
func (s *Span) traceContext() *TraceContext {
	return &TraceContext{
		TraceID:      s.TraceID,
		SpanID:       s.SpanID,
		ParentSpanID: s.ParentSpanID,
		Op:           s.Op,
		Description:  s.Description,
		Status:       s.Status,
	}
}
|
||||
|
||||
// spanRecorder returns the recorder storing the span tree. Guaranteed to be
// non-nil for spans created through StartSpan.
func (s *Span) spanRecorder() *spanRecorder { return s.recorder }
|
||||
|
||||
// TraceID identifies a trace: a 128-bit value rendered as 32 lowercase
// hexadecimal characters.
type TraceID [16]byte

// Hex returns the hexadecimal encoding of id as a byte slice.
func (id TraceID) Hex() []byte {
	return []byte(hex.EncodeToString(id[:]))
}

// String returns the hexadecimal encoding of id.
func (id TraceID) String() string {
	return hex.EncodeToString(id[:])
}

// MarshalText implements encoding.TextMarshaler.
func (id TraceID) MarshalText() ([]byte, error) {
	return id.Hex(), nil
}
|
||||
|
||||
// SpanID identifies a span: a 64-bit value rendered as 16 lowercase
// hexadecimal characters.
type SpanID [8]byte

// Hex returns the hexadecimal encoding of id as a byte slice.
func (id SpanID) Hex() []byte {
	return []byte(hex.EncodeToString(id[:]))
}

// String returns the hexadecimal encoding of id.
func (id SpanID) String() string {
	return hex.EncodeToString(id[:])
}

// MarshalText implements encoding.TextMarshaler.
func (id SpanID) MarshalText() ([]byte, error) {
	return id.Hex(), nil
}
|
||||
|
||||
// Zero values of TraceID and SpanID used for comparisons, e.g. to decide
// whether a parent span ID should be omitted from serialized output.
var (
	zeroTraceID TraceID
	zeroSpanID  SpanID
)
|
||||
|
||||
// SpanStatus is the status of a span.
type SpanStatus uint8

// Implementation note:
//
// In Relay (ingestion), SpanStatus is an enum used as Annotated<SpanStatus>,
// effectively Option<SpanStatus>: the status is either null or one of the
// known string values. In Snuba (search), it is stored as a uint8 defaulted
// to 2 ("unknown"), so Discover searches for `transaction.status:unknown`
// match both null and "unknown"; searches for `transaction.status:""` return
// nothing.
//
// With that in mind, the Go SDK default is SpanStatusUndefined, which is
// null/omitted when serializing to JSON, but integrations may update the
// status automatically based on contextual information.

const (
	SpanStatusUndefined SpanStatus = iota
	SpanStatusOK
	SpanStatusCanceled
	SpanStatusUnknown
	SpanStatusInvalidArgument
	SpanStatusDeadlineExceeded
	SpanStatusNotFound
	SpanStatusAlreadyExists
	SpanStatusPermissionDenied
	SpanStatusResourceExhausted
	SpanStatusFailedPrecondition
	SpanStatusAborted
	SpanStatusOutOfRange
	SpanStatusUnimplemented
	SpanStatusInternalError
	SpanStatusUnavailable
	SpanStatusDataLoss
	SpanStatusUnauthenticated
	maxSpanStatus
)

// String returns the protocol name of the status, or the empty string for
// SpanStatusUndefined and any out-of-range value.
func (ss SpanStatus) String() string {
	switch ss {
	case SpanStatusOK:
		return "ok"
	case SpanStatusCanceled:
		return "cancelled" // [sic] — double-l spelling is what the protocol expects
	case SpanStatusUnknown:
		return "unknown"
	case SpanStatusInvalidArgument:
		return "invalid_argument"
	case SpanStatusDeadlineExceeded:
		return "deadline_exceeded"
	case SpanStatusNotFound:
		return "not_found"
	case SpanStatusAlreadyExists:
		return "already_exists"
	case SpanStatusPermissionDenied:
		return "permission_denied"
	case SpanStatusResourceExhausted:
		return "resource_exhausted"
	case SpanStatusFailedPrecondition:
		return "failed_precondition"
	case SpanStatusAborted:
		return "aborted"
	case SpanStatusOutOfRange:
		return "out_of_range"
	case SpanStatusUnimplemented:
		return "unimplemented"
	case SpanStatusInternalError:
		return "internal_error"
	case SpanStatusUnavailable:
		return "unavailable"
	case SpanStatusDataLoss:
		return "data_loss"
	case SpanStatusUnauthenticated:
		return "unauthenticated"
	default:
		return ""
	}
}

// MarshalJSON implements json.Marshaler. Undefined and out-of-range statuses
// serialize as JSON null (see implementation note above).
func (ss SpanStatus) MarshalJSON() ([]byte, error) {
	name := ss.String()
	if name == "" {
		return []byte("null"), nil
	}
	return json.Marshal(name)
}
|
||||
|
||||
// A TraceContext carries information about an ongoing trace and is meant to be
// stored in Event.Contexts (as *TraceContext).
type TraceContext struct {
	TraceID      TraceID    `json:"trace_id"`
	SpanID       SpanID     `json:"span_id"`
	ParentSpanID SpanID     `json:"parent_span_id"`
	Op           string     `json:"op,omitempty"`
	Description  string     `json:"description,omitempty"`
	Status       SpanStatus `json:"status,omitempty"`
}

// MarshalJSON implements json.Marshaler, omitting a zero ParentSpanID instead
// of rendering it as a string of zeros.
func (tc *TraceContext) MarshalJSON() ([]byte, error) {
	// traceContext aliases TraceContext to allow calling json.Marshal without
	// an infinite loop. It preserves all fields while none of the attached
	// methods.
	type traceContext TraceContext
	var parentSpanID string
	if tc.ParentSpanID != zeroSpanID {
		parentSpanID = tc.ParentSpanID.String()
	}
	// The shadowing ParentSpanID string applies omitempty to the zero value.
	return json.Marshal(struct {
		*traceContext
		ParentSpanID string `json:"parent_span_id,omitempty"`
	}{
		traceContext: (*traceContext)(tc),
		ParentSpanID: parentSpanID,
	})
}
|
||||
|
||||
// Sampled signifies a sampling decision.
type Sampled int8

// The possible trace sampling decisions are: SampledFalse, SampledUndefined
// (default) and SampledTrue.
const (
	SampledFalse Sampled = -1 + iota
	SampledUndefined
	SampledTrue
)

// String returns the name of the sampling decision, or a diagnostic string
// for values outside the defined range.
func (s Sampled) String() string {
	if s >= SampledFalse && s <= SampledTrue {
		// Shift by one so SampledFalse (-1) indexes slot 0.
		return [...]string{"SampledFalse", "SampledUndefined", "SampledTrue"}[s+1]
	}
	return fmt.Sprintf("SampledInvalid(%d)", s)
}

// Bool returns true if the sample decision is SampledTrue, false otherwise.
func (s Sampled) Bool() bool {
	return s == SampledTrue
}
|
||||
|
||||
// A SpanOption is a function that can modify the properties of a span.
type SpanOption func(s *Span)

// The TransactionName option sets the name of the current transaction.
//
// A span tree has a single transaction name, therefore using this option when
// starting a span affects the span tree as a whole, potentially overwriting a
// name set previously.
//
// The name is stored on the scope (not on the span itself), matching where
// the transaction name has historically lived.
func TransactionName(name string) SpanOption {
	return func(s *Span) {
		hubFromContext(s.Context()).Scope().SetTransaction(name)
	}
}
|
||||
|
||||
// ContinueFromRequest returns a span option that updates the span to continue
// an existing trace. If it cannot detect an existing trace in the request, the
// span will be left unchanged.
//
// The trace is read from the incoming "sentry-trace" header (the format
// produced by ToSentryTrace).
func ContinueFromRequest(r *http.Request) SpanOption {
	return func(s *Span) {
		trace := r.Header.Get("sentry-trace")
		if trace == "" {
			return
		}
		s.updateFromSentryTrace([]byte(trace))
	}
}
|
||||
|
||||
// spanContextKey is used to store span values in contexts. An unexported
// empty struct type guarantees no collision with keys from other packages.
type spanContextKey struct{}
|
||||
|
||||
// TransactionFromContext returns the root span of the current transaction. It
// returns nil if no transaction is tracked in the context.
func TransactionFromContext(ctx context.Context) *Span {
	if span, ok := ctx.Value(spanContextKey{}).(*Span); ok {
		// The recorder's first entry is the local root span of the tree.
		return span.recorder.root()
	}
	return nil
}
|
||||
|
||||
// spanFromContext returns the last span stored in the context or a dummy
|
||||
// non-nil span.
|
||||
//
|
||||
// TODO(tracing): consider exporting this. Without this, users cannot retrieve a
|
||||
// span from a context since spanContextKey is not exported.
|
||||
//
|
||||
// This can be added retroactively, and in the meantime think better whether it
|
||||
// should return nil (like GetHubFromContext), always non-nil (like
|
||||
// HubFromContext), or both: two exported functions.
|
||||
//
|
||||
// Note the equivalence:
|
||||
//
|
||||
// SpanFromContext(ctx).StartChild(...) === StartSpan(ctx, ...)
|
||||
//
|
||||
// So we don't aim spanFromContext at creating spans, but mutating existing
|
||||
// spans that you'd have no access otherwise (because it was created in code you
|
||||
// do not control, for example SDK auto-instrumentation).
|
||||
//
|
||||
// For now we provide TransactionFromContext, which solves the more common case
|
||||
// of setting tags, etc, on the current transaction.
|
||||
// spanFromContext returns the last span stored in the context, or nil if the
// context carries no span. See the discussion above about possibly exporting
// this in the future.
func spanFromContext(ctx context.Context) *Span {
	if span, ok := ctx.Value(spanContextKey{}).(*Span); ok {
		return span
	}
	return nil
}
|
||||
86
vendor/github.com/getsentry/sentry-go/transport.go
generated
vendored
86
vendor/github.com/getsentry/sentry-go/transport.go
generated
vendored
@@ -49,6 +49,8 @@ func getTLSConfig(options ClientOptions) *tls.Config {
|
||||
}
|
||||
|
||||
func retryAfter(now time.Time, r *http.Response) time.Duration {
|
||||
// TODO(tracing): handle x-sentry-rate-limits, separate rate limiting
|
||||
// per data type (error event, transaction, etc).
|
||||
retryAfterHeader := r.Header["Retry-After"]
|
||||
|
||||
if retryAfterHeader == nil {
|
||||
@@ -72,7 +74,7 @@ func getRequestBodyFromEvent(event *Event) []byte {
|
||||
return body
|
||||
}
|
||||
|
||||
partialMarshallMessage := fmt.Sprintf("Could not encode original event as JSON. "+
|
||||
msg := fmt.Sprintf("Could not encode original event as JSON. "+
|
||||
"Succeeded by removing Breadcrumbs, Contexts and Extra. "+
|
||||
"Please verify the data you attach to the scope. "+
|
||||
"Error: %s", err)
|
||||
@@ -80,54 +82,76 @@ func getRequestBodyFromEvent(event *Event) []byte {
|
||||
event.Breadcrumbs = nil
|
||||
event.Contexts = nil
|
||||
event.Extra = map[string]interface{}{
|
||||
"info": partialMarshallMessage,
|
||||
"info": msg,
|
||||
}
|
||||
body, err = json.Marshal(event)
|
||||
if err == nil {
|
||||
Logger.Println(partialMarshallMessage)
|
||||
Logger.Println(msg)
|
||||
return body
|
||||
}
|
||||
|
||||
// This should _only_ happen when Event.Exception[0].Stacktrace.Frames[0].Vars is unserializable
|
||||
// Which won't ever happen, as we don't use it now (although it's the part of public interface accepted by Sentry)
|
||||
// Juuust in case something, somehow goes utterly wrong.
|
||||
Logger.Println("Event couldn't be marshalled, even with stripped contextual data. Skipping delivery. " +
|
||||
Logger.Println("Event couldn't be marshaled, even with stripped contextual data. Skipping delivery. " +
|
||||
"Please notify the SDK owners with possibly broken payload.")
|
||||
return nil
|
||||
}
|
||||
|
||||
func getEnvelopeFromBody(body []byte, now time.Time) *bytes.Buffer {
|
||||
func transactionEnvelopeFromBody(eventID EventID, sentAt time.Time, body json.RawMessage) (*bytes.Buffer, error) {
|
||||
var b bytes.Buffer
|
||||
fmt.Fprintf(&b, `{"sent_at":"%s"}`, now.UTC().Format(time.RFC3339Nano))
|
||||
fmt.Fprint(&b, "\n", `{"type":"transaction"}`, "\n")
|
||||
b.Write(body)
|
||||
return &b
|
||||
enc := json.NewEncoder(&b)
|
||||
// envelope header
|
||||
err := enc.Encode(struct {
|
||||
EventID EventID `json:"event_id"`
|
||||
SentAt time.Time `json:"sent_at"`
|
||||
}{
|
||||
EventID: eventID,
|
||||
SentAt: sentAt,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// item header
|
||||
err = enc.Encode(struct {
|
||||
Type string `json:"type"`
|
||||
Length int `json:"length"`
|
||||
}{
|
||||
Type: transactionType,
|
||||
Length: len(body),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// payload
|
||||
err = enc.Encode(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &b, nil
|
||||
}
|
||||
|
||||
func getRequestFromEvent(event *Event, dsn *Dsn) (*http.Request, error) {
|
||||
body := getRequestBodyFromEvent(event)
|
||||
if body == nil {
|
||||
return nil, errors.New("event could not be marshalled")
|
||||
return nil, errors.New("event could not be marshaled")
|
||||
}
|
||||
|
||||
if event.Type == transactionType {
|
||||
env := getEnvelopeFromBody(body, time.Now())
|
||||
request, _ := http.NewRequest(
|
||||
b, err := transactionEnvelopeFromBody(event.EventID, time.Now(), body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return http.NewRequest(
|
||||
http.MethodPost,
|
||||
dsn.EnvelopeAPIURL().String(),
|
||||
env,
|
||||
b,
|
||||
)
|
||||
|
||||
return request, nil
|
||||
}
|
||||
|
||||
request, _ := http.NewRequest(
|
||||
return http.NewRequest(
|
||||
http.MethodPost,
|
||||
dsn.StoreAPIURL().String(),
|
||||
bytes.NewBuffer(body),
|
||||
bytes.NewReader(body),
|
||||
)
|
||||
|
||||
return request, nil
|
||||
}
|
||||
|
||||
// ================================
|
||||
@@ -249,9 +273,15 @@ func (t *HTTPTransport) SendEvent(event *Event) {
|
||||
|
||||
select {
|
||||
case b.items <- request:
|
||||
var eventType string
|
||||
if event.Type == transactionType {
|
||||
eventType = "transaction"
|
||||
} else {
|
||||
eventType = fmt.Sprintf("%s event", event.Level)
|
||||
}
|
||||
Logger.Printf(
|
||||
"Sending %s event [%s] to %s project: %d\n",
|
||||
event.Level,
|
||||
"Sending %s [%s] to %s project: %d",
|
||||
eventType,
|
||||
event.EventID,
|
||||
t.dsn.host,
|
||||
t.dsn.projectID,
|
||||
@@ -427,9 +457,15 @@ func (t *HTTPSyncTransport) SendEvent(event *Event) {
|
||||
request.Header.Set(headerKey, headerValue)
|
||||
}
|
||||
|
||||
var eventType string
|
||||
if event.Type == transactionType {
|
||||
eventType = "transaction"
|
||||
} else {
|
||||
eventType = fmt.Sprintf("%s event", event.Level)
|
||||
}
|
||||
Logger.Printf(
|
||||
"Sending %s event [%s] to %s project: %d\n",
|
||||
event.Level,
|
||||
"Sending %s [%s] to %s project: %d",
|
||||
eventType,
|
||||
event.EventID,
|
||||
t.dsn.host,
|
||||
t.dsn.projectID,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user