mirror of
https://github.com/mattermost/mattermost.git
synced 2025-02-25 18:55:24 -06:00
Updating server dependancies. Also adding github.com/jaytaylor/html2text and gopkg.in/gomail.v2 (#5748)
This commit is contained in:
committed by
GitHub
parent
3ada7a41a7
commit
c281ee3b61
40
glide.lock
generated
40
glide.lock
generated
@@ -1,5 +1,5 @@
|
||||
hash: 42839eb256dd5b5607d93cd50372cf55881de6e874a63525862ed3f0ac47682b
|
||||
updated: 2017-02-01T10:22:19.702548922-05:00
|
||||
hash: 978c0474f3432a26942e213ca1ba8e957fa5a34ee4652d736dae152fbd6d5384
|
||||
updated: 2017-03-13T11:12:52.056509274-04:00
|
||||
imports:
|
||||
- name: github.com/alecthomas/log4go
|
||||
version: e5dc62318d9bd58682f1dceb53a4b24e8253682f
|
||||
@@ -22,7 +22,7 @@ imports:
|
||||
- name: github.com/go-sql-driver/mysql
|
||||
version: a0583e0143b1624142adab07e0e97fe106d99561
|
||||
- name: github.com/goamz/goamz
|
||||
version: b2c2eaf25cbb87f41087aa796facf82113809ed5
|
||||
version: c35091c30f44b7f151ec9028b895465a191d1ea7
|
||||
subpackages:
|
||||
- aws
|
||||
- s3
|
||||
@@ -32,7 +32,7 @@ imports:
|
||||
- raster
|
||||
- truetype
|
||||
- name: github.com/golang/protobuf
|
||||
version: 8ee79997227bf9b34611aee7946ae64735e6fd93
|
||||
version: c9c7427a2a70d2eb3bafa0ab2dc163e45f143317
|
||||
subpackages:
|
||||
- proto
|
||||
- name: github.com/gorilla/context
|
||||
@@ -49,10 +49,12 @@ imports:
|
||||
- simplelru
|
||||
- name: github.com/inconshreveable/mousetrap
|
||||
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
|
||||
- name: github.com/jaytaylor/html2text
|
||||
version: f3b8a7ca0a23f0a806b2e1ad1247de39ecde54bf
|
||||
- name: github.com/jehiah/go-strftime
|
||||
version: 834e15c05a45371503440cc195bbd05c9a0968d9
|
||||
- name: github.com/lib/pq
|
||||
version: a6657b2386e9b8be76484c08711b02c7cf867ead
|
||||
version: e4af84aab01e159ca479940dbde304519e8e8811
|
||||
subpackages:
|
||||
- oid
|
||||
- name: github.com/mattermost/rsc
|
||||
@@ -66,7 +68,7 @@ imports:
|
||||
subpackages:
|
||||
- pbutil
|
||||
- name: github.com/miekg/dns
|
||||
version: 99f84ae56e75126dd77e5de4fae2ea034a468ca1
|
||||
version: c862b7e359850847d4945cce311db2ea90cab7c0
|
||||
- name: github.com/minio/minio-go
|
||||
version: 52cc94e879db78c2e2c6e160869df943137ec4cd
|
||||
subpackages:
|
||||
@@ -84,7 +86,7 @@ imports:
|
||||
- i18n/language
|
||||
- i18n/translation
|
||||
- name: github.com/NYTimes/gziphandler
|
||||
version: 6710af535839f57c687b62c4c23d649f9545d885
|
||||
version: fb3533722e14198abe471546c9798fd556531451
|
||||
- name: github.com/pborman/uuid
|
||||
version: a97ce2ca70fa5a848076093f05e639a89ca34d06
|
||||
- name: github.com/prometheus/client_golang
|
||||
@@ -92,15 +94,15 @@ imports:
|
||||
subpackages:
|
||||
- prometheus
|
||||
- name: github.com/prometheus/client_model
|
||||
version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6
|
||||
version: 6f3806018612930941127f2a7c6c453ba2c527d2
|
||||
subpackages:
|
||||
- go
|
||||
- name: github.com/prometheus/common
|
||||
version: dd2f054febf4a6c00f2343686efb775948a8bff4
|
||||
version: 49fee292b27bfff7f354ee0f64e1bc4850462edf
|
||||
subpackages:
|
||||
- expfmt
|
||||
- name: github.com/prometheus/procfs
|
||||
version: 1878d9fbb537119d24b21ca07effd591627cd160
|
||||
version: a1dba9ce8baed984a2495b658c82687f8157b98f
|
||||
- name: github.com/rsc/letsencrypt
|
||||
version: 76104d26167d38b6a0010f42bfc8ec5487742e8b
|
||||
- name: github.com/rwcarlsen/goexif
|
||||
@@ -113,25 +115,25 @@ imports:
|
||||
- name: github.com/segmentio/backo-go
|
||||
version: 204274ad699c0983a70203a566887f17a717fef4
|
||||
- name: github.com/spf13/cobra
|
||||
version: 35136c09d8da66b901337c6e86fd8e88a1a255bd
|
||||
version: 16c014f1a19d865b765b420e74508f80eb831ada
|
||||
- name: github.com/spf13/pflag
|
||||
version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7
|
||||
- name: github.com/tylerb/graceful
|
||||
version: 0e9129e9c6d47da90dc0c188b26bd7bb1dab53cd
|
||||
version: d72b0151351a13d0421b763b88f791469c4f5dc7
|
||||
- name: github.com/xenolf/lego
|
||||
version: f5d538caab6dc0c167d4e32990c79bbf9eff578c
|
||||
version: 6cac0ea7d8b28c889f709ec7fa92e92b82f490dd
|
||||
subpackages:
|
||||
- acme
|
||||
- name: github.com/xtgo/uuid
|
||||
version: a0b114877d4caeffbd7f87e3757c17fce570fea7
|
||||
- name: golang.org/x/crypto
|
||||
version: dc137beb6cce2043eb6b5f223ab8bf51c32459f4
|
||||
version: 728b753d0135da6801d45a38e6f43ff55779c5c2
|
||||
subpackages:
|
||||
- bcrypt
|
||||
- blowfish
|
||||
- ocsp
|
||||
- name: golang.org/x/image
|
||||
version: 83686c547965220f8b5d75e83ddc67d73420a89f
|
||||
version: 793f3be7dac93749dec06ae3fbe7d0ded4bdcf3d
|
||||
subpackages:
|
||||
- bmp
|
||||
- font
|
||||
@@ -139,14 +141,14 @@ imports:
|
||||
- tiff
|
||||
- tiff/lzw
|
||||
- name: golang.org/x/net
|
||||
version: f2499483f923065a842d38eb4c7f1927e6fc6e6d
|
||||
version: a6577fac2d73be281a500b310739095313165611
|
||||
subpackages:
|
||||
- context
|
||||
- html
|
||||
- html/atom
|
||||
- publicsuffix
|
||||
- name: golang.org/x/sys
|
||||
version: 7a6e5648d140666db5d920909e082ca00a87ba2c
|
||||
version: 99f16d856c9836c42d24e7ab64ea72916925fa97
|
||||
subpackages:
|
||||
- unix
|
||||
- name: golang.org/x/time
|
||||
@@ -157,6 +159,8 @@ imports:
|
||||
version: 4e86f4367175e39f69d9358a5f17b4dda270378d
|
||||
- name: gopkg.in/fsnotify.v1
|
||||
version: 629574ca2a5df945712d3079857300b5e4da0236
|
||||
- name: gopkg.in/gomail.v2
|
||||
version: 41f3572897373c5538c50a2402db15db079fa4fd
|
||||
- name: gopkg.in/square/go-jose.v1
|
||||
version: aa2e30fdd1fe9dd3394119af66451ae790d50e0d
|
||||
subpackages:
|
||||
@@ -168,5 +172,5 @@ imports:
|
||||
- store
|
||||
- store/memstore
|
||||
- name: gopkg.in/yaml.v2
|
||||
version: 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
|
||||
version: a3f3340b5840cee44f372bddb5880fcbc419b46a
|
||||
testImports: []
|
||||
|
||||
@@ -4,6 +4,7 @@ import:
|
||||
- package: github.com/alecthomas/log4go
|
||||
- package: github.com/dgryski/dgoogauth
|
||||
- package: github.com/disintegration/imaging
|
||||
version: v1.0.0
|
||||
- package: github.com/go-gorp/gorp
|
||||
version: 0c9bc0918534d133cedb439a24adc7cbe66e4a9d
|
||||
- package: github.com/go-ldap/ldap
|
||||
@@ -79,3 +80,6 @@ import:
|
||||
- package: github.com/dyatlov/go-opengraph
|
||||
subpackages:
|
||||
- opengraph
|
||||
- package: github.com/jaytaylor/html2text
|
||||
- package: gopkg.in/gomail.v2
|
||||
version: 2.0.0
|
||||
|
||||
1
vendor/github.com/goamz/goamz/aws/aws.go
generated
vendored
1
vendor/github.com/goamz/goamz/aws/aws.go
generated
vendored
@@ -71,6 +71,7 @@ var Regions = map[string]Region{
|
||||
EUCentral.Name: EUCentral,
|
||||
EUWest.Name: EUWest,
|
||||
USEast.Name: USEast,
|
||||
USEast2.Name: USEast2,
|
||||
USWest.Name: USWest,
|
||||
USWest2.Name: USWest2,
|
||||
USGovWest.Name: USGovWest,
|
||||
|
||||
23
vendor/github.com/goamz/goamz/aws/regions.go
generated
vendored
23
vendor/github.com/goamz/goamz/aws/regions.go
generated
vendored
@@ -46,6 +46,29 @@ var USEast = Region{
|
||||
"https://streams.dynamodb.us-east-1.amazonaws.com",
|
||||
}
|
||||
|
||||
var USEast2 = Region{
|
||||
"us-east-2",
|
||||
"https://ec2.us-east-2.amazonaws.com",
|
||||
"https://s3.amazonaws.com",
|
||||
"",
|
||||
true,
|
||||
true,
|
||||
"",
|
||||
"",
|
||||
"https://sns.us-east-2.amazonaws.com",
|
||||
"https://sqs.us-east-2.amazonaws.com",
|
||||
"https://iam.amazonaws.com",
|
||||
"https://elasticloadbalancing.us-east-2.amazonaws.com",
|
||||
"https://dynamodb.us-east-2.amazonaws.com",
|
||||
ServiceInfo{"https://monitoring.us-east-2.amazonaws.com", V2Signature},
|
||||
"https://autoscaling.us-east-1.amazonaws.com",
|
||||
ServiceInfo{"https://rds.us-east-2.amazonaws.com", V2Signature},
|
||||
"https://sts.amazonaws.com",
|
||||
"https://cloudformation.us-east-2.amazonaws.com",
|
||||
"https://ecs.us-east-2.amazonaws.com",
|
||||
"https://streams.dynamodb.us-east-2.amazonaws.com",
|
||||
}
|
||||
|
||||
var USWest = Region{
|
||||
"us-west-1",
|
||||
"https://ec2.us-west-1.amazonaws.com",
|
||||
|
||||
2
vendor/github.com/goamz/goamz/sqs/sqs.go
generated
vendored
2
vendor/github.com/goamz/goamz/sqs/sqs.go
generated
vendored
@@ -48,6 +48,8 @@ func NewFrom(accessKey, secretKey, region string) (*SQS, error) {
|
||||
switch region {
|
||||
case "us.east", "us.east.1":
|
||||
aws_region = aws.USEast
|
||||
case "us.east.2":
|
||||
aws_region = aws.USEast2
|
||||
case "us.west", "us.west.1":
|
||||
aws_region = aws.USWest
|
||||
case "us.west.2":
|
||||
|
||||
2
vendor/github.com/golang/protobuf/descriptor/descriptor.go
generated
vendored
2
vendor/github.com/golang/protobuf/descriptor/descriptor.go
generated
vendored
@@ -43,7 +43,7 @@ import (
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
protobuf "google.golang.org/genproto/protobuf"
|
||||
protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
|
||||
)
|
||||
|
||||
// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
|
||||
|
||||
2
vendor/github.com/golang/protobuf/descriptor/descriptor_test.go
generated
vendored
2
vendor/github.com/golang/protobuf/descriptor/descriptor_test.go
generated
vendored
@@ -6,7 +6,7 @@ import (
|
||||
|
||||
"github.com/golang/protobuf/descriptor"
|
||||
tpb "github.com/golang/protobuf/proto/testdata"
|
||||
protobuf "google.golang.org/genproto/protobuf"
|
||||
protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
|
||||
)
|
||||
|
||||
func TestMessage(t *testing.T) {
|
||||
|
||||
6
vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile
generated
vendored
6
vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile
generated
vendored
@@ -45,9 +45,9 @@ my_test/test.pb.go: my_test/test.proto
|
||||
|
||||
golden:
|
||||
make -B my_test/test.pb.go
|
||||
sed -i '/return.*fileDescriptor/d' my_test/test.pb.go
|
||||
sed -i '/^var fileDescriptor/,/^}/d' my_test/test.pb.go
|
||||
sed -i '/proto.RegisterFile.*fileDescriptor/d' my_test/test.pb.go
|
||||
sed -i -e '/return.*fileDescriptor/d' my_test/test.pb.go
|
||||
sed -i -e '/^var fileDescriptor/,/^}/d' my_test/test.pb.go
|
||||
sed -i -e '/proto.RegisterFile.*fileDescriptor/d' my_test/test.pb.go
|
||||
gofmt -w my_test/test.pb.go
|
||||
diff -w my_test/test.pb.go my_test/test.pb.go.golden
|
||||
|
||||
|
||||
7
vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go
generated
vendored
7
vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go
generated
vendored
@@ -319,7 +319,7 @@ func (m *Reply) GetCompactKeys() []int32 {
|
||||
type Reply_Entry struct {
|
||||
KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"`
|
||||
Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"`
|
||||
XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=myFieldName2" json:"_my_field_name_2,omitempty"`
|
||||
XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=MyFieldName2" json:"_my_field_name_2,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
@@ -389,6 +389,7 @@ var E_ReplyExtensions_Time = &proto.ExtensionDesc{
|
||||
Field: 101,
|
||||
Name: "my.test.ReplyExtensions.time",
|
||||
Tag: "fixed64,101,opt,name=time",
|
||||
Filename: "my_test/test.proto",
|
||||
}
|
||||
|
||||
var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{
|
||||
@@ -397,6 +398,7 @@ var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{
|
||||
Field: 105,
|
||||
Name: "my.test.ReplyExtensions.carrot",
|
||||
Tag: "bytes,105,opt,name=carrot",
|
||||
Filename: "my_test/test.proto",
|
||||
}
|
||||
|
||||
var E_ReplyExtensions_Donut = &proto.ExtensionDesc{
|
||||
@@ -405,6 +407,7 @@ var E_ReplyExtensions_Donut = &proto.ExtensionDesc{
|
||||
Field: 101,
|
||||
Name: "my.test.ReplyExtensions.donut",
|
||||
Tag: "bytes,101,opt,name=donut",
|
||||
Filename: "my_test/test.proto",
|
||||
}
|
||||
|
||||
type OtherReplyExtensions struct {
|
||||
@@ -832,6 +835,7 @@ var E_Tag = &proto.ExtensionDesc{
|
||||
Field: 103,
|
||||
Name: "my.test.tag",
|
||||
Tag: "bytes,103,opt,name=tag",
|
||||
Filename: "my_test/test.proto",
|
||||
}
|
||||
|
||||
var E_Donut = &proto.ExtensionDesc{
|
||||
@@ -840,6 +844,7 @@ var E_Donut = &proto.ExtensionDesc{
|
||||
Field: 106,
|
||||
Name: "my.test.donut",
|
||||
Tag: "bytes,106,opt,name=donut",
|
||||
Filename: "my_test/test.proto",
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
7
vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden
generated
vendored
7
vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden
generated
vendored
@@ -319,7 +319,7 @@ func (m *Reply) GetCompactKeys() []int32 {
|
||||
type Reply_Entry struct {
|
||||
KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"`
|
||||
Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"`
|
||||
XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=myFieldName2" json:"_my_field_name_2,omitempty"`
|
||||
XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=MyFieldName2" json:"_my_field_name_2,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
@@ -389,6 +389,7 @@ var E_ReplyExtensions_Time = &proto.ExtensionDesc{
|
||||
Field: 101,
|
||||
Name: "my.test.ReplyExtensions.time",
|
||||
Tag: "fixed64,101,opt,name=time",
|
||||
Filename: "my_test/test.proto",
|
||||
}
|
||||
|
||||
var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{
|
||||
@@ -397,6 +398,7 @@ var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{
|
||||
Field: 105,
|
||||
Name: "my.test.ReplyExtensions.carrot",
|
||||
Tag: "bytes,105,opt,name=carrot",
|
||||
Filename: "my_test/test.proto",
|
||||
}
|
||||
|
||||
var E_ReplyExtensions_Donut = &proto.ExtensionDesc{
|
||||
@@ -405,6 +407,7 @@ var E_ReplyExtensions_Donut = &proto.ExtensionDesc{
|
||||
Field: 101,
|
||||
Name: "my.test.ReplyExtensions.donut",
|
||||
Tag: "bytes,101,opt,name=donut",
|
||||
Filename: "my_test/test.proto",
|
||||
}
|
||||
|
||||
type OtherReplyExtensions struct {
|
||||
@@ -832,6 +835,7 @@ var E_Tag = &proto.ExtensionDesc{
|
||||
Field: 103,
|
||||
Name: "my.test.tag",
|
||||
Tag: "bytes,103,opt,name=tag",
|
||||
Filename: "my_test/test.proto",
|
||||
}
|
||||
|
||||
var E_Donut = &proto.ExtensionDesc{
|
||||
@@ -840,6 +844,7 @@ var E_Donut = &proto.ExtensionDesc{
|
||||
Field: 106,
|
||||
Name: "my.test.donut",
|
||||
Tag: "bytes,106,opt,name=donut",
|
||||
Filename: "my_test/test.proto",
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
24
vendor/github.com/jaytaylor/html2text/.gitignore
generated
vendored
Normal file
24
vendor/github.com/jaytaylor/html2text/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
14
vendor/github.com/jaytaylor/html2text/.travis.yml
generated
vendored
Normal file
14
vendor/github.com/jaytaylor/html2text/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
language: go
|
||||
go:
|
||||
- tip
|
||||
- 1.8
|
||||
- 1.7
|
||||
- 1.6
|
||||
- 1.5
|
||||
- 1.4
|
||||
- 1.3
|
||||
- 1.2
|
||||
notifications:
|
||||
email:
|
||||
on_success: change
|
||||
on_failure: always
|
||||
22
vendor/github.com/jaytaylor/html2text/LICENSE
generated
vendored
Normal file
22
vendor/github.com/jaytaylor/html2text/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Jay Taylor
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
116
vendor/github.com/jaytaylor/html2text/README.md
generated
vendored
Normal file
116
vendor/github.com/jaytaylor/html2text/README.md
generated
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
# html2text
|
||||
|
||||
[](https://godoc.org/github.com/jaytaylor/html2text)
|
||||
[](https://travis-ci.org/jaytaylor/html2text)
|
||||
[](https://goreportcard.com/report/github.com/jaytaylor/html2text)
|
||||
|
||||
### Converts HTML into text
|
||||
|
||||
|
||||
## Introduction
|
||||
|
||||
Ensure your emails are readable by all!
|
||||
|
||||
Turns HTML into raw text, useful for sending fancy HTML emails with a equivalently nicely formatted TXT document as a fallback (e.g. for people who don't allow HTML emails or have other display issues).
|
||||
|
||||
html2text is a simple golang package for rendering HTML into plaintext.
|
||||
|
||||
There are still lots of improvements to be had, but FWIW this has worked fine for my [basic] HTML-2-text needs.
|
||||
|
||||
It requires go 1.x or newer ;)
|
||||
|
||||
|
||||
## Download the package
|
||||
|
||||
```bash
|
||||
go get github.com/jaytaylor/html2text
|
||||
```
|
||||
|
||||
## Example usage
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/jaytaylor/html2text"
|
||||
)
|
||||
|
||||
func main() {
|
||||
inputHtml := `
|
||||
<html>
|
||||
<head>
|
||||
<title>My Mega Service</title>
|
||||
<link rel=\"stylesheet\" href=\"main.css\">
|
||||
<style type=\"text/css\">body { color: #fff; }</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div class="logo">
|
||||
<a href="http://mymegaservice.com/"><img src="/logo-image.jpg" alt="Mega Service"/></a>
|
||||
</div>
|
||||
|
||||
<h1>Welcome to your new account on my service!</h1>
|
||||
|
||||
<p>
|
||||
Here is some more information:
|
||||
|
||||
<ul>
|
||||
<li>Link 1: <a href="https://example.com">Example.com</a></li>
|
||||
<li>Link 2: <a href="https://example2.com">Example2.com</a></li>
|
||||
<li>Something else</li>
|
||||
</ul>
|
||||
</p>
|
||||
</body>
|
||||
</html>
|
||||
`
|
||||
|
||||
text, err := html2text.FromString(inputHtml)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Println(text)
|
||||
}
|
||||
```
|
||||
|
||||
Output:
|
||||
```
|
||||
Mega Service ( http://mymegaservice.com/ )
|
||||
|
||||
******************************************
|
||||
Welcome to your new account on my service!
|
||||
******************************************
|
||||
|
||||
Here is some more information:
|
||||
|
||||
* Link 1: Example.com ( https://example.com )
|
||||
* Link 2: Example2.com ( https://example2.com )
|
||||
* Something else
|
||||
```
|
||||
|
||||
|
||||
## Unit-tests
|
||||
|
||||
Running the unit-tests is straightforward and standard:
|
||||
|
||||
```bash
|
||||
go test
|
||||
```
|
||||
|
||||
|
||||
# License
|
||||
|
||||
Permissive MIT license.
|
||||
|
||||
|
||||
## Contact
|
||||
|
||||
You are more than welcome to open issues and send pull requests if you find a bug or want a new feature.
|
||||
|
||||
If you appreciate this library please feel free to drop me a line and tell me! It's always nice to hear from people who have benefitted from my work.
|
||||
|
||||
Email: jay at (my github username).com
|
||||
|
||||
Twitter: [@jtaylor](https://twitter.com/jtaylor)
|
||||
|
||||
312
vendor/github.com/jaytaylor/html2text/html2text.go
generated
vendored
Normal file
312
vendor/github.com/jaytaylor/html2text/html2text.go
generated
vendored
Normal file
@@ -0,0 +1,312 @@
|
||||
package html2text
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"regexp"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/ssor/bom"
|
||||
|
||||
"golang.org/x/net/html"
|
||||
"golang.org/x/net/html/atom"
|
||||
)
|
||||
|
||||
var (
|
||||
spacingRe = regexp.MustCompile(`[ \r\n\t]+`)
|
||||
newlineRe = regexp.MustCompile(`\n\n+`)
|
||||
)
|
||||
|
||||
type textifyTraverseCtx struct {
|
||||
Buf bytes.Buffer
|
||||
|
||||
prefix string
|
||||
blockquoteLevel int
|
||||
lineLength int
|
||||
endsWithSpace bool
|
||||
endsWithNewline bool
|
||||
justClosedDiv bool
|
||||
}
|
||||
|
||||
func (ctx *textifyTraverseCtx) traverse(node *html.Node) error {
|
||||
switch node.Type {
|
||||
default:
|
||||
return ctx.traverseChildren(node)
|
||||
|
||||
case html.TextNode:
|
||||
data := strings.Trim(spacingRe.ReplaceAllString(node.Data, " "), " ")
|
||||
return ctx.emit(data)
|
||||
|
||||
case html.ElementNode:
|
||||
return ctx.handleElementNode(node)
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *textifyTraverseCtx) handleElementNode(node *html.Node) error {
|
||||
ctx.justClosedDiv = false
|
||||
switch node.DataAtom {
|
||||
case atom.Br:
|
||||
return ctx.emit("\n")
|
||||
|
||||
case atom.H1, atom.H2, atom.H3:
|
||||
subCtx := textifyTraverseCtx{}
|
||||
if err := subCtx.traverseChildren(node); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
str := subCtx.Buf.String()
|
||||
dividerLen := 0
|
||||
for _, line := range strings.Split(str, "\n") {
|
||||
if lineLen := len([]rune(line)); lineLen-1 > dividerLen {
|
||||
dividerLen = lineLen - 1
|
||||
}
|
||||
}
|
||||
divider := ""
|
||||
if node.DataAtom == atom.H1 {
|
||||
divider = strings.Repeat("*", dividerLen)
|
||||
} else {
|
||||
divider = strings.Repeat("-", dividerLen)
|
||||
}
|
||||
|
||||
if node.DataAtom == atom.H3 {
|
||||
return ctx.emit("\n\n" + str + "\n" + divider + "\n\n")
|
||||
}
|
||||
return ctx.emit("\n\n" + divider + "\n" + str + "\n" + divider + "\n\n")
|
||||
|
||||
case atom.Blockquote:
|
||||
ctx.blockquoteLevel++
|
||||
ctx.prefix = strings.Repeat(">", ctx.blockquoteLevel) + " "
|
||||
if err := ctx.emit("\n"); err != nil {
|
||||
return err
|
||||
}
|
||||
if ctx.blockquoteLevel == 1 {
|
||||
if err := ctx.emit("\n"); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := ctx.traverseChildren(node); err != nil {
|
||||
return err
|
||||
}
|
||||
ctx.blockquoteLevel--
|
||||
ctx.prefix = strings.Repeat(">", ctx.blockquoteLevel)
|
||||
if ctx.blockquoteLevel > 0 {
|
||||
ctx.prefix += " "
|
||||
}
|
||||
return ctx.emit("\n\n")
|
||||
|
||||
case atom.Div:
|
||||
if ctx.lineLength > 0 {
|
||||
if err := ctx.emit("\n"); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := ctx.traverseChildren(node); err != nil {
|
||||
return err
|
||||
}
|
||||
var err error
|
||||
if ctx.justClosedDiv == false {
|
||||
err = ctx.emit("\n")
|
||||
}
|
||||
ctx.justClosedDiv = true
|
||||
return err
|
||||
|
||||
case atom.Li:
|
||||
if err := ctx.emit("* "); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ctx.traverseChildren(node); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ctx.emit("\n")
|
||||
|
||||
case atom.B, atom.Strong:
|
||||
subCtx := textifyTraverseCtx{}
|
||||
subCtx.endsWithSpace = true
|
||||
if err := subCtx.traverseChildren(node); err != nil {
|
||||
return err
|
||||
}
|
||||
str := subCtx.Buf.String()
|
||||
return ctx.emit("*" + str + "*")
|
||||
|
||||
case atom.A:
|
||||
// If image is the only child, take its alt text as the link text
|
||||
if img := node.FirstChild; img != nil && node.LastChild == img && img.DataAtom == atom.Img {
|
||||
if altText := getAttrVal(img, "alt"); altText != "" {
|
||||
ctx.emit(altText)
|
||||
}
|
||||
} else if err := ctx.traverseChildren(node); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hrefLink := ""
|
||||
if attrVal := getAttrVal(node, "href"); attrVal != "" {
|
||||
attrVal = ctx.normalizeHrefLink(attrVal)
|
||||
if attrVal != "" {
|
||||
hrefLink = "( " + attrVal + " )"
|
||||
}
|
||||
}
|
||||
|
||||
return ctx.emit(hrefLink)
|
||||
|
||||
case atom.P, atom.Ul, atom.Table:
|
||||
if err := ctx.emit("\n\n"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ctx.traverseChildren(node); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ctx.emit("\n\n")
|
||||
|
||||
case atom.Tr:
|
||||
if err := ctx.traverseChildren(node); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ctx.emit("\n")
|
||||
|
||||
case atom.Style, atom.Script, atom.Head:
|
||||
// Ignore the subtree
|
||||
return nil
|
||||
|
||||
default:
|
||||
return ctx.traverseChildren(node)
|
||||
}
|
||||
}
|
||||
func (ctx *textifyTraverseCtx) traverseChildren(node *html.Node) error {
|
||||
for c := node.FirstChild; c != nil; c = c.NextSibling {
|
||||
if err := ctx.traverse(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ctx *textifyTraverseCtx) emit(data string) error {
|
||||
if len(data) == 0 {
|
||||
return nil
|
||||
}
|
||||
lines := ctx.breakLongLines(data)
|
||||
var err error
|
||||
for _, line := range lines {
|
||||
runes := []rune(line)
|
||||
startsWithSpace := unicode.IsSpace(runes[0])
|
||||
if !startsWithSpace && !ctx.endsWithSpace {
|
||||
ctx.Buf.WriteByte(' ')
|
||||
ctx.lineLength++
|
||||
}
|
||||
ctx.endsWithSpace = unicode.IsSpace(runes[len(runes)-1])
|
||||
for _, c := range line {
|
||||
_, err = ctx.Buf.WriteString(string(c))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ctx.lineLength++
|
||||
if c == '\n' {
|
||||
ctx.lineLength = 0
|
||||
if ctx.prefix != "" {
|
||||
_, err = ctx.Buf.WriteString(ctx.prefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ctx *textifyTraverseCtx) breakLongLines(data string) []string {
|
||||
// only break lines when we are in blockquotes
|
||||
if ctx.blockquoteLevel == 0 {
|
||||
return []string{data}
|
||||
}
|
||||
var ret []string
|
||||
runes := []rune(data)
|
||||
l := len(runes)
|
||||
existing := ctx.lineLength
|
||||
if existing >= 74 {
|
||||
ret = append(ret, "\n")
|
||||
existing = 0
|
||||
}
|
||||
for l+existing > 74 {
|
||||
i := 74 - existing
|
||||
for i >= 0 && !unicode.IsSpace(runes[i]) {
|
||||
i--
|
||||
}
|
||||
if i == -1 {
|
||||
// no spaces, so go the other way
|
||||
i = 74 - existing
|
||||
for i < l && !unicode.IsSpace(runes[i]) {
|
||||
i++
|
||||
}
|
||||
}
|
||||
ret = append(ret, string(runes[:i])+"\n")
|
||||
for i < l && unicode.IsSpace(runes[i]) {
|
||||
i++
|
||||
}
|
||||
runes = runes[i:]
|
||||
l = len(runes)
|
||||
existing = 0
|
||||
}
|
||||
if len(runes) > 0 {
|
||||
ret = append(ret, string(runes))
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (ctx *textifyTraverseCtx) normalizeHrefLink(link string) string {
|
||||
link = strings.TrimSpace(link)
|
||||
link = strings.TrimPrefix(link, "mailto:")
|
||||
return link
|
||||
}
|
||||
|
||||
func getAttrVal(node *html.Node, attrName string) string {
|
||||
for _, attr := range node.Attr {
|
||||
if attr.Key == attrName {
|
||||
return attr.Val
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func FromHtmlNode(doc *html.Node) (string, error) {
|
||||
ctx := textifyTraverseCtx{
|
||||
Buf: bytes.Buffer{},
|
||||
}
|
||||
if err := ctx.traverse(doc); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
text := strings.TrimSpace(newlineRe.ReplaceAllString(
|
||||
strings.Replace(ctx.Buf.String(), "\n ", "\n", -1), "\n\n"))
|
||||
return text, nil
|
||||
|
||||
}
|
||||
|
||||
func FromReader(reader io.Reader) (string, error) {
|
||||
newReader, err := bom.NewReaderWithoutBom(reader)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
doc, err := html.Parse(newReader)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return FromHtmlNode(doc)
|
||||
}
|
||||
|
||||
func FromString(input string) (string, error) {
|
||||
bs := bom.CleanBom([]byte(input))
|
||||
text, err := FromReader(bytes.NewReader(bs))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return text, nil
|
||||
}
|
||||
674
vendor/github.com/jaytaylor/html2text/html2text_test.go
generated
vendored
Normal file
674
vendor/github.com/jaytaylor/html2text/html2text_test.go
generated
vendored
Normal file
@@ -0,0 +1,674 @@
|
||||
package html2text
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
destPath = "testdata"
|
||||
)
|
||||
|
||||
func TestParseUTF8(t *testing.T) {
|
||||
htmlFiles := []struct {
|
||||
file string
|
||||
keywordShouldNotExist string
|
||||
keywordShouldExist string
|
||||
}{
|
||||
{
|
||||
"utf8.html",
|
||||
"学习之道:美国公认学习第一书title",
|
||||
"次世界冠军赛上,我几近疯狂",
|
||||
},
|
||||
{
|
||||
"utf8_with_bom.xhtml",
|
||||
"1892年波兰文版序言title",
|
||||
"种新的波兰文本已成为必要",
|
||||
},
|
||||
}
|
||||
|
||||
for _, htmlFile := range htmlFiles {
|
||||
bs, err := ioutil.ReadFile(path.Join(destPath, htmlFile.file))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
text, err := FromReader(bytes.NewReader(bs))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !strings.Contains(text, htmlFile.keywordShouldExist) {
|
||||
t.Fatalf("keyword %s should exists in file %s", htmlFile.keywordShouldExist, htmlFile.file)
|
||||
}
|
||||
if strings.Contains(text, htmlFile.keywordShouldNotExist) {
|
||||
t.Fatalf("keyword %s should not exists in file %s", htmlFile.keywordShouldNotExist, htmlFile.file)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStrippingWhitespace(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
output string
|
||||
}{
|
||||
{
|
||||
"test text",
|
||||
"test text",
|
||||
},
|
||||
{
|
||||
" \ttext\ntext\n",
|
||||
"text text",
|
||||
},
|
||||
{
|
||||
" \na \n\t \n \n a \t",
|
||||
"a a",
|
||||
},
|
||||
{
|
||||
"test text",
|
||||
"test text",
|
||||
},
|
||||
{
|
||||
"test text ",
|
||||
"test text",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
assertString(t, testCase.input, testCase.output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParagraphsAndBreaks(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
output string
|
||||
}{
|
||||
{
|
||||
"Test text",
|
||||
"Test text",
|
||||
},
|
||||
{
|
||||
"Test text<br>",
|
||||
"Test text",
|
||||
},
|
||||
{
|
||||
"Test text<br>Test",
|
||||
"Test text\nTest",
|
||||
},
|
||||
{
|
||||
"<p>Test text</p>",
|
||||
"Test text",
|
||||
},
|
||||
{
|
||||
"<p>Test text</p><p>Test text</p>",
|
||||
"Test text\n\nTest text",
|
||||
},
|
||||
{
|
||||
"\n<p>Test text</p>\n\n\n\t<p>Test text</p>\n",
|
||||
"Test text\n\nTest text",
|
||||
},
|
||||
{
|
||||
"\n<p>Test text<br/>Test text</p>\n",
|
||||
"Test text\nTest text",
|
||||
},
|
||||
{
|
||||
"\n<p>Test text<br> \tTest text<br></p>\n",
|
||||
"Test text\nTest text",
|
||||
},
|
||||
{
|
||||
"Test text<br><BR />Test text",
|
||||
"Test text\n\nTest text",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
assertString(t, testCase.input, testCase.output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTables(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
output string
|
||||
}{
|
||||
{
|
||||
"<table><tr><td></td><td></td></tr></table>",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"<table><tr><td>cell1</td><td>cell2</td></tr></table>",
|
||||
"cell1 cell2",
|
||||
},
|
||||
{
|
||||
"<table><tr><td>row1</td></tr><tr><td>row2</td></tr></table>",
|
||||
"row1\nrow2",
|
||||
},
|
||||
{
|
||||
`<table>
|
||||
<tr><td>cell1-1</td><td>cell1-2</td></tr>
|
||||
<tr><td>cell2-1</td><td>cell2-2</td></tr>
|
||||
</table>`,
|
||||
"cell1-1 cell1-2\ncell2-1 cell2-2",
|
||||
},
|
||||
{
|
||||
"_<table><tr><td>cell</td></tr></table>_",
|
||||
"_\n\ncell\n\n_",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
assertString(t, testCase.input, testCase.output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStrippingLists(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
output string
|
||||
}{
|
||||
{
|
||||
"<ul></ul>",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"<ul><li>item</li></ul>_",
|
||||
"* item\n\n_",
|
||||
},
|
||||
{
|
||||
"<li class='123'>item 1</li> <li>item 2</li>\n_",
|
||||
"* item 1\n* item 2\n_",
|
||||
},
|
||||
{
|
||||
"<li>item 1</li> \t\n <li>item 2</li> <li> item 3</li>\n_",
|
||||
"* item 1\n* item 2\n* item 3\n_",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
assertString(t, testCase.input, testCase.output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinks(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
output string
|
||||
}{
|
||||
{
|
||||
`<a></a>`,
|
||||
``,
|
||||
},
|
||||
{
|
||||
`<a href=""></a>`,
|
||||
``,
|
||||
},
|
||||
{
|
||||
`<a href="http://example.com/"></a>`,
|
||||
`( http://example.com/ )`,
|
||||
},
|
||||
{
|
||||
`<a href="">Link</a>`,
|
||||
`Link`,
|
||||
},
|
||||
{
|
||||
`<a href="http://example.com/">Link</a>`,
|
||||
`Link ( http://example.com/ )`,
|
||||
},
|
||||
{
|
||||
`<a href="http://example.com/"><span class="a">Link</span></a>`,
|
||||
`Link ( http://example.com/ )`,
|
||||
},
|
||||
{
|
||||
"<a href='http://example.com/'>\n\t<span class='a'>Link</span>\n\t</a>",
|
||||
`Link ( http://example.com/ )`,
|
||||
},
|
||||
{
|
||||
"<a href='mailto:contact@example.org'>Contact Us</a>",
|
||||
`Contact Us ( contact@example.org )`,
|
||||
},
|
||||
{
|
||||
"<a href=\"http://example.com:80/~user?aaa=bb&c=d,e,f#foo\">Link</a>",
|
||||
`Link ( http://example.com:80/~user?aaa=bb&c=d,e,f#foo )`,
|
||||
},
|
||||
{
|
||||
"<a title='title' href=\"http://example.com/\">Link</a>",
|
||||
`Link ( http://example.com/ )`,
|
||||
},
|
||||
{
|
||||
"<a href=\" http://example.com/ \"> Link </a>",
|
||||
`Link ( http://example.com/ )`,
|
||||
},
|
||||
{
|
||||
"<a href=\"http://example.com/a/\">Link A</a> <a href=\"http://example.com/b/\">Link B</a>",
|
||||
`Link A ( http://example.com/a/ ) Link B ( http://example.com/b/ )`,
|
||||
},
|
||||
{
|
||||
"<a href=\"%%LINK%%\">Link</a>",
|
||||
`Link ( %%LINK%% )`,
|
||||
},
|
||||
{
|
||||
"<a href=\"[LINK]\">Link</a>",
|
||||
`Link ( [LINK] )`,
|
||||
},
|
||||
{
|
||||
"<a href=\"{LINK}\">Link</a>",
|
||||
`Link ( {LINK} )`,
|
||||
},
|
||||
{
|
||||
"<a href=\"[[!unsubscribe]]\">Link</a>",
|
||||
`Link ( [[!unsubscribe]] )`,
|
||||
},
|
||||
{
|
||||
"<p>This is <a href=\"http://www.google.com\" >link1</a> and <a href=\"http://www.google.com\" >link2 </a> is next.</p>",
|
||||
`This is link1 ( http://www.google.com ) and link2 ( http://www.google.com ) is next.`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
assertString(t, testCase.input, testCase.output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageAltTags(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
output string
|
||||
}{
|
||||
{
|
||||
`<img />`,
|
||||
``,
|
||||
},
|
||||
{
|
||||
`<img src="http://example.ru/hello.jpg" />`,
|
||||
``,
|
||||
},
|
||||
{
|
||||
`<img alt="Example"/>`,
|
||||
``,
|
||||
},
|
||||
{
|
||||
`<img src="http://example.ru/hello.jpg" alt="Example"/>`,
|
||||
``,
|
||||
},
|
||||
// Images do matter if they are in a link
|
||||
{
|
||||
`<a href="http://example.com/"><img src="http://example.ru/hello.jpg" alt="Example"/></a>`,
|
||||
`Example ( http://example.com/ )`,
|
||||
},
|
||||
{
|
||||
`<a href="http://example.com/"><img src="http://example.ru/hello.jpg" alt="Example"></a>`,
|
||||
`Example ( http://example.com/ )`,
|
||||
},
|
||||
{
|
||||
`<a href='http://example.com/'><img src='http://example.ru/hello.jpg' alt='Example'/></a>`,
|
||||
`Example ( http://example.com/ )`,
|
||||
},
|
||||
{
|
||||
`<a href='http://example.com/'><img src='http://example.ru/hello.jpg' alt='Example'></a>`,
|
||||
`Example ( http://example.com/ )`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
assertString(t, testCase.input, testCase.output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeadings(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
output string
|
||||
}{
|
||||
{
|
||||
"<h1>Test</h1>",
|
||||
"****\nTest\n****",
|
||||
},
|
||||
{
|
||||
"\t<h1>\nTest</h1> ",
|
||||
"****\nTest\n****",
|
||||
},
|
||||
{
|
||||
"\t<h1>\nTest line 1<br>Test 2</h1> ",
|
||||
"***********\nTest line 1\nTest 2\n***********",
|
||||
},
|
||||
{
|
||||
"<h1>Test</h1> <h1>Test</h1>",
|
||||
"****\nTest\n****\n\n****\nTest\n****",
|
||||
},
|
||||
{
|
||||
"<h2>Test</h2>",
|
||||
"----\nTest\n----",
|
||||
},
|
||||
{
|
||||
"<h1><a href='http://example.com/'>Test</a></h1>",
|
||||
"****************************\nTest ( http://example.com/ )\n****************************",
|
||||
},
|
||||
{
|
||||
"<h3> <span class='a'>Test </span></h3>",
|
||||
"Test\n----",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
assertString(t, testCase.input, testCase.output)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestBold(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
output string
|
||||
}{
|
||||
{
|
||||
"<b>Test</b>",
|
||||
"*Test*",
|
||||
},
|
||||
{
|
||||
"\t<b>Test</b> ",
|
||||
"*Test*",
|
||||
},
|
||||
{
|
||||
"\t<b>Test line 1<br>Test 2</b> ",
|
||||
"*Test line 1\nTest 2*",
|
||||
},
|
||||
{
|
||||
"<b>Test</b> <b>Test</b>",
|
||||
"*Test* *Test*",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
assertString(t, testCase.input, testCase.output)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestDiv(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
output string
|
||||
}{
|
||||
{
|
||||
"<div>Test</div>",
|
||||
"Test",
|
||||
},
|
||||
{
|
||||
"\t<div>Test</div> ",
|
||||
"Test",
|
||||
},
|
||||
{
|
||||
"<div>Test line 1<div>Test 2</div></div>",
|
||||
"Test line 1\nTest 2",
|
||||
},
|
||||
{
|
||||
"Test 1<div>Test 2</div> <div>Test 3</div>Test 4",
|
||||
"Test 1\nTest 2\nTest 3\nTest 4",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
assertString(t, testCase.input, testCase.output)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestBlockquotes(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
output string
|
||||
}{
|
||||
{
|
||||
"<div>level 0<blockquote>level 1<br><blockquote>level 2</blockquote>level 1</blockquote><div>level 0</div></div>",
|
||||
"level 0\n> \n> level 1\n> \n>> level 2\n> \n> level 1\n\nlevel 0",
|
||||
},
|
||||
{
|
||||
"<blockquote>Test</blockquote>Test",
|
||||
"> \n> Test\n\nTest",
|
||||
},
|
||||
{
|
||||
"\t<blockquote> \nTest<br></blockquote> ",
|
||||
"> \n> Test\n>",
|
||||
},
|
||||
{
|
||||
"\t<blockquote> \nTest line 1<br>Test 2</blockquote> ",
|
||||
"> \n> Test line 1\n> Test 2",
|
||||
},
|
||||
{
|
||||
"<blockquote>Test</blockquote> <blockquote>Test</blockquote> Other Test",
|
||||
"> \n> Test\n\n> \n> Test\n\nOther Test",
|
||||
},
|
||||
{
|
||||
"<blockquote>Lorem ipsum Commodo id consectetur pariatur ea occaecat minim aliqua ad sit consequat quis ex commodo Duis incididunt eu mollit consectetur fugiat voluptate dolore in pariatur in commodo occaecat Ut occaecat velit esse labore aute quis commodo non sit dolore officia Excepteur cillum amet cupidatat culpa velit labore ullamco dolore mollit elit in aliqua dolor irure do</blockquote>",
|
||||
"> \n> Lorem ipsum Commodo id consectetur pariatur ea occaecat minim aliqua ad\n> sit consequat quis ex commodo Duis incididunt eu mollit consectetur fugiat\n> voluptate dolore in pariatur in commodo occaecat Ut occaecat velit esse\n> labore aute quis commodo non sit dolore officia Excepteur cillum amet\n> cupidatat culpa velit labore ullamco dolore mollit elit in aliqua dolor\n> irure do",
|
||||
},
|
||||
{
|
||||
"<blockquote>Lorem<b>ipsum</b><b>Commodo</b><b>id</b><b>consectetur</b><b>pariatur</b><b>ea</b><b>occaecat</b><b>minim</b><b>aliqua</b><b>ad</b><b>sit</b><b>consequat</b><b>quis</b><b>ex</b><b>commodo</b><b>Duis</b><b>incididunt</b><b>eu</b><b>mollit</b><b>consectetur</b><b>fugiat</b><b>voluptate</b><b>dolore</b><b>in</b><b>pariatur</b><b>in</b><b>commodo</b><b>occaecat</b><b>Ut</b><b>occaecat</b><b>velit</b><b>esse</b><b>labore</b><b>aute</b><b>quis</b><b>commodo</b><b>non</b><b>sit</b><b>dolore</b><b>officia</b><b>Excepteur</b><b>cillum</b><b>amet</b><b>cupidatat</b><b>culpa</b><b>velit</b><b>labore</b><b>ullamco</b><b>dolore</b><b>mollit</b><b>elit</b><b>in</b><b>aliqua</b><b>dolor</b><b>irure</b><b>do</b></blockquote>",
|
||||
"> \n> Lorem *ipsum* *Commodo* *id* *consectetur* *pariatur* *ea* *occaecat* *minim*\n> *aliqua* *ad* *sit* *consequat* *quis* *ex* *commodo* *Duis* *incididunt* *eu*\n> *mollit* *consectetur* *fugiat* *voluptate* *dolore* *in* *pariatur* *in* *commodo*\n> *occaecat* *Ut* *occaecat* *velit* *esse* *labore* *aute* *quis* *commodo*\n> *non* *sit* *dolore* *officia* *Excepteur* *cillum* *amet* *cupidatat* *culpa*\n> *velit* *labore* *ullamco* *dolore* *mollit* *elit* *in* *aliqua* *dolor* *irure*\n> *do*",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
assertString(t, testCase.input, testCase.output)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestIgnoreStylesScriptsHead(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
output string
|
||||
}{
|
||||
{
|
||||
"<style>Test</style>",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"<style type=\"text/css\">body { color: #fff; }</style>",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"<link rel=\"stylesheet\" href=\"main.css\">",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"<script>Test</script>",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"<script src=\"main.js\"></script>",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"<script type=\"text/javascript\" src=\"main.js\"></script>",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"<script type=\"text/javascript\">Test</script>",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"<script type=\"text/ng-template\" id=\"template.html\"><a href=\"http://google.com\">Google</a></script>",
|
||||
"",
|
||||
},
|
||||
{
|
||||
"<script type=\"bla-bla-bla\" id=\"template.html\">Test</script>",
|
||||
"",
|
||||
},
|
||||
{
|
||||
`<html><head><title>Title</title></head><body></body></html>`,
|
||||
"",
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
assertString(t, testCase.input, testCase.output)
|
||||
}
|
||||
}
|
||||
|
||||
func TestText(t *testing.T) {
|
||||
testCases := []struct {
|
||||
input string
|
||||
expr string
|
||||
}{
|
||||
{
|
||||
`<li>
|
||||
<a href="/new" data-ga-click="Header, create new repository, icon:repo"><span class="octicon octicon-repo"></span> New repository</a>
|
||||
</li>`,
|
||||
`\* New repository \( /new \)`,
|
||||
},
|
||||
{
|
||||
`hi
|
||||
|
||||
<br>
|
||||
|
||||
hello <a href="https://google.com">google</a>
|
||||
<br><br>
|
||||
test<p>List:</p>
|
||||
|
||||
<ul>
|
||||
<li><a href="foo">Foo</a></li>
|
||||
<li><a href="http://www.microshwhat.com/bar/soapy">Barsoap</a></li>
|
||||
<li>Baz</li>
|
||||
</ul>
|
||||
`,
|
||||
`hi
|
||||
hello google \( https://google.com \)
|
||||
|
||||
test
|
||||
|
||||
List:
|
||||
|
||||
\* Foo \( foo \)
|
||||
\* Barsoap \( http://www.microshwhat.com/bar/soapy \)
|
||||
\* Baz`,
|
||||
},
|
||||
// Malformed input html.
|
||||
{
|
||||
`hi
|
||||
|
||||
hello <a href="https://google.com">google</a>
|
||||
|
||||
test<p>List:</p>
|
||||
|
||||
<ul>
|
||||
<li><a href="foo">Foo</a>
|
||||
<li><a href="/
|
||||
bar/baz">Bar</a>
|
||||
<li>Baz</li>
|
||||
</ul>
|
||||
`,
|
||||
`hi hello google \( https://google.com \) test
|
||||
|
||||
List:
|
||||
|
||||
\* Foo \( foo \)
|
||||
\* Bar \( /\n[ \t]+bar/baz \)
|
||||
\* Baz`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
assertRegexp(t, testCase.input, testCase.expr)
|
||||
}
|
||||
}
|
||||
|
||||
type StringMatcher interface {
|
||||
MatchString(string) bool
|
||||
String() string
|
||||
}
|
||||
|
||||
type RegexpStringMatcher string
|
||||
|
||||
func (m RegexpStringMatcher) MatchString(str string) bool {
|
||||
return regexp.MustCompile(string(m)).MatchString(str)
|
||||
}
|
||||
func (m RegexpStringMatcher) String() string {
|
||||
return string(m)
|
||||
}
|
||||
|
||||
type ExactStringMatcher string
|
||||
|
||||
func (m ExactStringMatcher) MatchString(str string) bool {
|
||||
return string(m) == str
|
||||
}
|
||||
func (m ExactStringMatcher) String() string {
|
||||
return string(m)
|
||||
}
|
||||
|
||||
func assertRegexp(t *testing.T, input string, outputRE string) {
|
||||
assertPlaintext(t, input, RegexpStringMatcher(outputRE))
|
||||
}
|
||||
|
||||
func assertString(t *testing.T, input string, output string) {
|
||||
assertPlaintext(t, input, ExactStringMatcher(output))
|
||||
}
|
||||
|
||||
func assertPlaintext(t *testing.T, input string, matcher StringMatcher) {
|
||||
text, err := FromString(input)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !matcher.MatchString(text) {
|
||||
t.Errorf("Input did not match expression\n"+
|
||||
"Input:\n>>>>\n%s\n<<<<\n\n"+
|
||||
"Output:\n>>>>\n%s\n<<<<\n\n"+
|
||||
"Expected output:\n>>>>\n%s\n<<<<\n\n",
|
||||
input, text, matcher.String())
|
||||
} else {
|
||||
t.Logf("input:\n\n%s\n\n\n\noutput:\n\n%s\n", input, text)
|
||||
}
|
||||
}
|
||||
|
||||
func Example() {
|
||||
inputHtml := `
|
||||
<html>
|
||||
<head>
|
||||
<title>My Mega Service</title>
|
||||
<link rel=\"stylesheet\" href=\"main.css\">
|
||||
<style type=\"text/css\">body { color: #fff; }</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div class="logo">
|
||||
<a href="http://mymegaservice.com/"><img src="/logo-image.jpg" alt="Mega Service"/></a>
|
||||
</div>
|
||||
|
||||
<h1>Welcome to your new account on my service!</h1>
|
||||
|
||||
<p>
|
||||
Here is some more information:
|
||||
|
||||
<ul>
|
||||
<li>Link 1: <a href="https://example.com">Example.com</a></li>
|
||||
<li>Link 2: <a href="https://example2.com">Example2.com</a></li>
|
||||
<li>Something else</li>
|
||||
</ul>
|
||||
</p>
|
||||
</body>
|
||||
</html>
|
||||
`
|
||||
|
||||
text, err := FromString(inputHtml)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Println(text)
|
||||
|
||||
// Output:
|
||||
// Mega Service ( http://mymegaservice.com/ )
|
||||
//
|
||||
// ******************************************
|
||||
// Welcome to your new account on my service!
|
||||
// ******************************************
|
||||
//
|
||||
// Here is some more information:
|
||||
//
|
||||
// * Link 1: Example.com ( https://example.com )
|
||||
// * Link 2: Example2.com ( https://example2.com )
|
||||
// * Something else
|
||||
}
|
||||
22
vendor/github.com/jaytaylor/html2text/testdata/utf8.html
generated
vendored
Executable file
22
vendor/github.com/jaytaylor/html2text/testdata/utf8.html
generated
vendored
Executable file
@@ -0,0 +1,22 @@
|
||||
<?xml version='1.0' encoding='utf-8'?>
|
||||
<html xmlns="http://www.w3.org/1999/xhtml">
|
||||
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
|
||||
<title>学习之道:美国公认学习第一书title</title>
|
||||
<link href="stylesheet.css" rel="stylesheet" type="text/css" />
|
||||
<link href="page_styles.css" rel="stylesheet" type="text/css" />
|
||||
</head>
|
||||
|
||||
<body class="calibre">
|
||||
<p id="filepos9452" class="calibre_"><span class="calibre6"><span class="bold">写在前面的话</span></span>
|
||||
</p>
|
||||
<p class="calibre_12">在台湾的那次世界冠军赛上,我几近疯狂,直至两年后的今天,我仍沉浸在这次的经历中。这是我生平第一次如此深入地审视我自己,甚至是第一次尝试审视自己。这个过程令人很是兴奋,同时也有点感觉怪异。我重新认识了自我,看到了自己的另外一面,自己从未发觉的另外一面。为了生存,为了取胜,我成了一名角斗士,彻头彻尾,简单纯粹。我并没有意识到这一角色早已在我的心中生根发芽,呼之欲出。也许,他的出现已是不可避免。</p>
|
||||
<p class="calibre_7">而我这全新的一面,与我一直熟识的那个乔希,那个曾经害怕黑暗的孩子,那个象棋手,那个狂热于雨水、反复诵读杰克·克鲁亚克作品的年轻人之间,又有什么样的联系呢?这些都是我正在努力弄清楚的问题。</p>
|
||||
<p class="calibre_7">自台湾赛事之后,我急切非常,一心想要回到训练中去,摆脱自己已经达到巅峰的想法。在过去的两年中,我已经重新开始。这是一个新的起点。前方的路还很长,有待进一步的探索。</p>
|
||||
<p class="calibre_7">这本书的创作耗费了相当多的时间和精力。在成长的过程中,我在我的小房间里从未想过等待我的会是这样的战斗。在创作中,我的思想逐渐成熟;爱恋从分崩离析,到失而复得,世界冠军头衔从失之交臂,到囊中取物。如果说在我人生的第一个二十九年中,我学到了什么,那就是,我们永远无法预测结局,无论是重要的比赛、冒险,还是轰轰烈烈的爱情。我们唯一可以肯定的只有,出乎意料。不管我们做了多么万全的准备,在生活的真实场景中,我们总是会处于陌生的境地。我们也许会无法冷静,失去理智,感觉似乎整个世界都在针对我们。在这个时候,我们所要做的是要付出加倍的努力,要表现得比预想得更好。我认为,关键在于准备好随机应变,准备好在所能想象的高压下发挥出创造力。</p>
|
||||
<p class="calibre_7">读者朋友们,我非常希望你们在读过这本书后,可以得到启发,甚至会得到触动,从而能够根据各自的天赋与特长,去实现自己的梦想。这就是我写作此书的目的。我在字里行间所传达的理念曾经使我受益匪浅,我很希望它们可以为大家提供一个基本的框架和方向。如果我的方法言之有理,那么就请接受它,琢磨它,并加之自己的见解。忘记我的那些数字。真正的掌握需要通过自己发现一些最能够引起共鸣的信息,并将其彻底地融合进来,直至成为一体,这样我们才能随心所欲地驾驭它。</p>
|
||||
<div class="mbp_pagebreak" id="calibre_pb_4"></div>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
24
vendor/github.com/jaytaylor/html2text/testdata/utf8_with_bom.xhtml
generated
vendored
Executable file
24
vendor/github.com/jaytaylor/html2text/testdata/utf8_with_bom.xhtml
generated
vendored
Executable file
@@ -0,0 +1,24 @@
|
||||
<?xml version="1.0" encoding="utf-8" ?>
|
||||
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="zh-CN">
|
||||
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8" />
|
||||
<title>1892年波兰文版序言title</title>
|
||||
<link rel="stylesheet" href="css/stylesheet.css" type="text/css" />
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div id="page30" />
|
||||
<h2 id="CHP2-6">1892年波兰文版序言<a id="wzyy_18_30" href="#wz_18_30"><sup>[18]</sup></a></h2>
|
||||
<p>出版共产主义宣言的一种新的波兰文本已成为必要,这一事实,引起了许多感想。</p>
|
||||
<p>首先值得注意的是,近来宣言在一定程度上已成为欧洲大陆大工业发展的一种尺度。一个国家的大工业越发展,该国工人中想认清自己作为工人阶级在有产阶级面前所处地位的要求就越增加,他们中间的社会主义运动也越扩大,因而对宣言的需求也越增长。这样,根据宣言用某国文字销行的份数,不仅能够相当确切地断定该国工人运动的状况,而且还能够相当确切地断定该国大工业发展的程度。</p>
|
||||
<p>因此,波兰文的新版本标志着波兰工业的决定性进步。从十年前发表的上一个版本以来确实有了这种进步,对此丝毫不容置疑。俄国的波兰,会议的波兰<a id="wzyy_19_30" href="#wz_19_30"><sup>[19]</sup></a>,成了俄罗斯帝国巨大的工业区。俄国大工业是零星分散的,一部分在芬兰湾沿岸,一部分在中央区(莫斯科和弗拉基米尔),第三部分在黑海和亚速海沿岸,还有另一些散布在别处;而波兰工业则紧缩于相对狭小的地区,享受到由这种积聚引起的长处与短处。这种长处是竞争着的俄罗斯工厂主所承认的,他们要求实行保护关税以对付波兰,尽管他们渴望使波兰人俄罗斯化。这种短处,对波兰工厂主与俄罗斯政府来说,表现在社会主义思想在波兰工人中间的迅速传播和对宣言需求的增长。</p>
|
||||
<p>但是,波兰工业的迅速发展——它超过了俄国工业——本身<a id="page31" />是波兰人民的坚强生命力的一个新证明,是波兰人民临近的民族复兴的一个新保证。而一个独立强盛的波兰的复兴,不只是一件同波兰人有关、而且是同我们大家有关的事情。只有当每个民族在自己内部完全自主时,欧洲各民族间真诚的国际合作才是可能的。1848年革命在无产阶级旗帜下,使无产阶级的战士最终只作了资产阶级的工作,这次革命通过自己遗嘱的执行者路易·波拿巴和俾斯麦也实现了意大利、德国和匈牙利的独立。然而波兰,它从1792年以来为革命做的比所有这三个国家总共做的还要多,而当它1863年失败于强大十倍的俄军的时候,人们却把它抛弃不顾了。贵族既未能保持住、也未能重新争得波兰的独立;今天波兰的独立对资产阶级至少是无所谓的。然而波兰的独立对于欧洲各民族和谐的合作是必需的。这种独立只有年轻的波兰无产阶级才能争得,而且在它的手中会很好地保持住。因为欧洲所有其余的工人都象波兰工人自己一样也需要波兰的独立。</p>
|
||||
<p>弗·恩格斯</p>
|
||||
<p>1892年2月10日于伦敦</p>
|
||||
<div id="page74" />
|
||||
<div><a id="wz_18_30" href="#wzyy_18_30">[18]</a> 恩格斯用德文为《宣言》新的波兰文本写了这篇序言。1892年由波兰社会主义者在伦敦办的《黎明》杂志社出版。序言寄出后,恩格斯写信给门德尔森(1892年2月11日),信中说,他很愿意学会波兰文,并且深入研究波兰工人运动的发展,以便能够为《宣言》的下一版写一篇更详细的序言。——第20页</div>
|
||||
<div><a id="wz_19_30" href="#wzyy_19_30">[19]</a> 指维也纳会议的波兰,即根据1814—1815年维也纳会议的决定,以波兰王国的正式名义割给俄国的那部分波兰土地。——第20页</div>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
2
vendor/github.com/lib/pq/README.md
generated
vendored
2
vendor/github.com/lib/pq/README.md
generated
vendored
@@ -1,6 +1,6 @@
|
||||
# pq - A pure Go postgres driver for Go's database/sql package
|
||||
|
||||
[](https://travis-ci.org/lib/pq)
|
||||
[](https://travis-ci.org/lib/pq)
|
||||
|
||||
## Install
|
||||
|
||||
|
||||
92
vendor/github.com/lib/pq/conn.go
generated
vendored
92
vendor/github.com/lib/pq/conn.go
generated
vendored
@@ -133,7 +133,7 @@ type conn struct {
|
||||
// Handle driver-side settings in parsed connection string.
|
||||
func (c *conn) handleDriverSettings(o values) (err error) {
|
||||
boolSetting := func(key string, val *bool) error {
|
||||
if value := o.Get(key); value != "" {
|
||||
if value, ok := o[key]; ok {
|
||||
if value == "yes" {
|
||||
*val = true
|
||||
} else if value == "no" {
|
||||
@@ -158,8 +158,7 @@ func (c *conn) handleDriverSettings(o values) (err error) {
|
||||
|
||||
func (c *conn) handlePgpass(o values) {
|
||||
// if a password was supplied, do not process .pgpass
|
||||
_, ok := o["password"]
|
||||
if ok {
|
||||
if _, ok := o["password"]; ok {
|
||||
return
|
||||
}
|
||||
filename := os.Getenv("PGPASSFILE")
|
||||
@@ -187,11 +186,11 @@ func (c *conn) handlePgpass(o values) {
|
||||
}
|
||||
defer file.Close()
|
||||
scanner := bufio.NewScanner(io.Reader(file))
|
||||
hostname := o.Get("host")
|
||||
hostname := o["host"]
|
||||
ntw, _ := network(o)
|
||||
port := o.Get("port")
|
||||
db := o.Get("dbname")
|
||||
username := o.Get("user")
|
||||
port := o["port"]
|
||||
db := o["dbname"]
|
||||
username := o["user"]
|
||||
// From: https://github.com/tg/pgpass/blob/master/reader.go
|
||||
getFields := func(s string) []string {
|
||||
fs := make([]string, 0, 5)
|
||||
@@ -256,13 +255,13 @@ func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
|
||||
// * Very low precedence defaults applied in every situation
|
||||
// * Environment variables
|
||||
// * Explicitly passed connection information
|
||||
o.Set("host", "localhost")
|
||||
o.Set("port", "5432")
|
||||
o["host"] = "localhost"
|
||||
o["port"] = "5432"
|
||||
// N.B.: Extra float digits should be set to 3, but that breaks
|
||||
// Postgres 8.4 and older, where the max is 2.
|
||||
o.Set("extra_float_digits", "2")
|
||||
o["extra_float_digits"] = "2"
|
||||
for k, v := range parseEnviron(os.Environ()) {
|
||||
o.Set(k, v)
|
||||
o[k] = v
|
||||
}
|
||||
|
||||
if strings.HasPrefix(name, "postgres://") || strings.HasPrefix(name, "postgresql://") {
|
||||
@@ -277,9 +276,9 @@ func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
|
||||
}
|
||||
|
||||
// Use the "fallback" application name if necessary
|
||||
if fallback := o.Get("fallback_application_name"); fallback != "" {
|
||||
if !o.Isset("application_name") {
|
||||
o.Set("application_name", fallback)
|
||||
if fallback, ok := o["fallback_application_name"]; ok {
|
||||
if _, ok := o["application_name"]; !ok {
|
||||
o["application_name"] = fallback
|
||||
}
|
||||
}
|
||||
|
||||
@@ -290,29 +289,29 @@ func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
|
||||
// parsing its value is not worth it. Instead, we always explicitly send
|
||||
// client_encoding as a separate run-time parameter, which should override
|
||||
// anything set in options.
|
||||
if enc := o.Get("client_encoding"); enc != "" && !isUTF8(enc) {
|
||||
if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) {
|
||||
return nil, errors.New("client_encoding must be absent or 'UTF8'")
|
||||
}
|
||||
o.Set("client_encoding", "UTF8")
|
||||
o["client_encoding"] = "UTF8"
|
||||
// DateStyle needs a similar treatment.
|
||||
if datestyle := o.Get("datestyle"); datestyle != "" {
|
||||
if datestyle, ok := o["datestyle"]; ok {
|
||||
if datestyle != "ISO, MDY" {
|
||||
panic(fmt.Sprintf("setting datestyle must be absent or %v; got %v",
|
||||
"ISO, MDY", datestyle))
|
||||
}
|
||||
} else {
|
||||
o.Set("datestyle", "ISO, MDY")
|
||||
o["datestyle"] = "ISO, MDY"
|
||||
}
|
||||
|
||||
// If a user is not provided by any other means, the last
|
||||
// resort is to use the current operating system provided user
|
||||
// name.
|
||||
if o.Get("user") == "" {
|
||||
if _, ok := o["user"]; !ok {
|
||||
u, err := userCurrent()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
o.Set("user", u)
|
||||
o["user"] = u
|
||||
}
|
||||
}
|
||||
|
||||
@@ -335,7 +334,7 @@ func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
|
||||
cn.startup(o)
|
||||
|
||||
// reset the deadline, in case one was set (see dial)
|
||||
if timeout := o.Get("connect_timeout"); timeout != "" && timeout != "0" {
|
||||
if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
|
||||
err = cn.c.SetDeadline(time.Time{})
|
||||
}
|
||||
return cn, err
|
||||
@@ -349,7 +348,7 @@ func dial(d Dialer, o values) (net.Conn, error) {
|
||||
}
|
||||
|
||||
// Zero or not specified means wait indefinitely.
|
||||
if timeout := o.Get("connect_timeout"); timeout != "" && timeout != "0" {
|
||||
if timeout, ok := o["connect_timeout"]; ok && timeout != "0" {
|
||||
seconds, err := strconv.ParseInt(timeout, 10, 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err)
|
||||
@@ -371,31 +370,18 @@ func dial(d Dialer, o values) (net.Conn, error) {
|
||||
}
|
||||
|
||||
func network(o values) (string, string) {
|
||||
host := o.Get("host")
|
||||
host := o["host"]
|
||||
|
||||
if strings.HasPrefix(host, "/") {
|
||||
sockPath := path.Join(host, ".s.PGSQL."+o.Get("port"))
|
||||
sockPath := path.Join(host, ".s.PGSQL."+o["port"])
|
||||
return "unix", sockPath
|
||||
}
|
||||
|
||||
return "tcp", net.JoinHostPort(host, o.Get("port"))
|
||||
return "tcp", net.JoinHostPort(host, o["port"])
|
||||
}
|
||||
|
||||
type values map[string]string
|
||||
|
||||
func (vs values) Set(k, v string) {
|
||||
vs[k] = v
|
||||
}
|
||||
|
||||
func (vs values) Get(k string) (v string) {
|
||||
return vs[k]
|
||||
}
|
||||
|
||||
func (vs values) Isset(k string) bool {
|
||||
_, ok := vs[k]
|
||||
return ok
|
||||
}
|
||||
|
||||
// scanner implements a tokenizer for libpq-style option strings.
|
||||
type scanner struct {
|
||||
s []rune
|
||||
@@ -466,7 +452,7 @@ func parseOpts(name string, o values) error {
|
||||
// Skip any whitespace after the =
|
||||
if r, ok = s.SkipSpaces(); !ok {
|
||||
// If we reach the end here, the last value is just an empty string as per libpq.
|
||||
o.Set(string(keyRunes), "")
|
||||
o[string(keyRunes)] = ""
|
||||
break
|
||||
}
|
||||
|
||||
@@ -501,7 +487,7 @@ func parseOpts(name string, o values) error {
|
||||
}
|
||||
}
|
||||
|
||||
o.Set(string(keyRunes), string(valRunes))
|
||||
o[string(keyRunes)] = string(valRunes)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -665,6 +651,12 @@ func (cn *conn) simpleQuery(q string) (res *rows, err error) {
|
||||
cn: cn,
|
||||
}
|
||||
}
|
||||
// Set the result and tag to the last command complete if there wasn't a
|
||||
// query already run. Although queries usually return from here and cede
|
||||
// control to Next, a query with zero results does not.
|
||||
if t == 'C' && res.colNames == nil {
|
||||
res.result, res.tag = cn.parseComplete(r.string())
|
||||
}
|
||||
res.done = true
|
||||
case 'Z':
|
||||
cn.processReadyForQuery(r)
|
||||
@@ -1119,7 +1111,7 @@ func (cn *conn) auth(r *readBuf, o values) {
|
||||
// OK
|
||||
case 3:
|
||||
w := cn.writeBuf('p')
|
||||
w.string(o.Get("password"))
|
||||
w.string(o["password"])
|
||||
cn.send(w)
|
||||
|
||||
t, r := cn.recv()
|
||||
@@ -1133,7 +1125,7 @@ func (cn *conn) auth(r *readBuf, o values) {
|
||||
case 5:
|
||||
s := string(r.next(4))
|
||||
w := cn.writeBuf('p')
|
||||
w.string("md5" + md5s(md5s(o.Get("password")+o.Get("user"))+s))
|
||||
w.string("md5" + md5s(md5s(o["password"]+o["user"])+s))
|
||||
cn.send(w)
|
||||
|
||||
t, r := cn.recv()
|
||||
@@ -1333,6 +1325,8 @@ type rows struct {
|
||||
colFmts []format
|
||||
done bool
|
||||
rb readBuf
|
||||
result driver.Result
|
||||
tag string
|
||||
}
|
||||
|
||||
func (rs *rows) Close() error {
|
||||
@@ -1356,6 +1350,17 @@ func (rs *rows) Columns() []string {
|
||||
return rs.colNames
|
||||
}
|
||||
|
||||
func (rs *rows) Result() driver.Result {
|
||||
if rs.result == nil {
|
||||
return emptyRows
|
||||
}
|
||||
return rs.result
|
||||
}
|
||||
|
||||
func (rs *rows) Tag() string {
|
||||
return rs.tag
|
||||
}
|
||||
|
||||
func (rs *rows) Next(dest []driver.Value) (err error) {
|
||||
if rs.done {
|
||||
return io.EOF
|
||||
@@ -1373,6 +1378,9 @@ func (rs *rows) Next(dest []driver.Value) (err error) {
|
||||
case 'E':
|
||||
err = parseError(&rs.rb)
|
||||
case 'C', 'I':
|
||||
if t == 'C' {
|
||||
rs.result, rs.tag = conn.parseComplete(rs.rb.string())
|
||||
}
|
||||
continue
|
||||
case 'Z':
|
||||
conn.processReadyForQuery(&rs.rb)
|
||||
|
||||
35
vendor/github.com/lib/pq/conn_go18.go
generated
vendored
35
vendor/github.com/lib/pq/conn_go18.go
generated
vendored
@@ -14,10 +14,7 @@ func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.Na
|
||||
for i, nv := range args {
|
||||
list[i] = nv.Value
|
||||
}
|
||||
var closed chan<- struct{}
|
||||
if ctx.Done() != nil {
|
||||
closed = watchCancel(ctx, cn.cancel)
|
||||
}
|
||||
closed := cn.watchCancel(ctx)
|
||||
r, err := cn.query(query, list)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -33,8 +30,7 @@ func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.Nam
|
||||
list[i] = nv.Value
|
||||
}
|
||||
|
||||
if ctx.Done() != nil {
|
||||
closed := watchCancel(ctx, cn.cancel)
|
||||
if closed := cn.watchCancel(ctx); closed != nil {
|
||||
defer close(closed)
|
||||
}
|
||||
|
||||
@@ -53,22 +49,23 @@ func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx,
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ctx.Done() != nil {
|
||||
cn.txnClosed = watchCancel(ctx, cn.cancel)
|
||||
}
|
||||
cn.txnClosed = cn.watchCancel(ctx)
|
||||
return tx, nil
|
||||
}
|
||||
|
||||
func watchCancel(ctx context.Context, cancel func()) chan<- struct{} {
|
||||
closed := make(chan struct{})
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
cancel()
|
||||
case <-closed:
|
||||
}
|
||||
}()
|
||||
return closed
|
||||
func (cn *conn) watchCancel(ctx context.Context) chan<- struct{} {
|
||||
if done := ctx.Done(); done != nil {
|
||||
closed := make(chan struct{})
|
||||
go func() {
|
||||
select {
|
||||
case <-done:
|
||||
cn.cancel()
|
||||
case <-closed:
|
||||
}
|
||||
}()
|
||||
return closed
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cn *conn) cancel() {
|
||||
|
||||
108
vendor/github.com/lib/pq/conn_test.go
generated
vendored
108
vendor/github.com/lib/pq/conn_test.go
generated
vendored
@@ -191,7 +191,7 @@ localhost:*:*:*:pass_C
|
||||
pgpass.Close()
|
||||
|
||||
assertPassword := func(extra values, expected string) {
|
||||
o := &values{
|
||||
o := values{
|
||||
"host": "localhost",
|
||||
"sslmode": "disable",
|
||||
"connect_timeout": "20",
|
||||
@@ -203,11 +203,11 @@ localhost:*:*:*:pass_C
|
||||
"datestyle": "ISO, MDY",
|
||||
}
|
||||
for k, v := range extra {
|
||||
(*o)[k] = v
|
||||
o[k] = v
|
||||
}
|
||||
(&conn{}).handlePgpass(*o)
|
||||
if o.Get("password") != expected {
|
||||
t.Fatalf("For %v expected %s got %s", extra, expected, o.Get("password"))
|
||||
(&conn{}).handlePgpass(o)
|
||||
if pw := o["password"]; pw != expected {
|
||||
t.Fatalf("For %v expected %s got %s", extra, expected, pw)
|
||||
}
|
||||
}
|
||||
// wrong permissions for the pgpass file means it should be ignored
|
||||
@@ -686,17 +686,28 @@ func TestCloseBadConn(t *testing.T) {
|
||||
if err := cn.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// During the Go 1.9 cycle, https://github.com/golang/go/commit/3792db5
|
||||
// changed this error from
|
||||
//
|
||||
// net.errClosing = errors.New("use of closed network connection")
|
||||
//
|
||||
// to
|
||||
//
|
||||
// internal/poll.ErrClosing = errors.New("use of closed file or network connection")
|
||||
const errClosing = "use of closed"
|
||||
|
||||
// Verify write after closing fails.
|
||||
if _, err := nc.Write(nil); err == nil {
|
||||
t.Fatal("expected error")
|
||||
} else if !strings.Contains(err.Error(), "use of closed network connection") {
|
||||
t.Fatalf("expected use of closed network connection error, got %s", err)
|
||||
} else if !strings.Contains(err.Error(), errClosing) {
|
||||
t.Fatalf("expected %s error, got %s", errClosing, err)
|
||||
}
|
||||
// Verify second close fails.
|
||||
if err := cn.Close(); err == nil {
|
||||
t.Fatal("expected error")
|
||||
} else if !strings.Contains(err.Error(), "use of closed network connection") {
|
||||
t.Fatalf("expected use of closed network connection error, got %s", err)
|
||||
} else if !strings.Contains(err.Error(), errClosing) {
|
||||
t.Fatalf("expected %s error, got %s", errClosing, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1493,3 +1504,82 @@ func TestQuoteIdentifier(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRowsResultTag(t *testing.T) {
|
||||
type ResultTag interface {
|
||||
Result() driver.Result
|
||||
Tag() string
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
query string
|
||||
tag string
|
||||
ra int64
|
||||
}{
|
||||
{
|
||||
query: "CREATE TEMP TABLE temp (a int)",
|
||||
tag: "CREATE TABLE",
|
||||
},
|
||||
{
|
||||
query: "INSERT INTO temp VALUES (1), (2)",
|
||||
tag: "INSERT",
|
||||
ra: 2,
|
||||
},
|
||||
{
|
||||
query: "SELECT 1",
|
||||
},
|
||||
// A SELECT anywhere should take precedent.
|
||||
{
|
||||
query: "SELECT 1; INSERT INTO temp VALUES (1), (2)",
|
||||
},
|
||||
{
|
||||
query: "INSERT INTO temp VALUES (1), (2); SELECT 1",
|
||||
},
|
||||
// Multiple statements that don't return rows should return the last tag.
|
||||
{
|
||||
query: "CREATE TEMP TABLE t (a int); DROP TABLE t",
|
||||
tag: "DROP TABLE",
|
||||
},
|
||||
// Ensure a rows-returning query in any position among various tags-returing
|
||||
// statements will prefer the rows.
|
||||
{
|
||||
query: "SELECT 1; CREATE TEMP TABLE t (a int); DROP TABLE t",
|
||||
},
|
||||
{
|
||||
query: "CREATE TEMP TABLE t (a int); SELECT 1; DROP TABLE t",
|
||||
},
|
||||
{
|
||||
query: "CREATE TEMP TABLE t (a int); DROP TABLE t; SELECT 1",
|
||||
},
|
||||
// Verify that an no-results query doesn't set the tag.
|
||||
{
|
||||
query: "CREATE TEMP TABLE t (a int); SELECT 1 WHERE FALSE; DROP TABLE t;",
|
||||
},
|
||||
}
|
||||
|
||||
// If this is the only test run, this will correct the connection string.
|
||||
openTestConn(t).Close()
|
||||
|
||||
conn, err := Open("")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer conn.Close()
|
||||
q := conn.(driver.Queryer)
|
||||
|
||||
for _, test := range tests {
|
||||
if rows, err := q.Query(test.query, nil); err != nil {
|
||||
t.Fatalf("%s: %s", test.query, err)
|
||||
} else {
|
||||
r := rows.(ResultTag)
|
||||
if tag := r.Tag(); tag != test.tag {
|
||||
t.Fatalf("%s: unexpected tag %q", test.query, tag)
|
||||
}
|
||||
res := r.Result()
|
||||
if ra, _ := res.RowsAffected(); ra != test.ra {
|
||||
t.Fatalf("%s: unexpected rows affected: %d", test.query, ra)
|
||||
}
|
||||
rows.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
15
vendor/github.com/lib/pq/go18_test.go
generated
vendored
15
vendor/github.com/lib/pq/go18_test.go
generated
vendored
@@ -79,10 +79,7 @@ func TestContextCancelExec(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
// Delay execution for just a bit until db.ExecContext has begun.
|
||||
go func() {
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
cancel()
|
||||
}()
|
||||
defer time.AfterFunc(time.Millisecond*10, cancel).Stop()
|
||||
|
||||
// Not canceled until after the exec has started.
|
||||
if _, err := db.ExecContext(ctx, "select pg_sleep(1)"); err == nil {
|
||||
@@ -106,10 +103,7 @@ func TestContextCancelQuery(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
// Delay execution for just a bit until db.QueryContext has begun.
|
||||
go func() {
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
cancel()
|
||||
}()
|
||||
defer time.AfterFunc(time.Millisecond*10, cancel).Stop()
|
||||
|
||||
// Not canceled until after the exec has started.
|
||||
if _, err := db.QueryContext(ctx, "select pg_sleep(1)"); err == nil {
|
||||
@@ -137,10 +131,7 @@ func TestContextCancelBegin(t *testing.T) {
|
||||
}
|
||||
|
||||
// Delay execution for just a bit until tx.Exec has begun.
|
||||
go func() {
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
cancel()
|
||||
}()
|
||||
defer time.AfterFunc(time.Millisecond*10, cancel).Stop()
|
||||
|
||||
// Not canceled until after the exec has started.
|
||||
if _, err := tx.Exec("select pg_sleep(1)"); err == nil {
|
||||
|
||||
24
vendor/github.com/lib/pq/listen_example/doc.go
generated
vendored
24
vendor/github.com/lib/pq/listen_example/doc.go
generated
vendored
@@ -51,21 +51,15 @@ mechanism to avoid polling the database while waiting for more work to arrive.
|
||||
}
|
||||
|
||||
func waitForNotification(l *pq.Listener) {
|
||||
for {
|
||||
select {
|
||||
case <-l.Notify:
|
||||
fmt.Println("received notification, new work available")
|
||||
return
|
||||
case <-time.After(90 * time.Second):
|
||||
go func() {
|
||||
l.Ping()
|
||||
}()
|
||||
// Check if there's more work available, just in case it takes
|
||||
// a while for the Listener to notice connection loss and
|
||||
// reconnect.
|
||||
fmt.Println("received no work for 90 seconds, checking for new work")
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-l.Notify:
|
||||
fmt.Println("received notification, new work available")
|
||||
case <-time.After(90 * time.Second):
|
||||
go l.Ping()
|
||||
// Check if there's more work available, just in case it takes
|
||||
// a while for the Listener to notice connection loss and
|
||||
// reconnect.
|
||||
fmt.Println("received no work for 90 seconds, checking for new work")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
24
vendor/github.com/lib/pq/notify_test.go
generated
vendored
24
vendor/github.com/lib/pq/notify_test.go
generated
vendored
@@ -7,7 +7,6 @@ import (
|
||||
"os"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
@@ -235,15 +234,10 @@ func TestConnExecDeadlock(t *testing.T) {
|
||||
// calls Close on the net.Conn; equivalent to a network failure
|
||||
l.Close()
|
||||
|
||||
var done int32 = 0
|
||||
go func() {
|
||||
time.Sleep(10 * time.Second)
|
||||
if atomic.LoadInt32(&done) != 1 {
|
||||
panic("timed out")
|
||||
}
|
||||
}()
|
||||
defer time.AfterFunc(10*time.Second, func() {
|
||||
panic("timed out")
|
||||
}).Stop()
|
||||
wg.Wait()
|
||||
atomic.StoreInt32(&done, 1)
|
||||
}
|
||||
|
||||
// Test for ListenerConn being closed while a slow query is executing
|
||||
@@ -271,15 +265,11 @@ func TestListenerConnCloseWhileQueryIsExecuting(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var done int32 = 0
|
||||
go func() {
|
||||
time.Sleep(10 * time.Second)
|
||||
if atomic.LoadInt32(&done) != 1 {
|
||||
panic("timed out")
|
||||
}
|
||||
}()
|
||||
|
||||
defer time.AfterFunc(10*time.Second, func() {
|
||||
panic("timed out")
|
||||
}).Stop()
|
||||
wg.Wait()
|
||||
atomic.StoreInt32(&done, 1)
|
||||
}
|
||||
|
||||
func TestNotifyExtra(t *testing.T) {
|
||||
|
||||
12
vendor/github.com/lib/pq/oid/types.go
generated
vendored
12
vendor/github.com/lib/pq/oid/types.go
generated
vendored
@@ -18,6 +18,7 @@ const (
|
||||
T_xid Oid = 28
|
||||
T_cid Oid = 29
|
||||
T_oidvector Oid = 30
|
||||
T_pg_ddl_command Oid = 32
|
||||
T_pg_type Oid = 71
|
||||
T_pg_attribute Oid = 75
|
||||
T_pg_proc Oid = 81
|
||||
@@ -28,6 +29,7 @@ const (
|
||||
T_pg_node_tree Oid = 194
|
||||
T__json Oid = 199
|
||||
T_smgr Oid = 210
|
||||
T_index_am_handler Oid = 325
|
||||
T_point Oid = 600
|
||||
T_lseg Oid = 601
|
||||
T_path Oid = 602
|
||||
@@ -133,6 +135,9 @@ const (
|
||||
T__uuid Oid = 2951
|
||||
T_txid_snapshot Oid = 2970
|
||||
T_fdw_handler Oid = 3115
|
||||
T_pg_lsn Oid = 3220
|
||||
T__pg_lsn Oid = 3221
|
||||
T_tsm_handler Oid = 3310
|
||||
T_anyenum Oid = 3500
|
||||
T_tsvector Oid = 3614
|
||||
T_tsquery Oid = 3615
|
||||
@@ -144,6 +149,8 @@ const (
|
||||
T__regconfig Oid = 3735
|
||||
T_regdictionary Oid = 3769
|
||||
T__regdictionary Oid = 3770
|
||||
T_jsonb Oid = 3802
|
||||
T__jsonb Oid = 3807
|
||||
T_anyrange Oid = 3831
|
||||
T_event_trigger Oid = 3838
|
||||
T_int4range Oid = 3904
|
||||
@@ -158,4 +165,9 @@ const (
|
||||
T__daterange Oid = 3913
|
||||
T_int8range Oid = 3926
|
||||
T__int8range Oid = 3927
|
||||
T_pg_shseclabel Oid = 4066
|
||||
T_regnamespace Oid = 4089
|
||||
T__regnamespace Oid = 4090
|
||||
T_regrole Oid = 4096
|
||||
T__regrole Oid = 4097
|
||||
)
|
||||
|
||||
123
vendor/github.com/lib/pq/ssl.go
generated
vendored
123
vendor/github.com/lib/pq/ssl.go
generated
vendored
@@ -15,7 +15,7 @@ import (
|
||||
func ssl(o values) func(net.Conn) net.Conn {
|
||||
verifyCaOnly := false
|
||||
tlsConf := tls.Config{}
|
||||
switch mode := o.Get("sslmode"); mode {
|
||||
switch mode := o["sslmode"]; mode {
|
||||
// "require" is the default.
|
||||
case "", "require":
|
||||
// We must skip TLS's own verification since it requires full
|
||||
@@ -23,15 +23,19 @@ func ssl(o values) func(net.Conn) net.Conn {
|
||||
tlsConf.InsecureSkipVerify = true
|
||||
|
||||
// From http://www.postgresql.org/docs/current/static/libpq-ssl.html:
|
||||
// Note: For backwards compatibility with earlier versions of PostgreSQL, if a
|
||||
// root CA file exists, the behavior of sslmode=require will be the same as
|
||||
// that of verify-ca, meaning the server certificate is validated against the
|
||||
// CA. Relying on this behavior is discouraged, and applications that need
|
||||
// certificate validation should always use verify-ca or verify-full.
|
||||
if _, err := os.Stat(o.Get("sslrootcert")); err == nil {
|
||||
verifyCaOnly = true
|
||||
} else {
|
||||
o.Set("sslrootcert", "")
|
||||
//
|
||||
// Note: For backwards compatibility with earlier versions of
|
||||
// PostgreSQL, if a root CA file exists, the behavior of
|
||||
// sslmode=require will be the same as that of verify-ca, meaning the
|
||||
// server certificate is validated against the CA. Relying on this
|
||||
// behavior is discouraged, and applications that need certificate
|
||||
// validation should always use verify-ca or verify-full.
|
||||
if sslrootcert, ok := o["sslrootcert"]; ok {
|
||||
if _, err := os.Stat(sslrootcert); err == nil {
|
||||
verifyCaOnly = true
|
||||
} else {
|
||||
delete(o, "sslrootcert")
|
||||
}
|
||||
}
|
||||
case "verify-ca":
|
||||
// We must skip TLS's own verification since it requires full
|
||||
@@ -39,7 +43,7 @@ func ssl(o values) func(net.Conn) net.Conn {
|
||||
tlsConf.InsecureSkipVerify = true
|
||||
verifyCaOnly = true
|
||||
case "verify-full":
|
||||
tlsConf.ServerName = o.Get("host")
|
||||
tlsConf.ServerName = o["host"]
|
||||
case "disable":
|
||||
return nil
|
||||
default:
|
||||
@@ -64,37 +68,42 @@ func ssl(o values) func(net.Conn) net.Conn {
|
||||
// in the user's home directory. The configured files must exist and have
|
||||
// the correct permissions.
|
||||
func sslClientCertificates(tlsConf *tls.Config, o values) {
|
||||
sslkey := o.Get("sslkey")
|
||||
sslcert := o.Get("sslcert")
|
||||
// user.Current() might fail when cross-compiling. We have to ignore the
|
||||
// error and continue without home directory defaults, since we wouldn't
|
||||
// know from where to load them.
|
||||
user, _ := user.Current()
|
||||
|
||||
var cinfo, kinfo os.FileInfo
|
||||
var err error
|
||||
|
||||
if sslcert != "" && sslkey != "" {
|
||||
// Check that both files exist. Note that we don't do any more extensive
|
||||
// checks than this (such as checking that the paths aren't directories);
|
||||
// LoadX509KeyPair() will take care of the rest.
|
||||
cinfo, err = os.Stat(sslcert)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
kinfo, err = os.Stat(sslkey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
// Automatically find certificates from ~/.postgresql
|
||||
sslcert, sslkey, cinfo, kinfo = sslHomeCertificates()
|
||||
|
||||
if cinfo == nil || kinfo == nil {
|
||||
// No certificates to load
|
||||
return
|
||||
}
|
||||
// In libpq, the client certificate is only loaded if the setting is not blank.
|
||||
//
|
||||
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037
|
||||
sslcert := o["sslcert"]
|
||||
if len(sslcert) == 0 && user != nil {
|
||||
sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
|
||||
}
|
||||
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045
|
||||
if len(sslcert) == 0 {
|
||||
return
|
||||
}
|
||||
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054
|
||||
if _, err := os.Stat(sslcert); os.IsNotExist(err) {
|
||||
return
|
||||
} else if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// The files must also have the correct permissions
|
||||
sslCertificatePermissions(cinfo, kinfo)
|
||||
// In libpq, the ssl key is only loaded if the setting is not blank.
|
||||
//
|
||||
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222
|
||||
sslkey := o["sslkey"]
|
||||
if len(sslkey) == 0 && user != nil {
|
||||
sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
|
||||
}
|
||||
|
||||
if len(sslkey) > 0 {
|
||||
if err := sslKeyPermissions(sslkey); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(sslcert, sslkey)
|
||||
if err != nil {
|
||||
@@ -105,7 +114,10 @@ func sslClientCertificates(tlsConf *tls.Config, o values) {
|
||||
|
||||
// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting.
|
||||
func sslCertificateAuthority(tlsConf *tls.Config, o values) {
|
||||
if sslrootcert := o.Get("sslrootcert"); sslrootcert != "" {
|
||||
// In libpq, the root certificate is only loaded if the setting is not blank.
|
||||
//
|
||||
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951
|
||||
if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 {
|
||||
tlsConf.RootCAs = x509.NewCertPool()
|
||||
|
||||
cert, err := ioutil.ReadFile(sslrootcert)
|
||||
@@ -113,41 +125,12 @@ func sslCertificateAuthority(tlsConf *tls.Config, o values) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
ok := tlsConf.RootCAs.AppendCertsFromPEM(cert)
|
||||
if !ok {
|
||||
if !tlsConf.RootCAs.AppendCertsFromPEM(cert) {
|
||||
errorf("couldn't parse pem in sslrootcert")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sslHomeCertificates returns the path and stats of certificates in the current
|
||||
// user's home directory.
|
||||
func sslHomeCertificates() (cert, key string, cinfo, kinfo os.FileInfo) {
|
||||
user, err := user.Current()
|
||||
|
||||
if err != nil {
|
||||
// user.Current() might fail when cross-compiling. We have to ignore the
|
||||
// error and continue without client certificates, since we wouldn't know
|
||||
// from where to load them.
|
||||
return
|
||||
}
|
||||
|
||||
cert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
|
||||
key = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
|
||||
|
||||
cinfo, err = os.Stat(cert)
|
||||
if err != nil {
|
||||
cinfo = nil
|
||||
}
|
||||
|
||||
kinfo, err = os.Stat(key)
|
||||
if err != nil {
|
||||
kinfo = nil
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// sslVerifyCertificateAuthority carries out a TLS handshake to the server and
|
||||
// verifies the presented certificate against the CA, i.e. the one specified in
|
||||
// sslrootcert or the system CA if sslrootcert was not specified.
|
||||
|
||||
16
vendor/github.com/lib/pq/ssl_permissions.go
generated
vendored
16
vendor/github.com/lib/pq/ssl_permissions.go
generated
vendored
@@ -4,13 +4,17 @@ package pq
|
||||
|
||||
import "os"
|
||||
|
||||
// sslCertificatePermissions checks the permissions on user-supplied certificate
|
||||
// files. The key file should have very little access.
|
||||
// sslKeyPermissions checks the permissions on user-supplied ssl key files.
|
||||
// The key file should have very little access.
|
||||
//
|
||||
// libpq does not check key file permissions on Windows.
|
||||
func sslCertificatePermissions(cert, key os.FileInfo) {
|
||||
kmode := key.Mode()
|
||||
if kmode != kmode&0600 {
|
||||
panic(ErrSSLKeyHasWorldPermissions)
|
||||
func sslKeyPermissions(sslkey string) error {
|
||||
info, err := os.Stat(sslkey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.Mode().Perm()&0077 != 0 {
|
||||
return ErrSSLKeyHasWorldPermissions
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
198
vendor/github.com/lib/pq/ssl_test.go
generated
vendored
198
vendor/github.com/lib/pq/ssl_test.go
generated
vendored
@@ -6,7 +6,6 @@ import (
|
||||
_ "crypto/sha256"
|
||||
"crypto/x509"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
@@ -42,10 +41,13 @@ func openSSLConn(t *testing.T, conninfo string) (*sql.DB, error) {
|
||||
}
|
||||
|
||||
func checkSSLSetup(t *testing.T, conninfo string) {
|
||||
db, err := openSSLConn(t, conninfo)
|
||||
if err == nil {
|
||||
db.Close()
|
||||
t.Fatalf("expected error with conninfo=%q", conninfo)
|
||||
_, err := openSSLConn(t, conninfo)
|
||||
if pge, ok := err.(*Error); ok {
|
||||
if pge.Code.Name() != "invalid_authorization_specification" {
|
||||
t.Fatalf("unexpected error code '%s'", pge.Code.Name())
|
||||
}
|
||||
} else {
|
||||
t.Fatalf("expected %T, got %v", (*Error)(nil), err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -150,120 +152,128 @@ func TestSSLVerifyCA(t *testing.T) {
|
||||
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
|
||||
|
||||
// Not OK according to the system CA
|
||||
_, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest")
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
{
|
||||
_, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest")
|
||||
if _, ok := err.(x509.UnknownAuthorityError); !ok {
|
||||
t.Fatalf("expected %T, got %#+v", x509.UnknownAuthorityError{}, err)
|
||||
}
|
||||
}
|
||||
_, ok := err.(x509.UnknownAuthorityError)
|
||||
if !ok {
|
||||
t.Fatalf("expected x509.UnknownAuthorityError, got %#+v", err)
|
||||
|
||||
// Still not OK according to the system CA; empty sslrootcert is treated as unspecified.
|
||||
{
|
||||
_, err := openSSLConn(t, "host=postgres sslmode=verify-ca user=pqgossltest sslrootcert=''")
|
||||
if _, ok := err.(x509.UnknownAuthorityError); !ok {
|
||||
t.Fatalf("expected %T, got %#+v", x509.UnknownAuthorityError{}, err)
|
||||
}
|
||||
}
|
||||
|
||||
rootCertPath := filepath.Join(os.Getenv("PQSSLCERTTEST_PATH"), "root.crt")
|
||||
rootCert := "sslrootcert=" + rootCertPath + " "
|
||||
// No match on Common Name, but that's OK
|
||||
_, err = openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-ca user=pqgossltest")
|
||||
if err != nil {
|
||||
if _, err := openSSLConn(t, rootCert+"host=127.0.0.1 sslmode=verify-ca user=pqgossltest"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Everything OK
|
||||
_, err = openSSLConn(t, rootCert+"host=postgres sslmode=verify-ca user=pqgossltest")
|
||||
if err != nil {
|
||||
if _, err := openSSLConn(t, rootCert+"host=postgres sslmode=verify-ca user=pqgossltest"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func getCertConninfo(t *testing.T, source string) string {
|
||||
var sslkey string
|
||||
var sslcert string
|
||||
|
||||
certpath := os.Getenv("PQSSLCERTTEST_PATH")
|
||||
|
||||
switch source {
|
||||
case "missingkey":
|
||||
sslkey = "/tmp/filedoesnotexist"
|
||||
sslcert = filepath.Join(certpath, "postgresql.crt")
|
||||
case "missingcert":
|
||||
sslkey = filepath.Join(certpath, "postgresql.key")
|
||||
sslcert = "/tmp/filedoesnotexist"
|
||||
case "certtwice":
|
||||
sslkey = filepath.Join(certpath, "postgresql.crt")
|
||||
sslcert = filepath.Join(certpath, "postgresql.crt")
|
||||
case "valid":
|
||||
sslkey = filepath.Join(certpath, "postgresql.key")
|
||||
sslcert = filepath.Join(certpath, "postgresql.crt")
|
||||
default:
|
||||
t.Fatalf("invalid source %q", source)
|
||||
}
|
||||
return fmt.Sprintf("sslmode=require user=pqgosslcert sslkey=%s sslcert=%s", sslkey, sslcert)
|
||||
}
|
||||
|
||||
// Authenticate over SSL using client certificates
|
||||
func TestSSLClientCertificates(t *testing.T) {
|
||||
maybeSkipSSLTests(t)
|
||||
// Environment sanity check: should fail without SSL
|
||||
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
|
||||
|
||||
// Should also fail without a valid certificate
|
||||
db, err := openSSLConn(t, "sslmode=require user=pqgosslcert")
|
||||
if err == nil {
|
||||
db.Close()
|
||||
t.Fatal("expected error")
|
||||
const baseinfo = "sslmode=require user=pqgosslcert"
|
||||
|
||||
// Certificate not specified, should fail
|
||||
{
|
||||
_, err := openSSLConn(t, baseinfo)
|
||||
if pge, ok := err.(*Error); ok {
|
||||
if pge.Code.Name() != "invalid_authorization_specification" {
|
||||
t.Fatalf("unexpected error code '%s'", pge.Code.Name())
|
||||
}
|
||||
} else {
|
||||
t.Fatalf("expected %T, got %v", (*Error)(nil), err)
|
||||
}
|
||||
}
|
||||
pge, ok := err.(*Error)
|
||||
|
||||
// Empty certificate specified, should fail
|
||||
{
|
||||
_, err := openSSLConn(t, baseinfo+" sslcert=''")
|
||||
if pge, ok := err.(*Error); ok {
|
||||
if pge.Code.Name() != "invalid_authorization_specification" {
|
||||
t.Fatalf("unexpected error code '%s'", pge.Code.Name())
|
||||
}
|
||||
} else {
|
||||
t.Fatalf("expected %T, got %v", (*Error)(nil), err)
|
||||
}
|
||||
}
|
||||
|
||||
// Non-existent certificate specified, should fail
|
||||
{
|
||||
_, err := openSSLConn(t, baseinfo+" sslcert=/tmp/filedoesnotexist")
|
||||
if pge, ok := err.(*Error); ok {
|
||||
if pge.Code.Name() != "invalid_authorization_specification" {
|
||||
t.Fatalf("unexpected error code '%s'", pge.Code.Name())
|
||||
}
|
||||
} else {
|
||||
t.Fatalf("expected %T, got %v", (*Error)(nil), err)
|
||||
}
|
||||
}
|
||||
|
||||
certpath, ok := os.LookupEnv("PQSSLCERTTEST_PATH")
|
||||
if !ok {
|
||||
t.Fatal("expected pq.Error")
|
||||
t.Fatalf("PQSSLCERTTEST_PATH not present in environment")
|
||||
}
|
||||
if pge.Code.Name() != "invalid_authorization_specification" {
|
||||
t.Fatalf("unexpected error code %q", pge.Code.Name())
|
||||
|
||||
sslcert := filepath.Join(certpath, "postgresql.crt")
|
||||
|
||||
// Cert present, key not specified, should fail
|
||||
{
|
||||
_, err := openSSLConn(t, baseinfo+" sslcert="+sslcert)
|
||||
if _, ok := err.(*os.PathError); !ok {
|
||||
t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err)
|
||||
}
|
||||
}
|
||||
|
||||
// Cert present, empty key specified, should fail
|
||||
{
|
||||
_, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey=''")
|
||||
if _, ok := err.(*os.PathError); !ok {
|
||||
t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err)
|
||||
}
|
||||
}
|
||||
|
||||
// Cert present, non-existent key, should fail
|
||||
{
|
||||
_, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey=/tmp/filedoesnotexist")
|
||||
if _, ok := err.(*os.PathError); !ok {
|
||||
t.Fatalf("expected %T, got %#+v", (*os.PathError)(nil), err)
|
||||
}
|
||||
}
|
||||
|
||||
// Key has wrong permissions (passing the cert as the key), should fail
|
||||
if _, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey="+sslcert); err != ErrSSLKeyHasWorldPermissions {
|
||||
t.Fatalf("expected %s, got %#+v", ErrSSLKeyHasWorldPermissions, err)
|
||||
}
|
||||
|
||||
sslkey := filepath.Join(certpath, "postgresql.key")
|
||||
|
||||
// Should work
|
||||
db, err = openSSLConn(t, getCertConninfo(t, "valid"))
|
||||
if err != nil {
|
||||
if db, err := openSSLConn(t, baseinfo+" sslcert="+sslcert+" sslkey="+sslkey); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rows, err := db.Query("SELECT 1")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rows.Close()
|
||||
}
|
||||
|
||||
// Test errors with ssl certificates
|
||||
func TestSSLClientCertificatesMissingFiles(t *testing.T) {
|
||||
maybeSkipSSLTests(t)
|
||||
// Environment sanity check: should fail without SSL
|
||||
checkSSLSetup(t, "sslmode=disable user=pqgossltest")
|
||||
|
||||
// Key missing, should fail
|
||||
_, err := openSSLConn(t, getCertConninfo(t, "missingkey"))
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
// should be a PathError
|
||||
_, ok := err.(*os.PathError)
|
||||
if !ok {
|
||||
t.Fatalf("expected PathError, got %#+v", err)
|
||||
}
|
||||
|
||||
// Cert missing, should fail
|
||||
_, err = openSSLConn(t, getCertConninfo(t, "missingcert"))
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
// should be a PathError
|
||||
_, ok = err.(*os.PathError)
|
||||
if !ok {
|
||||
t.Fatalf("expected PathError, got %#+v", err)
|
||||
}
|
||||
|
||||
// Key has wrong permissions, should fail
|
||||
_, err = openSSLConn(t, getCertConninfo(t, "certtwice"))
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
if err != ErrSSLKeyHasWorldPermissions {
|
||||
t.Fatalf("expected ErrSSLKeyHasWorldPermissions, got %#+v", err)
|
||||
} else {
|
||||
rows, err := db.Query("SELECT 1")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := rows.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := db.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
10
vendor/github.com/lib/pq/ssl_windows.go
generated
vendored
10
vendor/github.com/lib/pq/ssl_windows.go
generated
vendored
@@ -2,8 +2,8 @@
|
||||
|
||||
package pq
|
||||
|
||||
import "os"
|
||||
|
||||
// sslCertificatePermissions checks the permissions on user-supplied certificate
|
||||
// files. In libpq, this is a no-op on Windows.
|
||||
func sslCertificatePermissions(cert, key os.FileInfo) {}
|
||||
// sslKeyPermissions checks the permissions on user-supplied ssl key files.
|
||||
// The key file should have very little access.
|
||||
//
|
||||
// libpq does not check key file permissions on Windows.
|
||||
func sslKeyPermissions(string) error { return nil }
|
||||
|
||||
4
vendor/github.com/miekg/dns/README.md
generated
vendored
4
vendor/github.com/miekg/dns/README.md
generated
vendored
@@ -25,6 +25,7 @@ two versions of Go, currently: 1.6 and 1.7.
|
||||
|
||||
A not-so-up-to-date-list-that-may-be-actually-current:
|
||||
|
||||
* https://github.com/coredns/coredns
|
||||
* https://cloudflare.com
|
||||
* https://github.com/abh/geodns
|
||||
* http://www.statdns.com/
|
||||
@@ -54,6 +55,8 @@ A not-so-up-to-date-list-that-may-be-actually-current:
|
||||
* https://github.com/mehrdadrad/mylg
|
||||
* https://github.com/bamarni/dockness
|
||||
* https://github.com/fffaraz/microdns
|
||||
* http://quilt.io
|
||||
* https://github.com/ipdcode/hades (JD.COM)
|
||||
|
||||
Send pull request if you want to be listed here.
|
||||
|
||||
@@ -142,6 +145,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
|
||||
* 6975 - Algorithm Understanding in DNSSEC
|
||||
* 7043 - EUI48/EUI64 records
|
||||
* 7314 - DNS (EDNS) EXPIRE Option
|
||||
* 7828 - edns-tcp-keepalive EDNS0 Option
|
||||
* 7553 - URI record
|
||||
* 7858 - DNS over TLS: Initiation and Performance Considerations (draft)
|
||||
* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies)
|
||||
|
||||
20
vendor/github.com/miekg/dns/client.go
generated
vendored
20
vendor/github.com/miekg/dns/client.go
generated
vendored
@@ -121,12 +121,12 @@ func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
|
||||
r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
|
||||
return c.exchange(m, a)
|
||||
})
|
||||
if r != nil && shared {
|
||||
r = r.Copy()
|
||||
}
|
||||
if err != nil {
|
||||
return r, rtt, err
|
||||
}
|
||||
if shared {
|
||||
return r.Copy(), rtt, nil
|
||||
}
|
||||
return r, rtt, nil
|
||||
}
|
||||
|
||||
@@ -300,6 +300,18 @@ func tcpMsgLen(t io.Reader) (int, error) {
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// As seen with my local router/switch, retursn 1 byte on the above read,
|
||||
// resulting a a ShortRead. Just write it out (instead of loop) and read the
|
||||
// other byte.
|
||||
if n == 1 {
|
||||
n1, err := t.Read(p[1:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n += n1
|
||||
}
|
||||
|
||||
if n != 2 {
|
||||
return 0, ErrShortRead
|
||||
}
|
||||
@@ -400,7 +412,7 @@ func (co *Conn) Write(p []byte) (n int, err error) {
|
||||
n, err := io.Copy(w, bytes.NewReader(p))
|
||||
return int(n), err
|
||||
}
|
||||
n, err = co.Conn.(*net.UDPConn).Write(p)
|
||||
n, err = co.Conn.Write(p)
|
||||
return n, err
|
||||
}
|
||||
|
||||
|
||||
63
vendor/github.com/miekg/dns/client_test.go
generated
vendored
63
vendor/github.com/miekg/dns/client_test.go
generated
vendored
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
@@ -77,8 +78,8 @@ func TestClientTLSSync(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientSyncBadId(t *testing.T) {
|
||||
HandleFunc("miek.nl.", HelloServerBadId)
|
||||
func TestClientSyncBadID(t *testing.T) {
|
||||
HandleFunc("miek.nl.", HelloServerBadID)
|
||||
defer HandleRemove("miek.nl.")
|
||||
|
||||
s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
|
||||
@@ -450,3 +451,61 @@ func TestTimeout(t *testing.T) {
|
||||
t.Errorf("exchange took longer (%v) than specified Timeout (%v)", length, timeout)
|
||||
}
|
||||
}
|
||||
|
||||
// Check that responses from deduplicated requests aren't shared between callers
|
||||
func TestConcurrentExchanges(t *testing.T) {
|
||||
cases := make([]*Msg, 2)
|
||||
cases[0] = new(Msg)
|
||||
cases[1] = new(Msg)
|
||||
cases[1].Truncated = true
|
||||
for _, m := range cases {
|
||||
block := make(chan struct{})
|
||||
waiting := make(chan struct{})
|
||||
|
||||
handler := func(w ResponseWriter, req *Msg) {
|
||||
r := m.Copy()
|
||||
r.SetReply(req)
|
||||
|
||||
waiting <- struct{}{}
|
||||
<-block
|
||||
w.WriteMsg(r)
|
||||
}
|
||||
|
||||
HandleFunc("miek.nl.", handler)
|
||||
defer HandleRemove("miek.nl.")
|
||||
|
||||
s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("unable to run test server: %s", err)
|
||||
}
|
||||
defer s.Shutdown()
|
||||
|
||||
m := new(Msg)
|
||||
m.SetQuestion("miek.nl.", TypeSRV)
|
||||
c := &Client{
|
||||
SingleInflight: true,
|
||||
}
|
||||
r := make([]*Msg, 2)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(r))
|
||||
for i := 0; i < len(r); i++ {
|
||||
go func(i int) {
|
||||
r[i], _, _ = c.Exchange(m.Copy(), addrstr)
|
||||
wg.Done()
|
||||
}(i)
|
||||
}
|
||||
select {
|
||||
case <-waiting:
|
||||
case <-time.After(time.Second):
|
||||
t.FailNow()
|
||||
}
|
||||
close(block)
|
||||
wg.Wait()
|
||||
|
||||
if r[0] == r[1] {
|
||||
t.Log("Got same response object, expected non-shared responses")
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
32
vendor/github.com/miekg/dns/clientconfig.go
generated
vendored
32
vendor/github.com/miekg/dns/clientconfig.go
generated
vendored
@@ -97,3 +97,35 @@ func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// NameList returns all of the names that should be queried based on the
|
||||
// config. It is based off of go's net/dns name building, but it does not
|
||||
// check the length of the resulting names.
|
||||
func (c *ClientConfig) NameList(name string) []string {
|
||||
// if this domain is already fully qualified, no append needed.
|
||||
if IsFqdn(name) {
|
||||
return []string{name}
|
||||
}
|
||||
|
||||
// Check to see if the name has more labels than Ndots. Do this before making
|
||||
// the domain fully qualified.
|
||||
hasNdots := CountLabel(name) > c.Ndots
|
||||
// Make the domain fully qualified.
|
||||
name = Fqdn(name)
|
||||
|
||||
// Make a list of names based off search.
|
||||
names := []string{}
|
||||
|
||||
// If name has enough dots, try that first.
|
||||
if hasNdots {
|
||||
names = append(names, name)
|
||||
}
|
||||
for _, s := range c.Search {
|
||||
names = append(names, Fqdn(name+s))
|
||||
}
|
||||
// If we didn't have enough dots, try after suffixes.
|
||||
if !hasNdots {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
37
vendor/github.com/miekg/dns/clientconfig_test.go
generated
vendored
37
vendor/github.com/miekg/dns/clientconfig_test.go
generated
vendored
@@ -48,3 +48,40 @@ func testConfig(t *testing.T, data string) {
|
||||
|
||||
func TestNameserver(t *testing.T) { testConfig(t, normal) }
|
||||
func TestMissingFinalNewLine(t *testing.T) { testConfig(t, missingNewline) }
|
||||
|
||||
func TestNameList(t *testing.T) {
|
||||
cfg := ClientConfig{
|
||||
Ndots: 1,
|
||||
}
|
||||
// fqdn should be only result returned
|
||||
names := cfg.NameList("miek.nl.")
|
||||
if len(names) != 1 {
|
||||
t.Errorf("NameList returned != 1 names: %v", names)
|
||||
} else if names[0] != "miek.nl." {
|
||||
t.Errorf("NameList didn't return sent fqdn domain: %v", names[0])
|
||||
}
|
||||
|
||||
cfg.Search = []string{
|
||||
"test",
|
||||
}
|
||||
// Sent domain has NDots and search
|
||||
names = cfg.NameList("miek.nl")
|
||||
if len(names) != 2 {
|
||||
t.Errorf("NameList returned != 2 names: %v", names)
|
||||
} else if names[0] != "miek.nl." {
|
||||
t.Errorf("NameList didn't return sent domain first: %v", names[0])
|
||||
} else if names[1] != "miek.nl.test." {
|
||||
t.Errorf("NameList didn't return search last: %v", names[1])
|
||||
}
|
||||
|
||||
cfg.Ndots = 2
|
||||
// Sent domain has less than NDots and search
|
||||
names = cfg.NameList("miek.nl")
|
||||
if len(names) != 2 {
|
||||
t.Errorf("NameList returned != 2 names: %v", names)
|
||||
} else if names[0] != "miek.nl.test." {
|
||||
t.Errorf("NameList didn't return search first: %v", names[0])
|
||||
} else if names[1] != "miek.nl." {
|
||||
t.Errorf("NameList didn't return sent domain last: %v", names[1])
|
||||
}
|
||||
}
|
||||
|
||||
184
vendor/github.com/miekg/dns/compress_generate.go
generated
vendored
Normal file
184
vendor/github.com/miekg/dns/compress_generate.go
generated
vendored
Normal file
@@ -0,0 +1,184 @@
|
||||
//+build ignore
|
||||
|
||||
// compression_generate.go is meant to run with go generate. It will use
|
||||
// go/{importer,types} to track down all the RR struct types. Then for each type
|
||||
// it will look to see if there are (compressible) names, if so it will add that
|
||||
// type to compressionLenHelperType and comressionLenSearchType which "fake" the
|
||||
// compression so that Len() is fast.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"go/importer"
|
||||
"go/types"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
var packageHdr = `
|
||||
// *** DO NOT MODIFY ***
|
||||
// AUTOGENERATED BY go generate from compress_generate.go
|
||||
|
||||
package dns
|
||||
|
||||
`
|
||||
|
||||
// getTypeStruct will take a type and the package scope, and return the
|
||||
// (innermost) struct if the type is considered a RR type (currently defined as
|
||||
// those structs beginning with a RR_Header, could be redefined as implementing
|
||||
// the RR interface). The bool return value indicates if embedded structs were
|
||||
// resolved.
|
||||
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
|
||||
st, ok := t.Underlying().(*types.Struct)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
|
||||
return st, false
|
||||
}
|
||||
if st.Field(0).Anonymous() {
|
||||
st, _ := getTypeStruct(st.Field(0).Type(), scope)
|
||||
return st, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Import and type-check the package
|
||||
pkg, err := importer.Default().Import("github.com/miekg/dns")
|
||||
fatalIfErr(err)
|
||||
scope := pkg.Scope()
|
||||
|
||||
domainTypes := map[string]bool{} // Types that have a domain name in them (either comressible or not).
|
||||
cdomainTypes := map[string]bool{} // Types that have a compressible domain name in them (subset of domainType)
|
||||
for _, name := range scope.Names() {
|
||||
o := scope.Lookup(name)
|
||||
if o == nil || !o.Exported() {
|
||||
continue
|
||||
}
|
||||
st, _ := getTypeStruct(o.Type(), scope)
|
||||
if st == nil {
|
||||
continue
|
||||
}
|
||||
if name == "PrivateRR" {
|
||||
continue
|
||||
}
|
||||
|
||||
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
|
||||
log.Fatalf("Constant Type%s does not exist.", o.Name())
|
||||
}
|
||||
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||
if st.Tag(i) == `dns:"domain-name"` {
|
||||
domainTypes[o.Name()] = true
|
||||
}
|
||||
if st.Tag(i) == `dns:"cdomain-name"` {
|
||||
cdomainTypes[o.Name()] = true
|
||||
domainTypes[o.Name()] = true
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case st.Tag(i) == `dns:"domain-name"`:
|
||||
domainTypes[o.Name()] = true
|
||||
case st.Tag(i) == `dns:"cdomain-name"`:
|
||||
cdomainTypes[o.Name()] = true
|
||||
domainTypes[o.Name()] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
b := &bytes.Buffer{}
|
||||
b.WriteString(packageHdr)
|
||||
|
||||
// compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names
|
||||
|
||||
fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR) {\n")
|
||||
fmt.Fprint(b, "switch x := r.(type) {\n")
|
||||
for name, _ := range domainTypes {
|
||||
o := scope.Lookup(name)
|
||||
st, _ := getTypeStruct(o.Type(), scope)
|
||||
|
||||
fmt.Fprintf(b, "case *%s:\n", name)
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
out := func(s string) { fmt.Fprintf(b, "compressionLenHelper(c, x.%s)\n", st.Field(i).Name()) }
|
||||
|
||||
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||
switch st.Tag(i) {
|
||||
case `dns:"domain-name"`:
|
||||
fallthrough
|
||||
case `dns:"cdomain-name"`:
|
||||
// For HIP we need to slice over the elements in this slice.
|
||||
fmt.Fprintf(b, `for i := range x.%s {
|
||||
compressionLenHelper(c, x.%s[i])
|
||||
}
|
||||
`, st.Field(i).Name(), st.Field(i).Name())
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case st.Tag(i) == `dns:"cdomain-name"`:
|
||||
fallthrough
|
||||
case st.Tag(i) == `dns:"domain-name"`:
|
||||
out(st.Field(i).Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(b, "}\n}\n\n")
|
||||
|
||||
// compressionLenSearchType - search cdomain-tags types for compressible names.
|
||||
|
||||
fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool) {\n")
|
||||
fmt.Fprint(b, "switch x := r.(type) {\n")
|
||||
for name, _ := range cdomainTypes {
|
||||
o := scope.Lookup(name)
|
||||
st, _ := getTypeStruct(o.Type(), scope)
|
||||
|
||||
fmt.Fprintf(b, "case *%s:\n", name)
|
||||
j := 1
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
out := func(s string, j int) {
|
||||
fmt.Fprintf(b, "k%d, ok%d := compressionLenSearch(c, x.%s)\n", j, j, st.Field(i).Name())
|
||||
}
|
||||
|
||||
// There are no slice types with names that can be compressed.
|
||||
|
||||
switch {
|
||||
case st.Tag(i) == `dns:"cdomain-name"`:
|
||||
out(st.Field(i).Name(), j)
|
||||
j++
|
||||
}
|
||||
}
|
||||
k := "k1"
|
||||
ok := "ok1"
|
||||
for i := 2; i < j; i++ {
|
||||
k += fmt.Sprintf(" + k%d", i)
|
||||
ok += fmt.Sprintf(" && ok%d", i)
|
||||
}
|
||||
fmt.Fprintf(b, "return %s, %s\n", k, ok)
|
||||
}
|
||||
fmt.Fprintln(b, "}\nreturn 0, false\n}\n\n")
|
||||
|
||||
// gofmt
|
||||
res, err := format.Source(b.Bytes())
|
||||
if err != nil {
|
||||
b.WriteTo(os.Stderr)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
f, err := os.Create("zcompress.go")
|
||||
fatalIfErr(err)
|
||||
defer f.Close()
|
||||
f.Write(res)
|
||||
}
|
||||
|
||||
func fatalIfErr(err error) {
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
9
vendor/github.com/miekg/dns/dane.go
generated
vendored
9
vendor/github.com/miekg/dns/dane.go
generated
vendored
@@ -6,7 +6,6 @@ import (
|
||||
"crypto/x509"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records.
|
||||
@@ -23,20 +22,20 @@ func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (st
|
||||
h := sha256.New()
|
||||
switch selector {
|
||||
case 0:
|
||||
io.WriteString(h, string(cert.Raw))
|
||||
h.Write(cert.Raw)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
case 1:
|
||||
io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
|
||||
h.Write(cert.RawSubjectPublicKeyInfo)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
}
|
||||
case 2:
|
||||
h := sha512.New()
|
||||
switch selector {
|
||||
case 0:
|
||||
io.WriteString(h, string(cert.Raw))
|
||||
h.Write(cert.Raw)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
case 1:
|
||||
io.WriteString(h, string(cert.RawSubjectPublicKeyInfo))
|
||||
h.Write(cert.RawSubjectPublicKeyInfo)
|
||||
return hex.EncodeToString(h.Sum(nil)), nil
|
||||
}
|
||||
}
|
||||
|
||||
17
vendor/github.com/miekg/dns/dns_test.go
generated
vendored
17
vendor/github.com/miekg/dns/dns_test.go
generated
vendored
@@ -310,6 +310,23 @@ func TestMsgLengthCompressionMalformed(t *testing.T) {
|
||||
m.Len() // Should not crash.
|
||||
}
|
||||
|
||||
func TestMsgCompressLength2(t *testing.T) {
|
||||
msg := new(Msg)
|
||||
msg.Compress = true
|
||||
msg.SetQuestion(Fqdn("bliep."), TypeANY)
|
||||
msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "blaat.", Rrtype: 0x21, Class: 0x1, Ttl: 0x3c}, Port: 0x4c57, Target: "foo.bar."})
|
||||
msg.Extra = append(msg.Extra, &A{Hdr: RR_Header{Name: "foo.bar.", Rrtype: 0x1, Class: 0x1, Ttl: 0x3c}, A: net.IP{0xac, 0x11, 0x0, 0x3}})
|
||||
predicted := msg.Len()
|
||||
buf, err := msg.Pack()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if predicted != len(buf) {
|
||||
t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d",
|
||||
msg.Question[0].Name, len(msg.Answer), predicted, len(buf))
|
||||
}
|
||||
}
|
||||
|
||||
func TestToRFC3597(t *testing.T) {
|
||||
a, _ := NewRR("miek.nl. IN A 10.0.1.1")
|
||||
x := new(RFC3597)
|
||||
|
||||
21
vendor/github.com/miekg/dns/dnssec.go
generated
vendored
21
vendor/github.com/miekg/dns/dnssec.go
generated
vendored
@@ -43,7 +43,7 @@ const (
|
||||
PRIVATEOID uint8 = 254
|
||||
)
|
||||
|
||||
// Map for algorithm names.
|
||||
// AlgorithmToString is a map of algorithm IDs to algorithm names.
|
||||
var AlgorithmToString = map[uint8]string{
|
||||
RSAMD5: "RSAMD5",
|
||||
DH: "DH",
|
||||
@@ -61,10 +61,10 @@ var AlgorithmToString = map[uint8]string{
|
||||
PRIVATEOID: "PRIVATEOID",
|
||||
}
|
||||
|
||||
// Map of algorithm strings.
|
||||
// StringToAlgorithm is the reverse of AlgorithmToString.
|
||||
var StringToAlgorithm = reverseInt8(AlgorithmToString)
|
||||
|
||||
// Map of algorithm crypto hashes.
|
||||
// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
|
||||
var AlgorithmToHash = map[uint8]crypto.Hash{
|
||||
RSAMD5: crypto.MD5, // Deprecated in RFC 6725
|
||||
RSASHA1: crypto.SHA1,
|
||||
@@ -85,7 +85,7 @@ const (
|
||||
SHA512 // Experimental
|
||||
)
|
||||
|
||||
// Map for hash names.
|
||||
// HashToString is a map of hash IDs to names.
|
||||
var HashToString = map[uint8]string{
|
||||
SHA1: "SHA1",
|
||||
SHA256: "SHA256",
|
||||
@@ -94,7 +94,7 @@ var HashToString = map[uint8]string{
|
||||
SHA512: "SHA512",
|
||||
}
|
||||
|
||||
// Map of hash strings.
|
||||
// StringToHash is a map of names to hash IDs.
|
||||
var StringToHash = reverseInt8(HashToString)
|
||||
|
||||
// DNSKEY flag values.
|
||||
@@ -208,9 +208,6 @@ func (k *DNSKEY) ToDS(h uint8) *DS {
|
||||
// "|" denotes concatenation
|
||||
// DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key.
|
||||
|
||||
// digest buffer
|
||||
digest := append(owner, wire...) // another copy
|
||||
|
||||
var hash crypto.Hash
|
||||
switch h {
|
||||
case SHA1:
|
||||
@@ -226,7 +223,8 @@ func (k *DNSKEY) ToDS(h uint8) *DS {
|
||||
}
|
||||
|
||||
s := hash.New()
|
||||
s.Write(digest)
|
||||
s.Write(owner)
|
||||
s.Write(wire)
|
||||
ds.Digest = hex.EncodeToString(s.Sum(nil))
|
||||
return ds
|
||||
}
|
||||
@@ -297,7 +295,6 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signdata = append(signdata, wire...)
|
||||
|
||||
hash, ok := AlgorithmToHash[rr.Algorithm]
|
||||
if !ok {
|
||||
@@ -306,6 +303,7 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
|
||||
|
||||
h := hash.New()
|
||||
h.Write(signdata)
|
||||
h.Write(wire)
|
||||
|
||||
signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm)
|
||||
if err != nil {
|
||||
@@ -415,7 +413,6 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signeddata = append(signeddata, wire...)
|
||||
|
||||
sigbuf := rr.sigBuf() // Get the binary signature data
|
||||
if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
|
||||
@@ -438,6 +435,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
|
||||
|
||||
h := hash.New()
|
||||
h.Write(signeddata)
|
||||
h.Write(wire)
|
||||
return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
|
||||
|
||||
case ECDSAP256SHA256, ECDSAP384SHA384:
|
||||
@@ -452,6 +450,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
|
||||
|
||||
h := hash.New()
|
||||
h.Write(signeddata)
|
||||
h.Write(wire)
|
||||
if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
|
||||
return nil
|
||||
}
|
||||
|
||||
16
vendor/github.com/miekg/dns/dnssec_keygen.go
generated
vendored
16
vendor/github.com/miekg/dns/dnssec_keygen.go
generated
vendored
@@ -121,17 +121,17 @@ func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool {
|
||||
// RFC 3110: Section 2. RSA Public KEY Resource Records
|
||||
func exponentToBuf(_E int) []byte {
|
||||
var buf []byte
|
||||
i := big.NewInt(int64(_E))
|
||||
if len(i.Bytes()) < 256 {
|
||||
buf = make([]byte, 1)
|
||||
buf[0] = uint8(len(i.Bytes()))
|
||||
i := big.NewInt(int64(_E)).Bytes()
|
||||
if len(i) < 256 {
|
||||
buf = make([]byte, 1, 1+len(i))
|
||||
buf[0] = uint8(len(i))
|
||||
} else {
|
||||
buf = make([]byte, 3)
|
||||
buf = make([]byte, 3, 3+len(i))
|
||||
buf[0] = 0
|
||||
buf[1] = uint8(len(i.Bytes()) >> 8)
|
||||
buf[2] = uint8(len(i.Bytes()))
|
||||
buf[1] = uint8(len(i) >> 8)
|
||||
buf[2] = uint8(len(i))
|
||||
}
|
||||
buf = append(buf, i.Bytes()...)
|
||||
buf = append(buf, i...)
|
||||
return buf
|
||||
}
|
||||
|
||||
|
||||
2
vendor/github.com/miekg/dns/dnssec_keyscan.go
generated
vendored
2
vendor/github.com/miekg/dns/dnssec_keyscan.go
generated
vendored
@@ -36,7 +36,7 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
// TODO(mg): check if the pubkey matches the private key
|
||||
algo, err := strconv.Atoi(strings.SplitN(m["algorithm"], " ", 2)[0])
|
||||
algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8)
|
||||
if err != nil {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
|
||||
12
vendor/github.com/miekg/dns/edns.go
generated
vendored
12
vendor/github.com/miekg/dns/edns.go
generated
vendored
@@ -157,7 +157,7 @@ type EDNS0 interface {
|
||||
String() string
|
||||
}
|
||||
|
||||
// The nsid EDNS0 option is used to retrieve a nameserver
|
||||
// EDNS0_NSID option is used to retrieve a nameserver
|
||||
// identifier. When sending a request Nsid must be set to the empty string
|
||||
// The identifier is an opaque string encoded as hex.
|
||||
// Basic use pattern for creating an nsid option:
|
||||
@@ -197,7 +197,7 @@ func (e *EDNS0_NSID) String() string { return string(e.Nsid) }
|
||||
// e := new(dns.EDNS0_SUBNET)
|
||||
// e.Code = dns.EDNS0SUBNET
|
||||
// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
|
||||
// e.SourceNetMask = 32 // 32 for IPV4, 128 for IPv6
|
||||
// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6
|
||||
// e.SourceScope = 0
|
||||
// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4
|
||||
// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6
|
||||
@@ -301,7 +301,7 @@ func (e *EDNS0_SUBNET) String() (s string) {
|
||||
return
|
||||
}
|
||||
|
||||
// The Cookie EDNS0 option
|
||||
// The EDNS0_COOKIE option is used to add a DNS Cookie to a message.
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
@@ -543,15 +543,15 @@ func (e *EDNS0_LOCAL) unpack(b []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep
|
||||
// the TCP connection alive. See RFC 7828.
|
||||
type EDNS0_TCP_KEEPALIVE struct {
|
||||
Code uint16 // Always EDNSTCPKEEPALIVE
|
||||
Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present;
|
||||
Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order.
|
||||
}
|
||||
|
||||
func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 {
|
||||
return EDNS0TCPKEEPALIVE
|
||||
}
|
||||
func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE }
|
||||
|
||||
func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) {
|
||||
if e.Timeout != 0 && e.Length != 2 {
|
||||
|
||||
3
vendor/github.com/miekg/dns/idn/punycode.go
generated
vendored
3
vendor/github.com/miekg/dns/idn/punycode.go
generated
vendored
@@ -242,11 +242,8 @@ func encode(input []byte) []byte {
|
||||
t, k, cp rune // weight and codepoint calculation
|
||||
)
|
||||
|
||||
s := &bytes.Buffer{}
|
||||
for h := basiclen; h < fulllen; n, delta = n+1, delta+1 {
|
||||
nextltr = next(b, n)
|
||||
s.Truncate(0)
|
||||
s.WriteRune(nextltr)
|
||||
delta, n = delta+(nextltr-n)*rune(h+1), nextltr
|
||||
|
||||
for _, ltr = range b {
|
||||
|
||||
188
vendor/github.com/miekg/dns/msg.go
generated
vendored
188
vendor/github.com/miekg/dns/msg.go
generated
vendored
@@ -9,6 +9,7 @@
|
||||
package dns
|
||||
|
||||
//go:generate go run msg_generate.go
|
||||
//go:generate go run compress_generate.go
|
||||
|
||||
import (
|
||||
crand "crypto/rand"
|
||||
@@ -16,22 +17,9 @@ import (
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Initialize default math/rand source using crypto/rand to provide better
|
||||
// security without the performance trade-off.
|
||||
buf := make([]byte, 8)
|
||||
_, err := crand.Read(buf)
|
||||
if err != nil {
|
||||
// Failed to read from cryptographic source, fallback to default initial
|
||||
// seed (1) by returning early
|
||||
return
|
||||
}
|
||||
seed := binary.BigEndian.Uint64(buf)
|
||||
rand.Seed(int64(seed))
|
||||
}
|
||||
|
||||
const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
|
||||
|
||||
var (
|
||||
@@ -66,11 +54,45 @@ var (
|
||||
// dns.Id = func() uint16 { return 3 }
|
||||
var Id func() uint16 = id
|
||||
|
||||
var (
|
||||
idLock sync.Mutex
|
||||
idRand *rand.Rand
|
||||
)
|
||||
|
||||
// id returns a 16 bits random number to be used as a
|
||||
// message id. The random provided should be good enough.
|
||||
func id() uint16 {
|
||||
id32 := rand.Uint32()
|
||||
return uint16(id32)
|
||||
idLock.Lock()
|
||||
|
||||
if idRand == nil {
|
||||
// This (partially) works around
|
||||
// https://github.com/golang/go/issues/11833 by only
|
||||
// seeding idRand upon the first call to id.
|
||||
|
||||
var seed int64
|
||||
var buf [8]byte
|
||||
|
||||
if _, err := crand.Read(buf[:]); err == nil {
|
||||
seed = int64(binary.LittleEndian.Uint64(buf[:]))
|
||||
} else {
|
||||
seed = rand.Int63()
|
||||
}
|
||||
|
||||
idRand = rand.New(rand.NewSource(seed))
|
||||
}
|
||||
|
||||
// The call to idRand.Uint32 must be within the
|
||||
// mutex lock because *rand.Rand is not safe for
|
||||
// concurrent use.
|
||||
//
|
||||
// There is no added performance overhead to calling
|
||||
// idRand.Uint32 inside a mutex lock over just
|
||||
// calling rand.Uint32 as the global math/rand rng
|
||||
// is internally protected by a sync.Mutex.
|
||||
id := uint16(idRand.Uint32())
|
||||
|
||||
idLock.Unlock()
|
||||
return id
|
||||
}
|
||||
|
||||
// MsgHdr is a a manually-unpacked version of (id, bits).
|
||||
@@ -241,7 +263,9 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c
|
||||
bsFresh = true
|
||||
}
|
||||
// Don't try to compress '.'
|
||||
if compress && roBs[begin:] != "." {
|
||||
// We should only compress when compress it true, but we should also still pick
|
||||
// up names that can be used for *future* compression(s).
|
||||
if compression != nil && roBs[begin:] != "." {
|
||||
if p, ok := compression[roBs[begin:]]; !ok {
|
||||
// Only offsets smaller than this can be used.
|
||||
if offset < maxCompressionOffset {
|
||||
@@ -303,6 +327,7 @@ End:
|
||||
// UnpackDomainName unpacks a domain name into a string.
|
||||
func UnpackDomainName(msg []byte, off int) (string, int, error) {
|
||||
s := make([]byte, 0, 64)
|
||||
labels := 0
|
||||
off1 := 0
|
||||
lenmsg := len(msg)
|
||||
ptr := 0 // number of pointers followed
|
||||
@@ -345,6 +370,15 @@ Loop:
|
||||
}
|
||||
}
|
||||
}
|
||||
// never exceed the allowed label count lenght (63)
|
||||
if labels >= 63 {
|
||||
return "", lenmsg, &Error{err: "name exceeds 63 labels"}
|
||||
}
|
||||
labels += 1
|
||||
// never exceed the allowed doman name length (255 octets)
|
||||
if len(s) >= 255 {
|
||||
return "", lenmsg, &Error{err: "name exceeded allowed 255 octets"}
|
||||
}
|
||||
s = append(s, '.')
|
||||
off += c
|
||||
case 0xC0:
|
||||
@@ -364,6 +398,9 @@ Loop:
|
||||
if ptr++; ptr > 10 {
|
||||
return "", lenmsg, &Error{err: "too many compression pointers"}
|
||||
}
|
||||
// pointer should guarantee that it advances and points forwards at least
|
||||
// but the condition on previous three lines guarantees that it's
|
||||
// at least loop-free
|
||||
off = (c^0xC0)<<8 | int(c1)
|
||||
default:
|
||||
// 0x80 and 0x40 are reserved
|
||||
@@ -710,12 +747,10 @@ func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
|
||||
|
||||
// We need the uncompressed length here, because we first pack it and then compress it.
|
||||
msg = buf
|
||||
compress := dns.Compress
|
||||
dns.Compress = false
|
||||
if packLen := dns.Len() + 1; len(msg) < packLen {
|
||||
uncompressedLen := compressedLen(dns, false)
|
||||
if packLen := uncompressedLen + 1; len(msg) < packLen {
|
||||
msg = make([]byte, packLen)
|
||||
}
|
||||
dns.Compress = compress
|
||||
|
||||
// Pack it in: header and then the pieces.
|
||||
off := 0
|
||||
@@ -868,16 +903,18 @@ func (dns *Msg) String() string {
|
||||
// If dns.Compress is true compression it is taken into account. Len()
|
||||
// is provided to be a faster way to get the size of the resulting packet,
|
||||
// than packing it, measuring the size and discarding the buffer.
|
||||
func (dns *Msg) Len() int {
|
||||
func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) }
|
||||
|
||||
// compressedLen returns the message length when in compressed wire format
|
||||
// when compress is true, otherwise the uncompressed length is returned.
|
||||
func compressedLen(dns *Msg, compress bool) int {
|
||||
// We always return one more than needed.
|
||||
l := 12 // Message header is always 12 bytes
|
||||
var compression map[string]int
|
||||
if dns.Compress {
|
||||
compression = make(map[string]int)
|
||||
}
|
||||
compression := map[string]int{}
|
||||
|
||||
for i := 0; i < len(dns.Question); i++ {
|
||||
l += dns.Question[i].len()
|
||||
if dns.Compress {
|
||||
if compress {
|
||||
compressionLenHelper(compression, dns.Question[i].Name)
|
||||
}
|
||||
}
|
||||
@@ -886,7 +923,7 @@ func (dns *Msg) Len() int {
|
||||
continue
|
||||
}
|
||||
l += dns.Answer[i].len()
|
||||
if dns.Compress {
|
||||
if compress {
|
||||
k, ok := compressionLenSearch(compression, dns.Answer[i].Header().Name)
|
||||
if ok {
|
||||
l += 1 - k
|
||||
@@ -904,7 +941,7 @@ func (dns *Msg) Len() int {
|
||||
continue
|
||||
}
|
||||
l += dns.Ns[i].len()
|
||||
if dns.Compress {
|
||||
if compress {
|
||||
k, ok := compressionLenSearch(compression, dns.Ns[i].Header().Name)
|
||||
if ok {
|
||||
l += 1 - k
|
||||
@@ -922,7 +959,7 @@ func (dns *Msg) Len() int {
|
||||
continue
|
||||
}
|
||||
l += dns.Extra[i].len()
|
||||
if dns.Compress {
|
||||
if compress {
|
||||
k, ok := compressionLenSearch(compression, dns.Extra[i].Header().Name)
|
||||
if ok {
|
||||
l += 1 - k
|
||||
@@ -970,97 +1007,6 @@ func compressionLenSearch(c map[string]int, s string) (int, bool) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// TODO(miek): should add all types, because the all can be *used* for compression. Autogenerate from msg_generate and put in zmsg.go
|
||||
func compressionLenHelperType(c map[string]int, r RR) {
|
||||
switch x := r.(type) {
|
||||
case *NS:
|
||||
compressionLenHelper(c, x.Ns)
|
||||
case *MX:
|
||||
compressionLenHelper(c, x.Mx)
|
||||
case *CNAME:
|
||||
compressionLenHelper(c, x.Target)
|
||||
case *PTR:
|
||||
compressionLenHelper(c, x.Ptr)
|
||||
case *SOA:
|
||||
compressionLenHelper(c, x.Ns)
|
||||
compressionLenHelper(c, x.Mbox)
|
||||
case *MB:
|
||||
compressionLenHelper(c, x.Mb)
|
||||
case *MG:
|
||||
compressionLenHelper(c, x.Mg)
|
||||
case *MR:
|
||||
compressionLenHelper(c, x.Mr)
|
||||
case *MF:
|
||||
compressionLenHelper(c, x.Mf)
|
||||
case *MD:
|
||||
compressionLenHelper(c, x.Md)
|
||||
case *RT:
|
||||
compressionLenHelper(c, x.Host)
|
||||
case *RP:
|
||||
compressionLenHelper(c, x.Mbox)
|
||||
compressionLenHelper(c, x.Txt)
|
||||
case *MINFO:
|
||||
compressionLenHelper(c, x.Rmail)
|
||||
compressionLenHelper(c, x.Email)
|
||||
case *AFSDB:
|
||||
compressionLenHelper(c, x.Hostname)
|
||||
case *SRV:
|
||||
compressionLenHelper(c, x.Target)
|
||||
case *NAPTR:
|
||||
compressionLenHelper(c, x.Replacement)
|
||||
case *RRSIG:
|
||||
compressionLenHelper(c, x.SignerName)
|
||||
case *NSEC:
|
||||
compressionLenHelper(c, x.NextDomain)
|
||||
// HIP?
|
||||
}
|
||||
}
|
||||
|
||||
// Only search on compressing these types.
|
||||
func compressionLenSearchType(c map[string]int, r RR) (int, bool) {
|
||||
switch x := r.(type) {
|
||||
case *NS:
|
||||
return compressionLenSearch(c, x.Ns)
|
||||
case *MX:
|
||||
return compressionLenSearch(c, x.Mx)
|
||||
case *CNAME:
|
||||
return compressionLenSearch(c, x.Target)
|
||||
case *DNAME:
|
||||
return compressionLenSearch(c, x.Target)
|
||||
case *PTR:
|
||||
return compressionLenSearch(c, x.Ptr)
|
||||
case *SOA:
|
||||
k, ok := compressionLenSearch(c, x.Ns)
|
||||
k1, ok1 := compressionLenSearch(c, x.Mbox)
|
||||
if !ok && !ok1 {
|
||||
return 0, false
|
||||
}
|
||||
return k + k1, true
|
||||
case *MB:
|
||||
return compressionLenSearch(c, x.Mb)
|
||||
case *MG:
|
||||
return compressionLenSearch(c, x.Mg)
|
||||
case *MR:
|
||||
return compressionLenSearch(c, x.Mr)
|
||||
case *MF:
|
||||
return compressionLenSearch(c, x.Mf)
|
||||
case *MD:
|
||||
return compressionLenSearch(c, x.Md)
|
||||
case *RT:
|
||||
return compressionLenSearch(c, x.Host)
|
||||
case *MINFO:
|
||||
k, ok := compressionLenSearch(c, x.Rmail)
|
||||
k1, ok1 := compressionLenSearch(c, x.Email)
|
||||
if !ok && !ok1 {
|
||||
return 0, false
|
||||
}
|
||||
return k + k1, true
|
||||
case *AFSDB:
|
||||
return compressionLenSearch(c, x.Hostname)
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// Copy returns a new RR which is a deep-copy of r.
|
||||
func Copy(r RR) RR { r1 := r.copy(); return r1 }
|
||||
|
||||
|
||||
11
vendor/github.com/miekg/dns/nsecx.go
generated
vendored
11
vendor/github.com/miekg/dns/nsecx.go
generated
vendored
@@ -3,7 +3,6 @@ package dns
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"hash"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@@ -36,15 +35,15 @@ func HashName(label string, ha uint8, iter uint16, salt string) string {
|
||||
}
|
||||
|
||||
// k = 0
|
||||
name = append(name, wire...)
|
||||
io.WriteString(s, string(name))
|
||||
s.Write(name)
|
||||
s.Write(wire)
|
||||
nsec3 := s.Sum(nil)
|
||||
// k > 0
|
||||
for k := uint16(0); k < iter; k++ {
|
||||
s.Reset()
|
||||
nsec3 = append(nsec3, wire...)
|
||||
io.WriteString(s, string(nsec3))
|
||||
nsec3 = s.Sum(nil)
|
||||
s.Write(nsec3)
|
||||
s.Write(wire)
|
||||
nsec3 = s.Sum(nsec3[:0])
|
||||
}
|
||||
return toBase32(nsec3)
|
||||
}
|
||||
|
||||
6
vendor/github.com/miekg/dns/parse_test.go
generated
vendored
6
vendor/github.com/miekg/dns/parse_test.go
generated
vendored
@@ -836,11 +836,7 @@ func TestSRVPacking(t *testing.T) {
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
port := 8484
|
||||
tmp, err := strconv.Atoi(p)
|
||||
if err == nil {
|
||||
port = tmp
|
||||
}
|
||||
port, _ := strconv.ParseUint(p, 10, 16)
|
||||
|
||||
rr := &SRV{
|
||||
Hdr: RR_Header{Name: "somename.",
|
||||
|
||||
4
vendor/github.com/miekg/dns/reverse.go
generated
vendored
4
vendor/github.com/miekg/dns/reverse.go
generated
vendored
@@ -6,10 +6,10 @@ var StringToType = reverseInt16(TypeToString)
|
||||
// StringToClass is the reverse of ClassToString, needed for string parsing.
|
||||
var StringToClass = reverseInt16(ClassToString)
|
||||
|
||||
// Map of opcodes strings.
|
||||
// StringToOpcode is a map of opcodes to strings.
|
||||
var StringToOpcode = reverseInt(OpcodeToString)
|
||||
|
||||
// Map of rcodes strings.
|
||||
// StringToRcode is a map of rcodes to strings.
|
||||
var StringToRcode = reverseInt(RcodeToString)
|
||||
|
||||
// Reverse a map
|
||||
|
||||
8
vendor/github.com/miekg/dns/scan.go
generated
vendored
8
vendor/github.com/miekg/dns/scan.go
generated
vendored
@@ -819,8 +819,8 @@ func classToInt(token string) (uint16, bool) {
|
||||
if len(token) < offset+1 {
|
||||
return 0, false
|
||||
}
|
||||
class, ok := strconv.Atoi(token[offset:])
|
||||
if ok != nil || class > maxUint16 {
|
||||
class, err := strconv.ParseUint(token[offset:], 10, 16)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return uint16(class), true
|
||||
@@ -832,8 +832,8 @@ func typeToInt(token string) (uint16, bool) {
|
||||
if len(token) < offset+1 {
|
||||
return 0, false
|
||||
}
|
||||
typ, ok := strconv.Atoi(token[offset:])
|
||||
if ok != nil || typ > maxUint16 {
|
||||
typ, err := strconv.ParseUint(token[offset:], 10, 16)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return uint16(typ), true
|
||||
|
||||
116
vendor/github.com/miekg/dns/scan_rr.go
generated
vendored
116
vendor/github.com/miekg/dns/scan_rr.go
generated
vendored
@@ -447,7 +447,7 @@ func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad MX Pref", l}, ""
|
||||
}
|
||||
@@ -476,7 +476,7 @@ func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil {
|
||||
return nil, &ParseError{f, "bad RT Preference", l}, ""
|
||||
}
|
||||
@@ -506,7 +506,7 @@ func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad AFSDB Subtype", l}, ""
|
||||
}
|
||||
@@ -551,7 +551,7 @@ func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad KX Pref", l}, ""
|
||||
}
|
||||
@@ -665,7 +665,7 @@ func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.err {
|
||||
return nil, &ParseError{f, "bad SOA zone parameter", l}, ""
|
||||
}
|
||||
if j, e := strconv.Atoi(l.token); e != nil {
|
||||
if j, e := strconv.ParseUint(l.token, 10, 32); e != nil {
|
||||
if i == 0 {
|
||||
// Serial should be a number
|
||||
return nil, &ParseError{f, "bad SOA zone parameter", l}, ""
|
||||
@@ -705,21 +705,21 @@ func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad SRV Priority", l}, ""
|
||||
}
|
||||
rr.Priority = uint16(i)
|
||||
<-c // zBlank
|
||||
l = <-c // zString
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad SRV Weight", l}, ""
|
||||
}
|
||||
rr.Weight = uint16(i)
|
||||
<-c // zBlank
|
||||
l = <-c // zString
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad SRV Port", l}, ""
|
||||
}
|
||||
@@ -749,14 +749,14 @@ func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad NAPTR Order", l}, ""
|
||||
}
|
||||
rr.Order = uint16(i)
|
||||
<-c // zBlank
|
||||
l = <-c // zString
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad NAPTR Preference", l}, ""
|
||||
}
|
||||
@@ -885,7 +885,7 @@ func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 32)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad LOC Latitude", l}, ""
|
||||
}
|
||||
@@ -897,7 +897,7 @@ func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
|
||||
goto East
|
||||
}
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 32)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad LOC Latitude minutes", l}, ""
|
||||
}
|
||||
@@ -923,7 +923,7 @@ East:
|
||||
// East
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
if i, e := strconv.Atoi(l.token); e != nil || l.err {
|
||||
if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad LOC Longitude", l}, ""
|
||||
} else {
|
||||
rr.Longitude = 1000 * 60 * 60 * uint32(i)
|
||||
@@ -934,7 +934,7 @@ East:
|
||||
if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
|
||||
goto Altitude
|
||||
}
|
||||
if i, e := strconv.Atoi(l.token); e != nil || l.err {
|
||||
if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad LOC Longitude minutes", l}, ""
|
||||
} else {
|
||||
rr.Longitude += 1000 * 60 * uint32(i)
|
||||
@@ -1016,7 +1016,7 @@ func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, l.comment
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, ""
|
||||
}
|
||||
@@ -1077,14 +1077,14 @@ func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
}
|
||||
if v, ok := StringToCertType[l.token]; ok {
|
||||
rr.Type = v
|
||||
} else if i, e := strconv.Atoi(l.token); e != nil {
|
||||
} else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil {
|
||||
return nil, &ParseError{f, "bad CERT Type", l}, ""
|
||||
} else {
|
||||
rr.Type = uint16(i)
|
||||
}
|
||||
<-c // zBlank
|
||||
l = <-c // zString
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad CERT KeyTag", l}, ""
|
||||
}
|
||||
@@ -1093,7 +1093,7 @@ func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
l = <-c // zString
|
||||
if v, ok := StringToAlgorithm[l.token]; ok {
|
||||
rr.Algorithm = v
|
||||
} else if i, e := strconv.Atoi(l.token); e != nil {
|
||||
} else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil {
|
||||
return nil, &ParseError{f, "bad CERT Algorithm", l}, ""
|
||||
} else {
|
||||
rr.Algorithm = uint8(i)
|
||||
@@ -1148,21 +1148,21 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
}
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, err := strconv.Atoi(l.token)
|
||||
i, err := strconv.ParseUint(l.token, 10, 8)
|
||||
if err != nil || l.err {
|
||||
return nil, &ParseError{f, "bad RRSIG Algorithm", l}, ""
|
||||
}
|
||||
rr.Algorithm = uint8(i)
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, err = strconv.Atoi(l.token)
|
||||
i, err = strconv.ParseUint(l.token, 10, 8)
|
||||
if err != nil || l.err {
|
||||
return nil, &ParseError{f, "bad RRSIG Labels", l}, ""
|
||||
}
|
||||
rr.Labels = uint8(i)
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, err = strconv.Atoi(l.token)
|
||||
i, err = strconv.ParseUint(l.token, 10, 32)
|
||||
if err != nil || l.err {
|
||||
return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, ""
|
||||
}
|
||||
@@ -1193,7 +1193,7 @@ func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
}
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, err = strconv.Atoi(l.token)
|
||||
i, err = strconv.ParseUint(l.token, 10, 16)
|
||||
if err != nil || l.err {
|
||||
return nil, &ParseError{f, "bad RRSIG KeyTag", l}, ""
|
||||
}
|
||||
@@ -1274,21 +1274,21 @@ func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, l.comment
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad NSEC3 Hash", l}, ""
|
||||
}
|
||||
rr.Hash = uint8(i)
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad NSEC3 Flags", l}, ""
|
||||
}
|
||||
rr.Flags = uint8(i)
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad NSEC3 Iterations", l}, ""
|
||||
}
|
||||
@@ -1342,21 +1342,21 @@ func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, strin
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, ""
|
||||
}
|
||||
rr.Hash = uint8(i)
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, ""
|
||||
}
|
||||
rr.Flags = uint8(i)
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, ""
|
||||
}
|
||||
@@ -1440,14 +1440,14 @@ func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad SSHFP Algorithm", l}, ""
|
||||
}
|
||||
rr.Algorithm = uint8(i)
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad SSHFP Type", l}, ""
|
||||
}
|
||||
@@ -1469,21 +1469,21 @@ func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, str
|
||||
if l.length == 0 {
|
||||
return rr, nil, l.comment
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad " + typ + " Flags", l}, ""
|
||||
}
|
||||
rr.Flags = uint16(i)
|
||||
<-c // zBlank
|
||||
l = <-c // zString
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad " + typ + " Protocol", l}, ""
|
||||
}
|
||||
rr.Protocol = uint8(i)
|
||||
<-c // zBlank
|
||||
l = <-c // zString
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, ""
|
||||
}
|
||||
@@ -1525,21 +1525,21 @@ func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, l.comment
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad RKEY Flags", l}, ""
|
||||
}
|
||||
rr.Flags = uint16(i)
|
||||
<-c // zBlank
|
||||
l = <-c // zString
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad RKEY Protocol", l}, ""
|
||||
}
|
||||
rr.Protocol = uint8(i)
|
||||
<-c // zBlank
|
||||
l = <-c // zString
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad RKEY Algorithm", l}, ""
|
||||
}
|
||||
@@ -1610,14 +1610,14 @@ func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string)
|
||||
if l.length == 0 {
|
||||
return rr, nil, l.comment
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, ""
|
||||
}
|
||||
rr.KeyTag = uint16(i)
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
if i, e := strconv.Atoi(l.token); e != nil {
|
||||
if i, e = strconv.ParseUint(l.token, 10, 8); e != nil {
|
||||
i, ok := StringToAlgorithm[l.tokenUpper]
|
||||
if !ok || l.err {
|
||||
return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, ""
|
||||
@@ -1628,7 +1628,7 @@ func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string)
|
||||
}
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad " + typ + " DigestType", l}, ""
|
||||
}
|
||||
@@ -1669,14 +1669,14 @@ func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, l.comment
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad TA KeyTag", l}, ""
|
||||
}
|
||||
rr.KeyTag = uint16(i)
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
if i, e := strconv.Atoi(l.token); e != nil {
|
||||
if i, e := strconv.ParseUint(l.token, 10, 8); e != nil {
|
||||
i, ok := StringToAlgorithm[l.tokenUpper]
|
||||
if !ok || l.err {
|
||||
return nil, &ParseError{f, "bad TA Algorithm", l}, ""
|
||||
@@ -1687,7 +1687,7 @@ func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
}
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad TA DigestType", l}, ""
|
||||
}
|
||||
@@ -1707,21 +1707,21 @@ func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, l.comment
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad TLSA Usage", l}, ""
|
||||
}
|
||||
rr.Usage = uint8(i)
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad TLSA Selector", l}, ""
|
||||
}
|
||||
rr.Selector = uint8(i)
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad TLSA MatchingType", l}, ""
|
||||
}
|
||||
@@ -1742,21 +1742,21 @@ func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, l.comment
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad SMIMEA Usage", l}, ""
|
||||
}
|
||||
rr.Usage = uint8(i)
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad SMIMEA Selector", l}, ""
|
||||
}
|
||||
rr.Selector = uint8(i)
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 8)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad SMIMEA MatchingType", l}, ""
|
||||
}
|
||||
@@ -1842,14 +1842,14 @@ func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
return rr, nil, ""
|
||||
}
|
||||
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad URI Priority", l}, ""
|
||||
}
|
||||
rr.Priority = uint16(i)
|
||||
<-c // zBlank
|
||||
l = <-c
|
||||
i, e = strconv.Atoi(l.token)
|
||||
i, e = strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad URI Weight", l}, ""
|
||||
}
|
||||
@@ -1888,7 +1888,7 @@ func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad NID Preference", l}, ""
|
||||
}
|
||||
@@ -1911,7 +1911,7 @@ func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad L32 Preference", l}, ""
|
||||
}
|
||||
@@ -1933,7 +1933,7 @@ func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad LP Preference", l}, ""
|
||||
}
|
||||
@@ -1966,7 +1966,7 @@ func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad L64 Preference", l}, ""
|
||||
}
|
||||
@@ -1988,7 +1988,7 @@ func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 32)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad UID Uid", l}, ""
|
||||
}
|
||||
@@ -2003,7 +2003,7 @@ func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 32)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad GID Gid", l}, ""
|
||||
}
|
||||
@@ -2033,7 +2033,7 @@ func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, ""
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
i, e := strconv.ParseUint(l.token, 10, 16)
|
||||
if e != nil || l.err {
|
||||
return nil, &ParseError{f, "bad PX Preference", l}, ""
|
||||
}
|
||||
@@ -2079,7 +2079,7 @@ func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
if l.length == 0 {
|
||||
return rr, nil, l.comment
|
||||
}
|
||||
i, err := strconv.Atoi(l.token)
|
||||
i, err := strconv.ParseUint(l.token, 10, 8)
|
||||
if err != nil || l.err {
|
||||
return nil, &ParseError{f, "bad CAA Flag", l}, ""
|
||||
}
|
||||
|
||||
4
vendor/github.com/miekg/dns/server_test.go
generated
vendored
4
vendor/github.com/miekg/dns/server_test.go
generated
vendored
@@ -20,7 +20,7 @@ func HelloServer(w ResponseWriter, req *Msg) {
|
||||
w.WriteMsg(m)
|
||||
}
|
||||
|
||||
func HelloServerBadId(w ResponseWriter, req *Msg) {
|
||||
func HelloServerBadID(w ResponseWriter, req *Msg) {
|
||||
m := new(Msg)
|
||||
m.SetReply(req)
|
||||
m.Id++
|
||||
@@ -548,7 +548,7 @@ func TestHandlerCloseTCP(t *testing.T) {
|
||||
return
|
||||
}
|
||||
time.Sleep(time.Second / 10)
|
||||
tries += 1
|
||||
tries++
|
||||
goto exchange
|
||||
}
|
||||
}()
|
||||
|
||||
5
vendor/github.com/miekg/dns/sig0.go
generated
vendored
5
vendor/github.com/miekg/dns/sig0.go
generated
vendored
@@ -60,16 +60,15 @@ func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
|
||||
}
|
||||
|
||||
rr.Signature = toBase64(signature)
|
||||
sig := string(signature)
|
||||
|
||||
buf = append(buf, sig...)
|
||||
buf = append(buf, signature...)
|
||||
if len(buf) > int(^uint16(0)) {
|
||||
return nil, ErrBuf
|
||||
}
|
||||
// Adjust sig data length
|
||||
rdoff := len(mbuf) + 1 + 2 + 2 + 4
|
||||
rdlen := binary.BigEndian.Uint16(buf[rdoff:])
|
||||
rdlen += uint16(len(sig))
|
||||
rdlen += uint16(len(signature))
|
||||
binary.BigEndian.PutUint16(buf[rdoff:], rdlen)
|
||||
// Adjust additional count
|
||||
adc := binary.BigEndian.Uint16(buf[10:])
|
||||
|
||||
8
vendor/github.com/miekg/dns/smimea.go
generated
vendored
8
vendor/github.com/miekg/dns/smimea.go
generated
vendored
@@ -33,15 +33,15 @@ func (r *SMIMEA) Verify(cert *x509.Certificate) error {
|
||||
return ErrSig // ErrSig, really?
|
||||
}
|
||||
|
||||
// SIMEAName returns the ownername of a SMIMEA resource record as per the
|
||||
// SMIMEAName returns the ownername of a SMIMEA resource record as per the
|
||||
// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3
|
||||
func SMIMEAName(email_address string, domain_name string) (string, error) {
|
||||
func SMIMEAName(email, domain string) (string, error) {
|
||||
hasher := sha256.New()
|
||||
hasher.Write([]byte(email_address))
|
||||
hasher.Write([]byte(email))
|
||||
|
||||
// RFC Section 3: "The local-part is hashed using the SHA2-256
|
||||
// algorithm with the hash truncated to 28 octets and
|
||||
// represented in its hexadecimal representation to become the
|
||||
// left-most label in the prepared domain name"
|
||||
return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain_name, nil
|
||||
return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil
|
||||
}
|
||||
|
||||
3
vendor/github.com/miekg/dns/tsig.go
generated
vendored
3
vendor/github.com/miekg/dns/tsig.go
generated
vendored
@@ -9,7 +9,6 @@ import (
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"hash"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -124,7 +123,7 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s
|
||||
default:
|
||||
return nil, "", ErrKeyAlg
|
||||
}
|
||||
io.WriteString(h, string(buf))
|
||||
h.Write(buf)
|
||||
t.MAC = hex.EncodeToString(h.Sum(nil))
|
||||
t.MACSize = uint16(len(t.MAC) / 2) // Size is half!
|
||||
|
||||
|
||||
2
vendor/github.com/miekg/dns/types.go
generated
vendored
2
vendor/github.com/miekg/dns/types.go
generated
vendored
@@ -144,7 +144,7 @@ const (
|
||||
OpcodeUpdate = 5
|
||||
)
|
||||
|
||||
// Headers is the wire format for the DNS packet header.
|
||||
// Header is the wire format for the DNS packet header.
|
||||
type Header struct {
|
||||
Id uint16
|
||||
Bits uint16
|
||||
|
||||
2
vendor/github.com/miekg/dns/types_generate.go
generated
vendored
2
vendor/github.com/miekg/dns/types_generate.go
generated
vendored
@@ -197,7 +197,7 @@ func main() {
|
||||
case st.Tag(i) == "":
|
||||
switch st.Field(i).Type().(*types.Basic).Kind() {
|
||||
case types.Uint8:
|
||||
o("l += 1 // %s\n")
|
||||
o("l++ // %s\n")
|
||||
case types.Uint16:
|
||||
o("l += 2 // %s\n")
|
||||
case types.Uint32:
|
||||
|
||||
26
vendor/github.com/miekg/dns/udp.go
generated
vendored
26
vendor/github.com/miekg/dns/udp.go
generated
vendored
@@ -1,10 +1,9 @@
|
||||
// +build !windows,!plan9
|
||||
// +build !windows
|
||||
|
||||
package dns
|
||||
|
||||
import (
|
||||
"net"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// SessionUDP holds the remote address and the associated
|
||||
@@ -17,29 +16,6 @@ type SessionUDP struct {
|
||||
// RemoteAddr returns the remote network address.
|
||||
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
|
||||
|
||||
// setUDPSocketOptions sets the UDP socket options.
|
||||
// This function is implemented on a per platform basis. See udp_*.go for more details
|
||||
func setUDPSocketOptions(conn *net.UDPConn) error {
|
||||
sa, err := getUDPSocketName(conn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch sa.(type) {
|
||||
case *syscall.SockaddrInet6:
|
||||
v6only, err := getUDPSocketOptions6Only(conn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
setUDPSocketOptions6(conn)
|
||||
if !v6only {
|
||||
setUDPSocketOptions4(conn)
|
||||
}
|
||||
case *syscall.SockaddrInet4:
|
||||
setUDPSocketOptions4(conn)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
|
||||
// net.UDPAddr.
|
||||
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
|
||||
|
||||
25
vendor/github.com/miekg/dns/udp_linux.go
generated
vendored
25
vendor/github.com/miekg/dns/udp_linux.go
generated
vendored
@@ -1,4 +1,4 @@
|
||||
// +build linux
|
||||
// +build linux,!appengine
|
||||
|
||||
package dns
|
||||
|
||||
@@ -15,6 +15,29 @@ import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// setUDPSocketOptions sets the UDP socket options.
|
||||
// This function is implemented on a per platform basis. See udp_*.go for more details
|
||||
func setUDPSocketOptions(conn *net.UDPConn) error {
|
||||
sa, err := getUDPSocketName(conn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch sa.(type) {
|
||||
case *syscall.SockaddrInet6:
|
||||
v6only, err := getUDPSocketOptions6Only(conn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
setUDPSocketOptions6(conn)
|
||||
if !v6only {
|
||||
setUDPSocketOptions4(conn)
|
||||
}
|
||||
case *syscall.SockaddrInet4:
|
||||
setUDPSocketOptions4(conn)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setUDPSocketOptions4 prepares the v4 socket for sessions.
|
||||
func setUDPSocketOptions4(conn *net.UDPConn) error {
|
||||
file, err := conn.File()
|
||||
|
||||
6
vendor/github.com/miekg/dns/udp_other.go
generated
vendored
6
vendor/github.com/miekg/dns/udp_other.go
generated
vendored
@@ -1,17 +1,15 @@
|
||||
// +build !linux,!plan9
|
||||
// +build !linux appengine
|
||||
|
||||
package dns
|
||||
|
||||
import (
|
||||
"net"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// These do nothing. See udp_linux.go for an example of how to implement this.
|
||||
|
||||
// We tried to adhire to some kind of naming scheme.
|
||||
|
||||
func setUDPSocketOptions(conn *net.UDPConn) error { return nil }
|
||||
func setUDPSocketOptions4(conn *net.UDPConn) error { return nil }
|
||||
func setUDPSocketOptions6(conn *net.UDPConn) error { return nil }
|
||||
func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { return false, nil }
|
||||
func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { return nil, nil }
|
||||
|
||||
34
vendor/github.com/miekg/dns/udp_plan9.go
generated
vendored
34
vendor/github.com/miekg/dns/udp_plan9.go
generated
vendored
@@ -1,34 +0,0 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"net"
|
||||
)
|
||||
|
||||
func setUDPSocketOptions(conn *net.UDPConn) error { return nil }
|
||||
|
||||
// SessionUDP holds the remote address and the associated
|
||||
// out-of-band data.
|
||||
type SessionUDP struct {
|
||||
raddr *net.UDPAddr
|
||||
context []byte
|
||||
}
|
||||
|
||||
// RemoteAddr returns the remote network address.
|
||||
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
|
||||
|
||||
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
|
||||
// net.UDPAddr.
|
||||
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
|
||||
oob := make([]byte, 40)
|
||||
n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
|
||||
if err != nil {
|
||||
return n, nil, err
|
||||
}
|
||||
return n, &SessionUDP{raddr, oob[:oobn]}, err
|
||||
}
|
||||
|
||||
// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr.
|
||||
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
|
||||
n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
|
||||
return n, err
|
||||
}
|
||||
9
vendor/github.com/miekg/dns/udp_windows.go
generated
vendored
9
vendor/github.com/miekg/dns/udp_windows.go
generated
vendored
@@ -8,6 +8,8 @@ type SessionUDP struct {
|
||||
raddr *net.UDPAddr
|
||||
}
|
||||
|
||||
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
|
||||
|
||||
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
|
||||
// net.UDPAddr.
|
||||
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
|
||||
@@ -25,10 +27,3 @@ func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, e
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
|
||||
|
||||
// setUDPSocketOptions sets the UDP socket options.
|
||||
// This function is implemented on a per platform basis. See udp_*.go for more details
|
||||
func setUDPSocketOptions(conn *net.UDPConn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
36
vendor/github.com/miekg/dns/update_test.go
generated
vendored
36
vendor/github.com/miekg/dns/update_test.go
generated
vendored
@@ -92,28 +92,28 @@ func TestPreReqAndRemovals(t *testing.T) {
|
||||
m.Id = 1234
|
||||
|
||||
// Use a full set of RRs each time, so we are sure the rdata is stripped.
|
||||
rr_name1, _ := NewRR("name_used. 3600 IN A 127.0.0.1")
|
||||
rr_name2, _ := NewRR("name_not_used. 3600 IN A 127.0.0.1")
|
||||
rr_remove1, _ := NewRR("remove1. 3600 IN A 127.0.0.1")
|
||||
rr_remove2, _ := NewRR("remove2. 3600 IN A 127.0.0.1")
|
||||
rr_remove3, _ := NewRR("remove3. 3600 IN A 127.0.0.1")
|
||||
rr_insert, _ := NewRR("insert. 3600 IN A 127.0.0.1")
|
||||
rr_rrset1, _ := NewRR("rrset_used1. 3600 IN A 127.0.0.1")
|
||||
rr_rrset2, _ := NewRR("rrset_used2. 3600 IN A 127.0.0.1")
|
||||
rr_rrset3, _ := NewRR("rrset_not_used. 3600 IN A 127.0.0.1")
|
||||
rrName1, _ := NewRR("name_used. 3600 IN A 127.0.0.1")
|
||||
rrName2, _ := NewRR("name_not_used. 3600 IN A 127.0.0.1")
|
||||
rrRemove1, _ := NewRR("remove1. 3600 IN A 127.0.0.1")
|
||||
rrRemove2, _ := NewRR("remove2. 3600 IN A 127.0.0.1")
|
||||
rrRemove3, _ := NewRR("remove3. 3600 IN A 127.0.0.1")
|
||||
rrInsert, _ := NewRR("insert. 3600 IN A 127.0.0.1")
|
||||
rrRrset1, _ := NewRR("rrset_used1. 3600 IN A 127.0.0.1")
|
||||
rrRrset2, _ := NewRR("rrset_used2. 3600 IN A 127.0.0.1")
|
||||
rrRrset3, _ := NewRR("rrset_not_used. 3600 IN A 127.0.0.1")
|
||||
|
||||
// Handle the prereqs.
|
||||
m.NameUsed([]RR{rr_name1})
|
||||
m.NameNotUsed([]RR{rr_name2})
|
||||
m.RRsetUsed([]RR{rr_rrset1})
|
||||
m.Used([]RR{rr_rrset2})
|
||||
m.RRsetNotUsed([]RR{rr_rrset3})
|
||||
m.NameUsed([]RR{rrName1})
|
||||
m.NameNotUsed([]RR{rrName2})
|
||||
m.RRsetUsed([]RR{rrRrset1})
|
||||
m.Used([]RR{rrRrset2})
|
||||
m.RRsetNotUsed([]RR{rrRrset3})
|
||||
|
||||
// and now the updates.
|
||||
m.RemoveName([]RR{rr_remove1})
|
||||
m.RemoveRRset([]RR{rr_remove2})
|
||||
m.Remove([]RR{rr_remove3})
|
||||
m.Insert([]RR{rr_insert})
|
||||
m.RemoveName([]RR{rrRemove1})
|
||||
m.RemoveRRset([]RR{rrRemove2})
|
||||
m.Remove([]RR{rrRemove3})
|
||||
m.Insert([]RR{rrInsert})
|
||||
|
||||
// This test function isn't a Example function because we print these RR with tabs at the
|
||||
// end and the Example function trim these, thus they never match.
|
||||
|
||||
119
vendor/github.com/miekg/dns/zcompress.go
generated
vendored
Normal file
119
vendor/github.com/miekg/dns/zcompress.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
// *** DO NOT MODIFY ***
|
||||
// AUTOGENERATED BY go generate from compress_generate.go
|
||||
|
||||
package dns
|
||||
|
||||
func compressionLenHelperType(c map[string]int, r RR) {
|
||||
switch x := r.(type) {
|
||||
case *KX:
|
||||
compressionLenHelper(c, x.Exchanger)
|
||||
case *MX:
|
||||
compressionLenHelper(c, x.Mx)
|
||||
case *NSEC:
|
||||
compressionLenHelper(c, x.NextDomain)
|
||||
case *DNAME:
|
||||
compressionLenHelper(c, x.Target)
|
||||
case *HIP:
|
||||
for i := range x.RendezvousServers {
|
||||
compressionLenHelper(c, x.RendezvousServers[i])
|
||||
}
|
||||
case *CNAME:
|
||||
compressionLenHelper(c, x.Target)
|
||||
case *MR:
|
||||
compressionLenHelper(c, x.Mr)
|
||||
case *PX:
|
||||
compressionLenHelper(c, x.Map822)
|
||||
compressionLenHelper(c, x.Mapx400)
|
||||
case *SIG:
|
||||
compressionLenHelper(c, x.SignerName)
|
||||
case *SRV:
|
||||
compressionLenHelper(c, x.Target)
|
||||
case *TALINK:
|
||||
compressionLenHelper(c, x.PreviousName)
|
||||
compressionLenHelper(c, x.NextName)
|
||||
case *LP:
|
||||
compressionLenHelper(c, x.Fqdn)
|
||||
case *NAPTR:
|
||||
compressionLenHelper(c, x.Replacement)
|
||||
case *NS:
|
||||
compressionLenHelper(c, x.Ns)
|
||||
case *RP:
|
||||
compressionLenHelper(c, x.Mbox)
|
||||
compressionLenHelper(c, x.Txt)
|
||||
case *RRSIG:
|
||||
compressionLenHelper(c, x.SignerName)
|
||||
case *TKEY:
|
||||
compressionLenHelper(c, x.Algorithm)
|
||||
case *TSIG:
|
||||
compressionLenHelper(c, x.Algorithm)
|
||||
case *AFSDB:
|
||||
compressionLenHelper(c, x.Hostname)
|
||||
case *MF:
|
||||
compressionLenHelper(c, x.Mf)
|
||||
case *RT:
|
||||
compressionLenHelper(c, x.Host)
|
||||
case *MINFO:
|
||||
compressionLenHelper(c, x.Rmail)
|
||||
compressionLenHelper(c, x.Email)
|
||||
case *PTR:
|
||||
compressionLenHelper(c, x.Ptr)
|
||||
case *SOA:
|
||||
compressionLenHelper(c, x.Ns)
|
||||
compressionLenHelper(c, x.Mbox)
|
||||
case *MD:
|
||||
compressionLenHelper(c, x.Md)
|
||||
case *NSAPPTR:
|
||||
compressionLenHelper(c, x.Ptr)
|
||||
case *MG:
|
||||
compressionLenHelper(c, x.Mg)
|
||||
case *MB:
|
||||
compressionLenHelper(c, x.Mb)
|
||||
}
|
||||
}
|
||||
|
||||
func compressionLenSearchType(c map[string]int, r RR) (int, bool) {
|
||||
switch x := r.(type) {
|
||||
case *MF:
|
||||
k1, ok1 := compressionLenSearch(c, x.Mf)
|
||||
return k1, ok1
|
||||
case *MG:
|
||||
k1, ok1 := compressionLenSearch(c, x.Mg)
|
||||
return k1, ok1
|
||||
case *MINFO:
|
||||
k1, ok1 := compressionLenSearch(c, x.Rmail)
|
||||
k2, ok2 := compressionLenSearch(c, x.Email)
|
||||
return k1 + k2, ok1 && ok2
|
||||
case *MR:
|
||||
k1, ok1 := compressionLenSearch(c, x.Mr)
|
||||
return k1, ok1
|
||||
case *PTR:
|
||||
k1, ok1 := compressionLenSearch(c, x.Ptr)
|
||||
return k1, ok1
|
||||
case *AFSDB:
|
||||
k1, ok1 := compressionLenSearch(c, x.Hostname)
|
||||
return k1, ok1
|
||||
case *CNAME:
|
||||
k1, ok1 := compressionLenSearch(c, x.Target)
|
||||
return k1, ok1
|
||||
case *MD:
|
||||
k1, ok1 := compressionLenSearch(c, x.Md)
|
||||
return k1, ok1
|
||||
case *RT:
|
||||
k1, ok1 := compressionLenSearch(c, x.Host)
|
||||
return k1, ok1
|
||||
case *SOA:
|
||||
k1, ok1 := compressionLenSearch(c, x.Ns)
|
||||
k2, ok2 := compressionLenSearch(c, x.Mbox)
|
||||
return k1 + k2, ok1 && ok2
|
||||
case *MB:
|
||||
k1, ok1 := compressionLenSearch(c, x.Mb)
|
||||
return k1, ok1
|
||||
case *MX:
|
||||
k1, ok1 := compressionLenSearch(c, x.Mx)
|
||||
return k1, ok1
|
||||
case *NS:
|
||||
k1, ok1 := compressionLenSearch(c, x.Ns)
|
||||
return k1, ok1
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
58
vendor/github.com/miekg/dns/ztypes.go
generated
vendored
58
vendor/github.com/miekg/dns/ztypes.go
generated
vendored
@@ -254,7 +254,7 @@ func (rr *ANY) len() int {
|
||||
}
|
||||
func (rr *CAA) len() int {
|
||||
l := rr.Hdr.len()
|
||||
l += 1 // Flag
|
||||
l++ // Flag
|
||||
l += len(rr.Tag) + 1
|
||||
l += len(rr.Value)
|
||||
return l
|
||||
@@ -263,7 +263,7 @@ func (rr *CERT) len() int {
|
||||
l := rr.Hdr.len()
|
||||
l += 2 // Type
|
||||
l += 2 // KeyTag
|
||||
l += 1 // Algorithm
|
||||
l++ // Algorithm
|
||||
l += base64.StdEncoding.DecodedLen(len(rr.Certificate))
|
||||
return l
|
||||
}
|
||||
@@ -285,16 +285,16 @@ func (rr *DNAME) len() int {
|
||||
func (rr *DNSKEY) len() int {
|
||||
l := rr.Hdr.len()
|
||||
l += 2 // Flags
|
||||
l += 1 // Protocol
|
||||
l += 1 // Algorithm
|
||||
l++ // Protocol
|
||||
l++ // Algorithm
|
||||
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
|
||||
return l
|
||||
}
|
||||
func (rr *DS) len() int {
|
||||
l := rr.Hdr.len()
|
||||
l += 2 // KeyTag
|
||||
l += 1 // Algorithm
|
||||
l += 1 // DigestType
|
||||
l++ // Algorithm
|
||||
l++ // DigestType
|
||||
l += len(rr.Digest)/2 + 1
|
||||
return l
|
||||
}
|
||||
@@ -333,8 +333,8 @@ func (rr *HINFO) len() int {
|
||||
}
|
||||
func (rr *HIP) len() int {
|
||||
l := rr.Hdr.len()
|
||||
l += 1 // HitLength
|
||||
l += 1 // PublicKeyAlgorithm
|
||||
l++ // HitLength
|
||||
l++ // PublicKeyAlgorithm
|
||||
l += 2 // PublicKeyLength
|
||||
l += len(rr.Hit)/2 + 1
|
||||
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
|
||||
@@ -363,10 +363,10 @@ func (rr *L64) len() int {
|
||||
}
|
||||
func (rr *LOC) len() int {
|
||||
l := rr.Hdr.len()
|
||||
l += 1 // Version
|
||||
l += 1 // Size
|
||||
l += 1 // HorizPre
|
||||
l += 1 // VertPre
|
||||
l++ // Version
|
||||
l++ // Size
|
||||
l++ // HorizPre
|
||||
l++ // VertPre
|
||||
l += 4 // Latitude
|
||||
l += 4 // Longitude
|
||||
l += 4 // Altitude
|
||||
@@ -455,10 +455,10 @@ func (rr *NSAPPTR) len() int {
|
||||
}
|
||||
func (rr *NSEC3PARAM) len() int {
|
||||
l := rr.Hdr.len()
|
||||
l += 1 // Hash
|
||||
l += 1 // Flags
|
||||
l++ // Hash
|
||||
l++ // Flags
|
||||
l += 2 // Iterations
|
||||
l += 1 // SaltLength
|
||||
l++ // SaltLength
|
||||
l += len(rr.Salt)/2 + 1
|
||||
return l
|
||||
}
|
||||
@@ -487,8 +487,8 @@ func (rr *RFC3597) len() int {
|
||||
func (rr *RKEY) len() int {
|
||||
l := rr.Hdr.len()
|
||||
l += 2 // Flags
|
||||
l += 1 // Protocol
|
||||
l += 1 // Algorithm
|
||||
l++ // Protocol
|
||||
l++ // Algorithm
|
||||
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
|
||||
return l
|
||||
}
|
||||
@@ -501,8 +501,8 @@ func (rr *RP) len() int {
|
||||
func (rr *RRSIG) len() int {
|
||||
l := rr.Hdr.len()
|
||||
l += 2 // TypeCovered
|
||||
l += 1 // Algorithm
|
||||
l += 1 // Labels
|
||||
l++ // Algorithm
|
||||
l++ // Labels
|
||||
l += 4 // OrigTtl
|
||||
l += 4 // Expiration
|
||||
l += 4 // Inception
|
||||
@@ -519,9 +519,9 @@ func (rr *RT) len() int {
|
||||
}
|
||||
func (rr *SMIMEA) len() int {
|
||||
l := rr.Hdr.len()
|
||||
l += 1 // Usage
|
||||
l += 1 // Selector
|
||||
l += 1 // MatchingType
|
||||
l++ // Usage
|
||||
l++ // Selector
|
||||
l++ // MatchingType
|
||||
l += len(rr.Certificate)/2 + 1
|
||||
return l
|
||||
}
|
||||
@@ -553,16 +553,16 @@ func (rr *SRV) len() int {
|
||||
}
|
||||
func (rr *SSHFP) len() int {
|
||||
l := rr.Hdr.len()
|
||||
l += 1 // Algorithm
|
||||
l += 1 // Type
|
||||
l++ // Algorithm
|
||||
l++ // Type
|
||||
l += len(rr.FingerPrint)/2 + 1
|
||||
return l
|
||||
}
|
||||
func (rr *TA) len() int {
|
||||
l := rr.Hdr.len()
|
||||
l += 2 // KeyTag
|
||||
l += 1 // Algorithm
|
||||
l += 1 // DigestType
|
||||
l++ // Algorithm
|
||||
l++ // DigestType
|
||||
l += len(rr.Digest)/2 + 1
|
||||
return l
|
||||
}
|
||||
@@ -587,9 +587,9 @@ func (rr *TKEY) len() int {
|
||||
}
|
||||
func (rr *TLSA) len() int {
|
||||
l := rr.Hdr.len()
|
||||
l += 1 // Usage
|
||||
l += 1 // Selector
|
||||
l += 1 // MatchingType
|
||||
l++ // Usage
|
||||
l++ // Selector
|
||||
l++ // MatchingType
|
||||
l += len(rr.Certificate)/2 + 1
|
||||
return l
|
||||
}
|
||||
|
||||
13
vendor/github.com/prometheus/client_model/AUTHORS.md
generated
vendored
13
vendor/github.com/prometheus/client_model/AUTHORS.md
generated
vendored
@@ -1,13 +0,0 @@
|
||||
The Prometheus project was started by Matt T. Proud (emeritus) and
|
||||
Julius Volz in 2012.
|
||||
|
||||
Maintainers of this repository:
|
||||
|
||||
* Björn Rabenstein <beorn@soundcloud.com>
|
||||
|
||||
The following individuals have contributed code to this repository
|
||||
(listed in alphabetical order):
|
||||
|
||||
* Björn Rabenstein <beorn@soundcloud.com>
|
||||
* Matt T. Proud <matt.proud@gmail.com>
|
||||
* Tobias Schmidt <ts@soundcloud.com>
|
||||
8
vendor/github.com/prometheus/client_model/CONTRIBUTING.md
generated
vendored
8
vendor/github.com/prometheus/client_model/CONTRIBUTING.md
generated
vendored
@@ -2,16 +2,16 @@
|
||||
|
||||
Prometheus uses GitHub to manage reviews of pull requests.
|
||||
|
||||
* If you have a trivial fix or improvement, go ahead and create a pull
|
||||
request, addressing (with `@...`) one or more of the maintainers
|
||||
(see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
|
||||
* If you have a trivial fix or improvement, go ahead and create a pull request,
|
||||
addressing (with `@...`) the maintainer of this repository (see
|
||||
[MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
|
||||
|
||||
* If you plan to do something more involved, first discuss your ideas
|
||||
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
|
||||
This will avoid unnecessary work and surely give you and us a good deal
|
||||
of inspiration.
|
||||
|
||||
* Relevant coding style guidelines for the Go parts are the [Go Code Review
|
||||
* Relevant coding style guidelines are the [Go Code Review
|
||||
Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
|
||||
and the _Formatting and style_ section of Peter Bourgon's [Go: Best
|
||||
Practices for Production
|
||||
|
||||
1
vendor/github.com/prometheus/client_model/MAINTAINERS.md
generated
vendored
Normal file
1
vendor/github.com/prometheus/client_model/MAINTAINERS.md
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
* Björn Rabenstein <beorn@soundcloud.com>
|
||||
11
vendor/github.com/prometheus/common/AUTHORS.md
generated
vendored
11
vendor/github.com/prometheus/common/AUTHORS.md
generated
vendored
@@ -1,11 +0,0 @@
|
||||
Maintainers of this repository:
|
||||
|
||||
* Fabian Reinartz <fabian@soundcloud.com>
|
||||
|
||||
The following individuals have contributed code to this repository
|
||||
(listed in alphabetical order):
|
||||
|
||||
* Björn Rabenstein <beorn@soundcloud.com>
|
||||
* Fabian Reinartz <fabian@soundcloud.com>
|
||||
* Julius Volz <julius.volz@gmail.com>
|
||||
* Miguel Molina <hi@mvader.me>
|
||||
6
vendor/github.com/prometheus/common/CONTRIBUTING.md
generated
vendored
6
vendor/github.com/prometheus/common/CONTRIBUTING.md
generated
vendored
@@ -2,9 +2,9 @@
|
||||
|
||||
Prometheus uses GitHub to manage reviews of pull requests.
|
||||
|
||||
* If you have a trivial fix or improvement, go ahead and create a pull
|
||||
request, addressing (with `@...`) one or more of the maintainers
|
||||
(see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
|
||||
* If you have a trivial fix or improvement, go ahead and create a pull request,
|
||||
addressing (with `@...`) the maintainer of this repository (see
|
||||
[MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
|
||||
|
||||
* If you plan to do something more involved, first discuss your ideas
|
||||
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
|
||||
|
||||
1
vendor/github.com/prometheus/common/MAINTAINERS.md
generated
vendored
Normal file
1
vendor/github.com/prometheus/common/MAINTAINERS.md
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
* Fabian Reinartz <fabian.reinartz@coreos.com>
|
||||
5
vendor/github.com/prometheus/common/model/value.go
generated
vendored
5
vendor/github.com/prometheus/common/model/value.go
generated
vendored
@@ -129,11 +129,8 @@ func (s *Sample) Equal(o *Sample) bool {
|
||||
if !s.Timestamp.Equal(o.Timestamp) {
|
||||
return false
|
||||
}
|
||||
if s.Value.Equal(o.Value) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
return s.Value.Equal(o.Value)
|
||||
}
|
||||
|
||||
func (s Sample) String() string {
|
||||
|
||||
53
vendor/github.com/prometheus/common/model/value_test.go
generated
vendored
53
vendor/github.com/prometheus/common/model/value_test.go
generated
vendored
@@ -21,7 +21,7 @@ import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEqual(t *testing.T) {
|
||||
func TestEqualValues(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
in1, in2 SampleValue
|
||||
want bool
|
||||
@@ -76,6 +76,57 @@ func TestEqual(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestEqualSamples(t *testing.T) {
|
||||
testSample := &Sample{}
|
||||
|
||||
tests := map[string]struct {
|
||||
in1, in2 *Sample
|
||||
want bool
|
||||
}{
|
||||
"equal pointers": {
|
||||
in1: testSample,
|
||||
in2: testSample,
|
||||
want: true,
|
||||
},
|
||||
"different metrics": {
|
||||
in1: &Sample{Metric: Metric{"foo": "bar"}},
|
||||
in2: &Sample{Metric: Metric{"foo": "biz"}},
|
||||
want: false,
|
||||
},
|
||||
"different timestamp": {
|
||||
in1: &Sample{Timestamp: 0},
|
||||
in2: &Sample{Timestamp: 1},
|
||||
want: false,
|
||||
},
|
||||
"different value": {
|
||||
in1: &Sample{Value: 0},
|
||||
in2: &Sample{Value: 1},
|
||||
want: false,
|
||||
},
|
||||
"equal samples": {
|
||||
in1: &Sample{
|
||||
Metric: Metric{"foo": "bar"},
|
||||
Timestamp: 0,
|
||||
Value: 1,
|
||||
},
|
||||
in2: &Sample{
|
||||
Metric: Metric{"foo": "bar"},
|
||||
Timestamp: 0,
|
||||
Value: 1,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
got := test.in1.Equal(test.in2)
|
||||
if got != test.want {
|
||||
t.Errorf("Comparing %s, %v and %v: got %t, want %t", name, test.in1, test.in2, got, test.want)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestSamplePairJSON(t *testing.T) {
|
||||
input := []struct {
|
||||
plain string
|
||||
|
||||
21
vendor/github.com/prometheus/procfs/AUTHORS.md
generated
vendored
21
vendor/github.com/prometheus/procfs/AUTHORS.md
generated
vendored
@@ -1,21 +0,0 @@
|
||||
The Prometheus project was started by Matt T. Proud (emeritus) and
|
||||
Julius Volz in 2012.
|
||||
|
||||
Maintainers of this repository:
|
||||
|
||||
* Tobias Schmidt <ts@soundcloud.com>
|
||||
|
||||
The following individuals have contributed code to this repository
|
||||
(listed in alphabetical order):
|
||||
|
||||
* Armen Baghumian <abaghumian@noggin.com.au>
|
||||
* Bjoern Rabenstein <beorn@soundcloud.com>
|
||||
* David Cournapeau <cournape@gmail.com>
|
||||
* Ji-Hoon, Seol <jihoon.seol@gmail.com>
|
||||
* Jonas Große Sundrup <cherti@letopolis.de>
|
||||
* Julius Volz <julius.volz@gmail.com>
|
||||
* Matt Layher <mdlayher@gmail.com>
|
||||
* Matthias Rampke <mr@soundcloud.com>
|
||||
* Nicky Gerritsen <nicky@streamone.nl>
|
||||
* Rémi Audebert <contact@halfr.net>
|
||||
* Tobias Schmidt <tobidt@gmail.com>
|
||||
6
vendor/github.com/prometheus/procfs/CONTRIBUTING.md
generated
vendored
6
vendor/github.com/prometheus/procfs/CONTRIBUTING.md
generated
vendored
@@ -2,9 +2,9 @@
|
||||
|
||||
Prometheus uses GitHub to manage reviews of pull requests.
|
||||
|
||||
* If you have a trivial fix or improvement, go ahead and create a pull
|
||||
request, addressing (with `@...`) one or more of the maintainers
|
||||
(see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
|
||||
* If you have a trivial fix or improvement, go ahead and create a pull request,
|
||||
addressing (with `@...`) the maintainer of this repository (see
|
||||
[MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
|
||||
|
||||
* If you plan to do something more involved, first discuss your ideas
|
||||
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
|
||||
|
||||
1
vendor/github.com/prometheus/procfs/MAINTAINERS.md
generated
vendored
Normal file
1
vendor/github.com/prometheus/procfs/MAINTAINERS.md
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
* Tobias Schmidt <tobidt@gmail.com>
|
||||
1
vendor/github.com/prometheus/procfs/README.md
generated
vendored
1
vendor/github.com/prometheus/procfs/README.md
generated
vendored
@@ -8,3 +8,4 @@ backwards-incompatible ways without warnings. Use it at your own risk.
|
||||
|
||||
[](https://godoc.org/github.com/prometheus/procfs)
|
||||
[](https://travis-ci.org/prometheus/procfs)
|
||||
[](https://goreportcard.com/report/github.com/prometheus/procfs)
|
||||
|
||||
95
vendor/github.com/prometheus/procfs/buddyinfo.go
generated
vendored
Normal file
95
vendor/github.com/prometheus/procfs/buddyinfo.go
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A BuddyInfo is the details parsed from /proc/buddyinfo.
|
||||
// The data is comprised of an array of free fragments of each size.
|
||||
// The sizes are 2^n*PAGE_SIZE, where n is the array index.
|
||||
type BuddyInfo struct {
|
||||
Node string
|
||||
Zone string
|
||||
Sizes []float64
|
||||
}
|
||||
|
||||
// NewBuddyInfo reads the buddyinfo statistics.
|
||||
func NewBuddyInfo() ([]BuddyInfo, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fs.NewBuddyInfo()
|
||||
}
|
||||
|
||||
// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
|
||||
func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
|
||||
file, err := os.Open(fs.Path("buddyinfo"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
return parseBuddyInfo(file)
|
||||
}
|
||||
|
||||
func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
|
||||
var (
|
||||
buddyInfo = []BuddyInfo{}
|
||||
scanner = bufio.NewScanner(r)
|
||||
bucketCount = -1
|
||||
)
|
||||
|
||||
for scanner.Scan() {
|
||||
var err error
|
||||
line := scanner.Text()
|
||||
parts := strings.Fields(string(line))
|
||||
|
||||
if len(parts) < 4 {
|
||||
return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
|
||||
}
|
||||
|
||||
node := strings.TrimRight(parts[1], ",")
|
||||
zone := strings.TrimRight(parts[3], ",")
|
||||
arraySize := len(parts[4:])
|
||||
|
||||
if bucketCount == -1 {
|
||||
bucketCount = arraySize
|
||||
} else {
|
||||
if bucketCount != arraySize {
|
||||
return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
|
||||
}
|
||||
}
|
||||
|
||||
sizes := make([]float64, arraySize)
|
||||
for i := 0; i < arraySize; i++ {
|
||||
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
|
||||
}
|
||||
|
||||
return buddyInfo, scanner.Err()
|
||||
}
|
||||
64
vendor/github.com/prometheus/procfs/buddyinfo_test.go
generated
vendored
Normal file
64
vendor/github.com/prometheus/procfs/buddyinfo_test.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBuddyInfo(t *testing.T) {
|
||||
buddyInfo, err := FS("fixtures/buddyinfo/valid").NewBuddyInfo()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if want, got := "DMA", buddyInfo[0].Zone; want != got {
|
||||
t.Errorf("want Node 0, Zone %s, got %s", want, got)
|
||||
}
|
||||
|
||||
if want, got := "Normal", buddyInfo[2].Zone; want != got {
|
||||
t.Errorf("want Node 0, Zone %s, got %s", want, got)
|
||||
}
|
||||
|
||||
if want, got := 4381.0, buddyInfo[2].Sizes[0]; want != got {
|
||||
t.Errorf("want Node 0, Zone Normal %f, got %f", want, got)
|
||||
}
|
||||
|
||||
if want, got := 572.0, buddyInfo[1].Sizes[1]; want != got {
|
||||
t.Errorf("want Node 0, Zone DMA32 %f, got %f", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuddyInfoShort(t *testing.T) {
|
||||
_, err := FS("fixtures/buddyinfo/short").NewBuddyInfo()
|
||||
if err == nil {
|
||||
t.Errorf("expected error, but none occurred")
|
||||
}
|
||||
|
||||
if want, got := "invalid number of fields when parsing buddyinfo", err.Error(); want != got {
|
||||
t.Errorf("wrong error returned, wanted %q, got %q", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuddyInfoSizeMismatch(t *testing.T) {
|
||||
_, err := FS("fixtures/buddyinfo/sizemismatch").NewBuddyInfo()
|
||||
if err == nil {
|
||||
t.Errorf("expected error, but none occurred")
|
||||
}
|
||||
|
||||
if want, got := "mismatch in number of buddyinfo buckets", err.Error(); !strings.HasPrefix(got, want) {
|
||||
t.Errorf("wrong error returned, wanted prefix %q, got %q", want, got)
|
||||
}
|
||||
}
|
||||
3
vendor/github.com/prometheus/procfs/fixtures/buddyinfo/short/buddyinfo
generated
vendored
Normal file
3
vendor/github.com/prometheus/procfs/fixtures/buddyinfo/short/buddyinfo
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
Node 0, zone
|
||||
Node 0, zone
|
||||
Node 0, zone
|
||||
3
vendor/github.com/prometheus/procfs/fixtures/buddyinfo/sizemismatch/buddyinfo
generated
vendored
Normal file
3
vendor/github.com/prometheus/procfs/fixtures/buddyinfo/sizemismatch/buddyinfo
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
|
||||
Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0
|
||||
Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0
|
||||
3
vendor/github.com/prometheus/procfs/fixtures/buddyinfo/valid/buddyinfo
generated
vendored
Normal file
3
vendor/github.com/prometheus/procfs/fixtures/buddyinfo/valid/buddyinfo
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
|
||||
Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0
|
||||
Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0
|
||||
23
vendor/github.com/prometheus/procfs/fixtures/fs/xfs/stat
generated
vendored
Normal file
23
vendor/github.com/prometheus/procfs/fixtures/fs/xfs/stat
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
extent_alloc 92447 97589 92448 93751
|
||||
abt 0 0 0 0
|
||||
blk_map 1767055 188820 184891 92447 92448 2140766 0
|
||||
bmbt 0 0 0 0
|
||||
dir 185039 92447 92444 136422
|
||||
trans 706 944304 0
|
||||
ig 185045 58807 0 126238 0 33637 22
|
||||
log 2883 113448 9 17360 739
|
||||
push_ail 945014 0 134260 15483 0 3940 464 159985 0 40
|
||||
xstrat 92447 0
|
||||
rw 107739 94045
|
||||
attr 4 0 0 0
|
||||
icluster 8677 7849 135802
|
||||
vnodes 92601 0 0 0 92444 92444 92444 0
|
||||
buf 2666287 7122 2659202 3599 2 7085 0 10297 7085
|
||||
abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147
|
||||
abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023
|
||||
bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
qm 0 0 0 0 0 0 0 0
|
||||
xpc 399724544 92823103 86219234
|
||||
debug 0
|
||||
13
vendor/github.com/prometheus/procfs/fs.go
generated
vendored
13
vendor/github.com/prometheus/procfs/fs.go
generated
vendored
@@ -4,6 +4,8 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/prometheus/procfs/xfs"
|
||||
)
|
||||
|
||||
// FS represents the pseudo-filesystem proc, which provides an interface to
|
||||
@@ -31,3 +33,14 @@ func NewFS(mountPoint string) (FS, error) {
|
||||
func (fs FS) Path(p ...string) string {
|
||||
return path.Join(append([]string{string(fs)}, p...)...)
|
||||
}
|
||||
|
||||
// XFSStats retrieves XFS filesystem runtime statistics.
|
||||
func (fs FS) XFSStats() (*xfs.Stats, error) {
|
||||
f, err := os.Open(fs.Path("fs/xfs/stat"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return xfs.ParseStats(f)
|
||||
}
|
||||
|
||||
13
vendor/github.com/prometheus/procfs/fs_test.go
generated
vendored
13
vendor/github.com/prometheus/procfs/fs_test.go
generated
vendored
@@ -11,3 +11,16 @@ func TestNewFS(t *testing.T) {
|
||||
t.Error("want NewFS to fail if mount point is not a directory")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFSXFSStats(t *testing.T) {
|
||||
stats, err := FS("fixtures").XFSStats()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse XFS stats: %v", err)
|
||||
}
|
||||
|
||||
// Very lightweight test just to sanity check the path used
|
||||
// to open XFS stats. Heavier tests in package xfs.
|
||||
if want, got := uint32(92447), stats.ExtentAllocation.ExtentsAllocated; want != got {
|
||||
t.Errorf("unexpected extents allocated:\nwant: %d\nhave: %d", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
16
vendor/github.com/prometheus/procfs/ipvs_test.go
generated
vendored
16
vendor/github.com/prometheus/procfs/ipvs_test.go
generated
vendored
@@ -14,7 +14,7 @@ var (
|
||||
OutgoingBytes: 0,
|
||||
}
|
||||
expectedIPVSBackendStatuses = []IPVSBackendStatus{
|
||||
IPVSBackendStatus{
|
||||
{
|
||||
LocalAddress: net.ParseIP("192.168.0.22"),
|
||||
LocalPort: 3306,
|
||||
RemoteAddress: net.ParseIP("192.168.82.22"),
|
||||
@@ -24,7 +24,7 @@ var (
|
||||
ActiveConn: 248,
|
||||
InactConn: 2,
|
||||
},
|
||||
IPVSBackendStatus{
|
||||
{
|
||||
LocalAddress: net.ParseIP("192.168.0.22"),
|
||||
LocalPort: 3306,
|
||||
RemoteAddress: net.ParseIP("192.168.83.24"),
|
||||
@@ -34,7 +34,7 @@ var (
|
||||
ActiveConn: 248,
|
||||
InactConn: 2,
|
||||
},
|
||||
IPVSBackendStatus{
|
||||
{
|
||||
LocalAddress: net.ParseIP("192.168.0.22"),
|
||||
LocalPort: 3306,
|
||||
RemoteAddress: net.ParseIP("192.168.83.21"),
|
||||
@@ -44,7 +44,7 @@ var (
|
||||
ActiveConn: 248,
|
||||
InactConn: 1,
|
||||
},
|
||||
IPVSBackendStatus{
|
||||
{
|
||||
LocalAddress: net.ParseIP("192.168.0.57"),
|
||||
LocalPort: 3306,
|
||||
RemoteAddress: net.ParseIP("192.168.84.22"),
|
||||
@@ -54,7 +54,7 @@ var (
|
||||
ActiveConn: 0,
|
||||
InactConn: 0,
|
||||
},
|
||||
IPVSBackendStatus{
|
||||
{
|
||||
LocalAddress: net.ParseIP("192.168.0.57"),
|
||||
LocalPort: 3306,
|
||||
RemoteAddress: net.ParseIP("192.168.82.21"),
|
||||
@@ -64,7 +64,7 @@ var (
|
||||
ActiveConn: 1499,
|
||||
InactConn: 0,
|
||||
},
|
||||
IPVSBackendStatus{
|
||||
{
|
||||
LocalAddress: net.ParseIP("192.168.0.57"),
|
||||
LocalPort: 3306,
|
||||
RemoteAddress: net.ParseIP("192.168.50.21"),
|
||||
@@ -74,7 +74,7 @@ var (
|
||||
ActiveConn: 1498,
|
||||
InactConn: 0,
|
||||
},
|
||||
IPVSBackendStatus{
|
||||
{
|
||||
LocalAddress: net.ParseIP("192.168.0.55"),
|
||||
LocalPort: 3306,
|
||||
RemoteAddress: net.ParseIP("192.168.50.26"),
|
||||
@@ -84,7 +84,7 @@ var (
|
||||
ActiveConn: 0,
|
||||
InactConn: 0,
|
||||
},
|
||||
IPVSBackendStatus{
|
||||
{
|
||||
LocalAddress: net.ParseIP("192.168.0.55"),
|
||||
LocalPort: 3306,
|
||||
RemoteAddress: net.ParseIP("192.168.49.32"),
|
||||
|
||||
14
vendor/github.com/prometheus/procfs/mdstat_test.go
generated
vendored
14
vendor/github.com/prometheus/procfs/mdstat_test.go
generated
vendored
@@ -11,13 +11,13 @@ func TestMDStat(t *testing.T) {
|
||||
}
|
||||
|
||||
refs := map[string]MDStat{
|
||||
"md3": MDStat{"md3", "active", 8, 8, 5853468288, 5853468288},
|
||||
"md127": MDStat{"md127", "active", 2, 2, 312319552, 312319552},
|
||||
"md0": MDStat{"md0", "active", 2, 2, 248896, 248896},
|
||||
"md4": MDStat{"md4", "inactive", 2, 2, 4883648, 4883648},
|
||||
"md6": MDStat{"md6", "active", 1, 2, 195310144, 16775552},
|
||||
"md8": MDStat{"md8", "active", 2, 2, 195310144, 16775552},
|
||||
"md7": MDStat{"md7", "active", 3, 4, 7813735424, 7813735424},
|
||||
"md3": {"md3", "active", 8, 8, 5853468288, 5853468288},
|
||||
"md127": {"md127", "active", 2, 2, 312319552, 312319552},
|
||||
"md0": {"md0", "active", 2, 2, 248896, 248896},
|
||||
"md4": {"md4", "inactive", 2, 2, 4883648, 4883648},
|
||||
"md6": {"md6", "active", 1, 2, 195310144, 16775552},
|
||||
"md8": {"md8", "active", 2, 2, 195310144, 16775552},
|
||||
"md7": {"md7", "active", 3, 4, 7813735424, 7813735424},
|
||||
}
|
||||
|
||||
if want, have := len(refs), len(mdStates); want != have {
|
||||
|
||||
4
vendor/github.com/prometheus/procfs/mountstats.go
generated
vendored
4
vendor/github.com/prometheus/procfs/mountstats.go
generated
vendored
@@ -123,7 +123,7 @@ type NFSEventsStats struct {
|
||||
VFSFlush uint64
|
||||
// Number of times fsync() has been called on directories and files.
|
||||
VFSFsync uint64
|
||||
// Number of times locking has been attemped on a file.
|
||||
// Number of times locking has been attempted on a file.
|
||||
VFSLock uint64
|
||||
// Number of times files have been closed and released.
|
||||
VFSFileRelease uint64
|
||||
@@ -356,7 +356,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
|
||||
}
|
||||
|
||||
// When encountering "per-operation statistics", we must break this
|
||||
// loop and parse them seperately to ensure we can terminate parsing
|
||||
// loop and parse them separately to ensure we can terminate parsing
|
||||
// before reaching another device entry; hence why this 'if' statement
|
||||
// is not just another switch case
|
||||
if ss[0] == fieldPerOpStats {
|
||||
|
||||
9
vendor/github.com/prometheus/procfs/mountstats_test.go
generated
vendored
9
vendor/github.com/prometheus/procfs/mountstats_test.go
generated
vendored
@@ -12,7 +12,6 @@ func TestMountStats(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
s string
|
||||
fs bool
|
||||
mounts []*Mount
|
||||
invalid bool
|
||||
}{
|
||||
@@ -113,7 +112,6 @@ func TestMountStats(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "fixtures OK",
|
||||
fs: true,
|
||||
mounts: []*Mount{
|
||||
{
|
||||
Device: "rootfs",
|
||||
@@ -201,10 +199,9 @@ func TestMountStats(t *testing.T) {
|
||||
|
||||
if tt.s != "" {
|
||||
mounts, err = parseMountStats(strings.NewReader(tt.s))
|
||||
}
|
||||
if tt.fs {
|
||||
proc, err := FS("fixtures").NewProc(26231)
|
||||
if err != nil {
|
||||
} else {
|
||||
proc, e := FS("fixtures").NewProc(26231)
|
||||
if e != nil {
|
||||
t.Fatalf("failed to create proc: %v", err)
|
||||
}
|
||||
|
||||
|
||||
361
vendor/github.com/prometheus/procfs/xfs/parse.go
generated
vendored
Normal file
361
vendor/github.com/prometheus/procfs/xfs/parse.go
generated
vendored
Normal file
@@ -0,0 +1,361 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package xfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ParseStats parses a Stats from an input io.Reader, using the format
|
||||
// found in /proc/fs/xfs/stat.
|
||||
func ParseStats(r io.Reader) (*Stats, error) {
|
||||
const (
|
||||
// Fields parsed into stats structures.
|
||||
fieldExtentAlloc = "extent_alloc"
|
||||
fieldAbt = "abt"
|
||||
fieldBlkMap = "blk_map"
|
||||
fieldBmbt = "bmbt"
|
||||
fieldDir = "dir"
|
||||
fieldTrans = "trans"
|
||||
fieldIg = "ig"
|
||||
fieldLog = "log"
|
||||
fieldRw = "rw"
|
||||
fieldAttr = "attr"
|
||||
fieldIcluster = "icluster"
|
||||
fieldVnodes = "vnodes"
|
||||
fieldBuf = "buf"
|
||||
fieldXpc = "xpc"
|
||||
|
||||
// Unimplemented at this time due to lack of documentation.
|
||||
fieldPushAil = "push_ail"
|
||||
fieldXstrat = "xstrat"
|
||||
fieldAbtb2 = "abtb2"
|
||||
fieldAbtc2 = "abtc2"
|
||||
fieldBmbt2 = "bmbt2"
|
||||
fieldIbt2 = "ibt2"
|
||||
fieldFibt2 = "fibt2"
|
||||
fieldQm = "qm"
|
||||
fieldDebug = "debug"
|
||||
)
|
||||
|
||||
var xfss Stats
|
||||
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
// Expect at least a string label and a single integer value, ex:
|
||||
// - abt 0
|
||||
// - rw 1 2
|
||||
ss := strings.Fields(string(s.Bytes()))
|
||||
if len(ss) < 2 {
|
||||
continue
|
||||
}
|
||||
label := ss[0]
|
||||
|
||||
// Extended precision counters are uint64 values.
|
||||
if label == fieldXpc {
|
||||
us, err := parseUint64s(ss[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// All other counters are uint32 values.
|
||||
us, err := parseUint32s(ss[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch label {
|
||||
case fieldExtentAlloc:
|
||||
xfss.ExtentAllocation, err = extentAllocationStats(us)
|
||||
case fieldAbt:
|
||||
xfss.AllocationBTree, err = btreeStats(us)
|
||||
case fieldBlkMap:
|
||||
xfss.BlockMapping, err = blockMappingStats(us)
|
||||
case fieldBmbt:
|
||||
xfss.BlockMapBTree, err = btreeStats(us)
|
||||
case fieldDir:
|
||||
xfss.DirectoryOperation, err = directoryOperationStats(us)
|
||||
case fieldTrans:
|
||||
xfss.Transaction, err = transactionStats(us)
|
||||
case fieldIg:
|
||||
xfss.InodeOperation, err = inodeOperationStats(us)
|
||||
case fieldLog:
|
||||
xfss.LogOperation, err = logOperationStats(us)
|
||||
case fieldRw:
|
||||
xfss.ReadWrite, err = readWriteStats(us)
|
||||
case fieldAttr:
|
||||
xfss.AttributeOperation, err = attributeOperationStats(us)
|
||||
case fieldIcluster:
|
||||
xfss.InodeClustering, err = inodeClusteringStats(us)
|
||||
case fieldVnodes:
|
||||
xfss.Vnode, err = vnodeStats(us)
|
||||
case fieldBuf:
|
||||
xfss.Buffer, err = bufferStats(us)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &xfss, s.Err()
|
||||
}
|
||||
|
||||
// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
|
||||
func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
|
||||
if l := len(us); l != 4 {
|
||||
return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
|
||||
}
|
||||
|
||||
return ExtentAllocationStats{
|
||||
ExtentsAllocated: us[0],
|
||||
BlocksAllocated: us[1],
|
||||
ExtentsFreed: us[2],
|
||||
BlocksFreed: us[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// btreeStats builds a BTreeStats from a slice of uint32s.
|
||||
func btreeStats(us []uint32) (BTreeStats, error) {
|
||||
if l := len(us); l != 4 {
|
||||
return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
|
||||
}
|
||||
|
||||
return BTreeStats{
|
||||
Lookups: us[0],
|
||||
Compares: us[1],
|
||||
RecordsInserted: us[2],
|
||||
RecordsDeleted: us[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// BlockMappingStat builds a BlockMappingStats from a slice of uint32s.
|
||||
func blockMappingStats(us []uint32) (BlockMappingStats, error) {
|
||||
if l := len(us); l != 7 {
|
||||
return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
|
||||
}
|
||||
|
||||
return BlockMappingStats{
|
||||
Reads: us[0],
|
||||
Writes: us[1],
|
||||
Unmaps: us[2],
|
||||
ExtentListInsertions: us[3],
|
||||
ExtentListDeletions: us[4],
|
||||
ExtentListLookups: us[5],
|
||||
ExtentListCompares: us[6],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
|
||||
func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
|
||||
if l := len(us); l != 4 {
|
||||
return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
|
||||
}
|
||||
|
||||
return DirectoryOperationStats{
|
||||
Lookups: us[0],
|
||||
Creates: us[1],
|
||||
Removes: us[2],
|
||||
Getdents: us[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// TransactionStats builds a TransactionStats from a slice of uint32s.
|
||||
func transactionStats(us []uint32) (TransactionStats, error) {
|
||||
if l := len(us); l != 3 {
|
||||
return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
|
||||
}
|
||||
|
||||
return TransactionStats{
|
||||
Sync: us[0],
|
||||
Async: us[1],
|
||||
Empty: us[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// InodeOperationStats builds an InodeOperationStats from a slice of uint32s.
|
||||
func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
|
||||
if l := len(us); l != 7 {
|
||||
return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
|
||||
}
|
||||
|
||||
return InodeOperationStats{
|
||||
Attempts: us[0],
|
||||
Found: us[1],
|
||||
Recycle: us[2],
|
||||
Missed: us[3],
|
||||
Duplicate: us[4],
|
||||
Reclaims: us[5],
|
||||
AttributeChange: us[6],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// LogOperationStats builds a LogOperationStats from a slice of uint32s.
|
||||
func logOperationStats(us []uint32) (LogOperationStats, error) {
|
||||
if l := len(us); l != 5 {
|
||||
return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
|
||||
}
|
||||
|
||||
return LogOperationStats{
|
||||
Writes: us[0],
|
||||
Blocks: us[1],
|
||||
NoInternalBuffers: us[2],
|
||||
Force: us[3],
|
||||
ForceSleep: us[4],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ReadWriteStats builds a ReadWriteStats from a slice of uint32s.
|
||||
func readWriteStats(us []uint32) (ReadWriteStats, error) {
|
||||
if l := len(us); l != 2 {
|
||||
return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
|
||||
}
|
||||
|
||||
return ReadWriteStats{
|
||||
Read: us[0],
|
||||
Write: us[1],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
|
||||
func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
|
||||
if l := len(us); l != 4 {
|
||||
return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
|
||||
}
|
||||
|
||||
return AttributeOperationStats{
|
||||
Get: us[0],
|
||||
Set: us[1],
|
||||
Remove: us[2],
|
||||
List: us[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
|
||||
func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
|
||||
if l := len(us); l != 3 {
|
||||
return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
|
||||
}
|
||||
|
||||
return InodeClusteringStats{
|
||||
Iflush: us[0],
|
||||
Flush: us[1],
|
||||
FlushInode: us[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// VnodeStats builds a VnodeStats from a slice of uint32s.
|
||||
func vnodeStats(us []uint32) (VnodeStats, error) {
|
||||
// The attribute "Free" appears to not be available on older XFS
|
||||
// stats versions. Therefore, 7 or 8 elements may appear in
|
||||
// this slice.
|
||||
l := len(us)
|
||||
log.Println(l)
|
||||
if l != 7 && l != 8 {
|
||||
return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
|
||||
}
|
||||
|
||||
s := VnodeStats{
|
||||
Active: us[0],
|
||||
Allocate: us[1],
|
||||
Get: us[2],
|
||||
Hold: us[3],
|
||||
Release: us[4],
|
||||
Reclaim: us[5],
|
||||
Remove: us[6],
|
||||
}
|
||||
|
||||
// Skip adding free, unless it is present. The zero value will
|
||||
// be used in place of an actual count.
|
||||
if l == 7 {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
s.Free = us[7]
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// BufferStats builds a BufferStats from a slice of uint32s.
|
||||
func bufferStats(us []uint32) (BufferStats, error) {
|
||||
if l := len(us); l != 9 {
|
||||
return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
|
||||
}
|
||||
|
||||
return BufferStats{
|
||||
Get: us[0],
|
||||
Create: us[1],
|
||||
GetLocked: us[2],
|
||||
GetLockedWaited: us[3],
|
||||
BusyLocked: us[4],
|
||||
MissLocked: us[5],
|
||||
PageRetries: us[6],
|
||||
PageFound: us[7],
|
||||
GetRead: us[8],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s.
|
||||
func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
|
||||
if l := len(us); l != 3 {
|
||||
return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
|
||||
}
|
||||
|
||||
return ExtendedPrecisionStats{
|
||||
FlushBytes: us[0],
|
||||
WriteBytes: us[1],
|
||||
ReadBytes: us[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseUint32s parses a slice of strings into a slice of uint32s.
// The first value that fails to parse aborts the conversion.
func parseUint32s(ss []string) ([]uint32, error) {
	parsed := make([]uint32, 0, len(ss))
	for _, str := range ss {
		v, err := strconv.ParseUint(str, 10, 32)
		if err != nil {
			return nil, err
		}
		parsed = append(parsed, uint32(v))
	}

	return parsed, nil
}
|
||||
|
||||
// parseUint64s parses a slice of strings into a slice of uint64s.
// The first value that fails to parse aborts the conversion.
func parseUint64s(ss []string) ([]uint64, error) {
	parsed := make([]uint64, 0, len(ss))
	for _, str := range ss {
		v, err := strconv.ParseUint(str, 10, 64)
		if err != nil {
			return nil, err
		}
		parsed = append(parsed, v)
	}

	return parsed, nil
}
|
||||
446
vendor/github.com/prometheus/procfs/xfs/parse_test.go
generated
vendored
Normal file
446
vendor/github.com/prometheus/procfs/xfs/parse_test.go
generated
vendored
Normal file
@@ -0,0 +1,446 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package xfs_test
|
||||
|
||||
import (
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/procfs"
|
||||
"github.com/prometheus/procfs/xfs"
|
||||
)
|
||||
|
||||
// TestParseStats exercises xfs.ParseStats with a table of inputs: empty
// input, ignorable lines, malformed values, one well-formed case per stat
// label, and finally a fixture file read through procfs.FS.
func TestParseStats(t *testing.T) {
	tests := []struct {
		name    string     // descriptive test case name
		s       string     // literal stat-file contents fed to ParseStats (skipped when empty)
		fs      bool       // read from ../fixtures via procfs.FS instead of s
		stats   *xfs.Stats // expected parse result (nil when no parse is performed)
		invalid bool       // true when an error is expected
	}{
		{
			name: "empty file OK",
		},
		{
			name:  "short or empty lines and unknown labels ignored",
			s:     "one\n\ntwo 1 2 3\n",
			stats: &xfs.Stats{},
		},
		{
			name:    "bad uint32",
			s:       "extent_alloc XXX",
			invalid: true,
		},
		{
			name:    "bad uint64",
			s:       "xpc XXX",
			invalid: true,
		},
		{
			name:    "extent_alloc bad",
			s:       "extent_alloc 1",
			invalid: true,
		},
		{
			name: "extent_alloc OK",
			s:    "extent_alloc 1 2 3 4",
			stats: &xfs.Stats{
				ExtentAllocation: xfs.ExtentAllocationStats{
					ExtentsAllocated: 1,
					BlocksAllocated:  2,
					ExtentsFreed:     3,
					BlocksFreed:      4,
				},
			},
		},
		{
			name:    "abt bad",
			s:       "abt 1",
			invalid: true,
		},
		{
			name: "abt OK",
			s:    "abt 1 2 3 4",
			stats: &xfs.Stats{
				AllocationBTree: xfs.BTreeStats{
					Lookups:         1,
					Compares:        2,
					RecordsInserted: 3,
					RecordsDeleted:  4,
				},
			},
		},
		{
			name:    "blk_map bad",
			s:       "blk_map 1",
			invalid: true,
		},
		{
			name: "blk_map OK",
			s:    "blk_map 1 2 3 4 5 6 7",
			stats: &xfs.Stats{
				BlockMapping: xfs.BlockMappingStats{
					Reads:                1,
					Writes:               2,
					Unmaps:               3,
					ExtentListInsertions: 4,
					ExtentListDeletions:  5,
					ExtentListLookups:    6,
					ExtentListCompares:   7,
				},
			},
		},
		{
			name:    "bmbt bad",
			s:       "bmbt 1",
			invalid: true,
		},
		{
			name: "bmbt OK",
			s:    "bmbt 1 2 3 4",
			stats: &xfs.Stats{
				BlockMapBTree: xfs.BTreeStats{
					Lookups:         1,
					Compares:        2,
					RecordsInserted: 3,
					RecordsDeleted:  4,
				},
			},
		},
		{
			name:    "dir bad",
			s:       "dir 1",
			invalid: true,
		},
		{
			name: "dir OK",
			s:    "dir 1 2 3 4",
			stats: &xfs.Stats{
				DirectoryOperation: xfs.DirectoryOperationStats{
					Lookups:  1,
					Creates:  2,
					Removes:  3,
					Getdents: 4,
				},
			},
		},
		{
			name:    "trans bad",
			s:       "trans 1",
			invalid: true,
		},
		{
			name: "trans OK",
			s:    "trans 1 2 3",
			stats: &xfs.Stats{
				Transaction: xfs.TransactionStats{
					Sync:  1,
					Async: 2,
					Empty: 3,
				},
			},
		},
		{
			name:    "ig bad",
			s:       "ig 1",
			invalid: true,
		},
		{
			name: "ig OK",
			s:    "ig 1 2 3 4 5 6 7",
			stats: &xfs.Stats{
				InodeOperation: xfs.InodeOperationStats{
					Attempts:        1,
					Found:           2,
					Recycle:         3,
					Missed:          4,
					Duplicate:       5,
					Reclaims:        6,
					AttributeChange: 7,
				},
			},
		},
		{
			name:    "log bad",
			s:       "log 1",
			invalid: true,
		},
		{
			name: "log OK",
			s:    "log 1 2 3 4 5",
			stats: &xfs.Stats{
				LogOperation: xfs.LogOperationStats{
					Writes:            1,
					Blocks:            2,
					NoInternalBuffers: 3,
					Force:             4,
					ForceSleep:        5,
				},
			},
		},
		{
			name:    "rw bad",
			s:       "rw 1",
			invalid: true,
		},
		{
			name: "rw OK",
			s:    "rw 1 2",
			stats: &xfs.Stats{
				ReadWrite: xfs.ReadWriteStats{
					Read:  1,
					Write: 2,
				},
			},
		},
		{
			name:    "attr bad",
			s:       "attr 1",
			invalid: true,
		},
		{
			name: "attr OK",
			s:    "attr 1 2 3 4",
			stats: &xfs.Stats{
				AttributeOperation: xfs.AttributeOperationStats{
					Get:    1,
					Set:    2,
					Remove: 3,
					List:   4,
				},
			},
		},
		{
			name:    "icluster bad",
			s:       "icluster 1",
			invalid: true,
		},
		{
			name: "icluster OK",
			s:    "icluster 1 2 3",
			stats: &xfs.Stats{
				InodeClustering: xfs.InodeClusteringStats{
					Iflush:     1,
					Flush:      2,
					FlushInode: 3,
				},
			},
		},
		{
			name:    "vnodes bad",
			s:       "vnodes 1",
			invalid: true,
		},
		{
			// Older stat formats omit the trailing Free counter.
			name: "vnodes (missing free) OK",
			s:    "vnodes 1 2 3 4 5 6 7",
			stats: &xfs.Stats{
				Vnode: xfs.VnodeStats{
					Active:   1,
					Allocate: 2,
					Get:      3,
					Hold:     4,
					Release:  5,
					Reclaim:  6,
					Remove:   7,
				},
			},
		},
		{
			name: "vnodes (with free) OK",
			s:    "vnodes 1 2 3 4 5 6 7 8",
			stats: &xfs.Stats{
				Vnode: xfs.VnodeStats{
					Active:   1,
					Allocate: 2,
					Get:      3,
					Hold:     4,
					Release:  5,
					Reclaim:  6,
					Remove:   7,
					Free:     8,
				},
			},
		},
		{
			name:    "buf bad",
			s:       "buf 1",
			invalid: true,
		},
		{
			name: "buf OK",
			s:    "buf 1 2 3 4 5 6 7 8 9",
			stats: &xfs.Stats{
				Buffer: xfs.BufferStats{
					Get:             1,
					Create:          2,
					GetLocked:       3,
					GetLockedWaited: 4,
					BusyLocked:      5,
					MissLocked:      6,
					PageRetries:     7,
					PageFound:       8,
					GetRead:         9,
				},
			},
		},
		{
			name:    "xpc bad",
			s:       "xpc 1",
			invalid: true,
		},
		{
			name: "xpc OK",
			s:    "xpc 1 2 3",
			stats: &xfs.Stats{
				ExtendedPrecision: xfs.ExtendedPrecisionStats{
					FlushBytes: 1,
					WriteBytes: 2,
					ReadBytes:  3,
				},
			},
		},
		{
			// End-to-end: parse the fixture stat file shipped with the
			// package via the procfs.FS entry point.
			name: "fixtures OK",
			fs:   true,
			stats: &xfs.Stats{
				ExtentAllocation: xfs.ExtentAllocationStats{
					ExtentsAllocated: 92447,
					BlocksAllocated:  97589,
					ExtentsFreed:     92448,
					BlocksFreed:      93751,
				},
				AllocationBTree: xfs.BTreeStats{
					Lookups:         0,
					Compares:        0,
					RecordsInserted: 0,
					RecordsDeleted:  0,
				},
				BlockMapping: xfs.BlockMappingStats{
					Reads:                1767055,
					Writes:               188820,
					Unmaps:               184891,
					ExtentListInsertions: 92447,
					ExtentListDeletions:  92448,
					ExtentListLookups:    2140766,
					ExtentListCompares:   0,
				},
				BlockMapBTree: xfs.BTreeStats{
					Lookups:         0,
					Compares:        0,
					RecordsInserted: 0,
					RecordsDeleted:  0,
				},
				DirectoryOperation: xfs.DirectoryOperationStats{
					Lookups:  185039,
					Creates:  92447,
					Removes:  92444,
					Getdents: 136422,
				},
				Transaction: xfs.TransactionStats{
					Sync:  706,
					Async: 944304,
					Empty: 0,
				},
				InodeOperation: xfs.InodeOperationStats{
					Attempts:        185045,
					Found:           58807,
					Recycle:         0,
					Missed:          126238,
					Duplicate:       0,
					Reclaims:        33637,
					AttributeChange: 22,
				},
				LogOperation: xfs.LogOperationStats{
					Writes:            2883,
					Blocks:            113448,
					NoInternalBuffers: 9,
					Force:             17360,
					ForceSleep:        739,
				},
				ReadWrite: xfs.ReadWriteStats{
					Read:  107739,
					Write: 94045,
				},
				AttributeOperation: xfs.AttributeOperationStats{
					Get:    4,
					Set:    0,
					Remove: 0,
					List:   0,
				},
				InodeClustering: xfs.InodeClusteringStats{
					Iflush:     8677,
					Flush:      7849,
					FlushInode: 135802,
				},
				Vnode: xfs.VnodeStats{
					Active:   92601,
					Allocate: 0,
					Get:      0,
					Hold:     0,
					Release:  92444,
					Reclaim:  92444,
					Remove:   92444,
					Free:     0,
				},
				Buffer: xfs.BufferStats{
					Get:             2666287,
					Create:          7122,
					GetLocked:       2659202,
					GetLockedWaited: 3599,
					BusyLocked:      2,
					MissLocked:      7085,
					PageRetries:     0,
					PageFound:       10297,
					GetRead:         7085,
				},
				ExtendedPrecision: xfs.ExtendedPrecisionStats{
					FlushBytes: 399724544,
					WriteBytes: 92823103,
					ReadBytes:  86219234,
				},
			},
		},
	}

	for i, tt := range tests {
		t.Logf("[%02d] test %q", i, tt.name)

		var (
			stats *xfs.Stats
			err   error
		)

		// Parse either from the literal string or the fixture tree;
		// cases that set neither leave stats nil (the "empty" case).
		if tt.s != "" {
			stats, err = xfs.ParseStats(strings.NewReader(tt.s))
		}
		if tt.fs {
			stats, err = procfs.FS("../fixtures").XFSStats()
		}

		if tt.invalid && err == nil {
			t.Error("expected an error, but none occurred")
		}
		if !tt.invalid && err != nil {
			t.Errorf("unexpected error: %v", err)
		}

		if want, have := tt.stats, stats; !reflect.DeepEqual(want, have) {
			// NOTE(review): debug leftover — duplicates the t.Errorf
			// below and is the only use of the "log" import; consider
			// removing both together.
			log.Printf("stats: %#v", have)
			t.Errorf("unexpected XFS stats:\nwant:\n%v\nhave:\n%v", want, have)
		}
	}
}
|
||||
158
vendor/github.com/prometheus/procfs/xfs/xfs.go
generated
vendored
Normal file
158
vendor/github.com/prometheus/procfs/xfs/xfs.go
generated
vendored
Normal file
@@ -0,0 +1,158 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package xfs provides access to statistics exposed by the XFS filesystem.
|
||||
package xfs
|
||||
|
||||
// Stats contains XFS filesystem runtime statistics, parsed from
// /proc/fs/xfs/stat.
//
// The names and meanings of each statistic were taken from
// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
// kernel source. Most counters are uint32s (same data types used in
// xfs_stats.h), but some of the "extended precision stats" are uint64s.
type Stats struct {
	ExtentAllocation   ExtentAllocationStats   // "extent_alloc" line
	AllocationBTree    BTreeStats              // "abt" line
	BlockMapping       BlockMappingStats       // "blk_map" line
	BlockMapBTree      BTreeStats              // "bmbt" line
	DirectoryOperation DirectoryOperationStats // "dir" line
	Transaction        TransactionStats        // "trans" line
	InodeOperation     InodeOperationStats     // "ig" line
	LogOperation       LogOperationStats       // "log" line
	ReadWrite          ReadWriteStats          // "rw" line
	AttributeOperation AttributeOperationStats // "attr" line
	InodeClustering    InodeClusteringStats    // "icluster" line
	Vnode              VnodeStats              // "vnodes" line
	Buffer             BufferStats             // "buf" line
	ExtendedPrecision  ExtendedPrecisionStats  // "xpc" line (uint64 counters)
}
|
||||
|
||||
// ExtentAllocationStats contains statistics regarding XFS extent allocations.
// Parsed from the "extent_alloc" line of /proc/fs/xfs/stat.
type ExtentAllocationStats struct {
	ExtentsAllocated uint32
	BlocksAllocated  uint32
	ExtentsFreed     uint32
	BlocksFreed      uint32
}
|
||||
|
||||
// BTreeStats contains statistics regarding an XFS internal B-tree.
// Shared by both the "abt" (allocation B-tree) and "bmbt" (block-map
// B-tree) lines of /proc/fs/xfs/stat.
type BTreeStats struct {
	Lookups         uint32
	Compares        uint32
	RecordsInserted uint32
	RecordsDeleted  uint32
}
|
||||
|
||||
// BlockMappingStats contains statistics regarding XFS block maps.
// Parsed from the "blk_map" line of /proc/fs/xfs/stat.
type BlockMappingStats struct {
	Reads                uint32
	Writes               uint32
	Unmaps               uint32
	ExtentListInsertions uint32
	ExtentListDeletions  uint32
	ExtentListLookups    uint32
	ExtentListCompares   uint32
}
|
||||
|
||||
// DirectoryOperationStats contains statistics regarding XFS directory entries.
// Parsed from the "dir" line of /proc/fs/xfs/stat.
type DirectoryOperationStats struct {
	Lookups  uint32
	Creates  uint32
	Removes  uint32
	Getdents uint32
}
|
||||
|
||||
// TransactionStats contains statistics regarding XFS metadata transactions.
// Parsed from the "trans" line of /proc/fs/xfs/stat.
type TransactionStats struct {
	Sync  uint32
	Async uint32
	Empty uint32
}
|
||||
|
||||
// InodeOperationStats contains statistics regarding XFS inode operations.
// Parsed from the "ig" line of /proc/fs/xfs/stat.
type InodeOperationStats struct {
	Attempts        uint32
	Found           uint32
	Recycle         uint32
	Missed          uint32
	Duplicate       uint32
	Reclaims        uint32
	AttributeChange uint32
}
|
||||
|
||||
// LogOperationStats contains statistics regarding the XFS log buffer.
// Parsed from the "log" line of /proc/fs/xfs/stat.
type LogOperationStats struct {
	Writes            uint32
	Blocks            uint32
	NoInternalBuffers uint32
	Force             uint32
	ForceSleep        uint32
}
|
||||
|
||||
// ReadWriteStats contains statistics regarding the number of read and write
// system calls for XFS filesystems.
// Parsed from the "rw" line of /proc/fs/xfs/stat.
type ReadWriteStats struct {
	Read  uint32
	Write uint32
}
|
||||
|
||||
// AttributeOperationStats contains statistics regarding manipulation of
// XFS extended file attributes.
// Parsed from the "attr" line of /proc/fs/xfs/stat.
type AttributeOperationStats struct {
	Get    uint32
	Set    uint32
	Remove uint32
	List   uint32
}
|
||||
|
||||
// InodeClusteringStats contains statistics regarding XFS inode clustering
// operations.
// Parsed from the "icluster" line of /proc/fs/xfs/stat.
type InodeClusteringStats struct {
	Iflush     uint32
	Flush      uint32
	FlushInode uint32
}
|
||||
|
||||
// VnodeStats contains statistics regarding XFS vnode operations.
// Parsed from the "vnodes" line of /proc/fs/xfs/stat.
type VnodeStats struct {
	Active   uint32
	Allocate uint32
	Get      uint32
	Hold     uint32
	Release  uint32
	Reclaim  uint32
	Remove   uint32
	// Free is absent on older XFS stats versions; it remains zero
	// when the source line carries only seven values.
	Free uint32
}
|
||||
|
||||
// BufferStats contains statistics regarding XFS read/write I/O buffers.
// Parsed from the "buf" line of /proc/fs/xfs/stat.
type BufferStats struct {
	Get             uint32
	Create          uint32
	GetLocked       uint32
	GetLockedWaited uint32
	BusyLocked      uint32
	MissLocked      uint32
	PageRetries     uint32
	PageFound       uint32
	GetRead         uint32
}
|
||||
|
||||
// ExtendedPrecisionStats contains high precision counters used to track the
// total number of bytes read, written, or flushed, during XFS operations.
// Parsed from the "xpc" line of /proc/fs/xfs/stat; unlike the other stats,
// these counters are uint64s.
type ExtendedPrecisionStats struct {
	FlushBytes uint64
	WriteBytes uint64
	ReadBytes  uint64
}
|
||||
5
vendor/github.com/spf13/cobra/.travis.yml
generated
vendored
5
vendor/github.com/spf13/cobra/.travis.yml
generated
vendored
@@ -5,8 +5,9 @@ matrix:
|
||||
- go: 1.4.3
|
||||
env: NOVET=true # No bundled vet.
|
||||
- go: 1.5.4
|
||||
- go: 1.6.3
|
||||
- go: 1.7
|
||||
- go: 1.6.4
|
||||
- go: 1.7.5
|
||||
- go: 1.8
|
||||
- go: tip
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
||||
3
vendor/github.com/spf13/cobra/README.md
generated
vendored
3
vendor/github.com/spf13/cobra/README.md
generated
vendored
@@ -8,6 +8,7 @@ Many of the most widely used Go projects are built using Cobra including:
|
||||
* [Hugo](http://gohugo.io)
|
||||
* [rkt](https://github.com/coreos/rkt)
|
||||
* [etcd](https://github.com/coreos/etcd)
|
||||
* [Docker](https://github.com/docker/docker)
|
||||
* [Docker (distribution)](https://github.com/docker/distribution)
|
||||
* [OpenShift](https://www.openshift.com/)
|
||||
* [Delve](https://github.com/derekparker/delve)
|
||||
@@ -755,7 +756,7 @@ providing a way to handle the errors in one location. The current list of functi
|
||||
* PersistentPostRunE
|
||||
|
||||
If you would like to silence the default `error` and `usage` output in favor of your own, you can set `SilenceUsage`
|
||||
and `SilenceErrors` to `false` on the command. A child command respects these flags if they are set on the parent
|
||||
and `SilenceErrors` to `true` on the command. A child command respects these flags if they are set on the parent
|
||||
command.
|
||||
|
||||
**Example Usage using RunE:**
|
||||
|
||||
62
vendor/github.com/spf13/cobra/command.go
generated
vendored
62
vendor/github.com/spf13/cobra/command.go
generated
vendored
@@ -57,6 +57,9 @@ type Command struct {
|
||||
Deprecated string
|
||||
// Is this command hidden and should NOT show up in the list of available commands?
|
||||
Hidden bool
|
||||
// Annotations are key/value pairs that can be used by applications to identify or
|
||||
// group commands
|
||||
Annotations map[string]string
|
||||
// Full set of flags
|
||||
flags *flag.FlagSet
|
||||
// Set of flags childrens of this command will inherit
|
||||
@@ -152,12 +155,12 @@ func (c *Command) SetUsageTemplate(s string) {
|
||||
}
|
||||
|
||||
// SetFlagErrorFunc sets a function to generate an error when flag parsing
|
||||
// fails
|
||||
// fails.
|
||||
func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
|
||||
c.flagErrorFunc = f
|
||||
}
|
||||
|
||||
// SetHelpFunc sets help function. Can be defined by Application
|
||||
// SetHelpFunc sets help function. Can be defined by Application.
|
||||
func (c *Command) SetHelpFunc(f func(*Command, []string)) {
|
||||
c.helpFunc = f
|
||||
}
|
||||
@@ -184,7 +187,7 @@ func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string
|
||||
}
|
||||
}
|
||||
|
||||
// OutOrStdout returns output to stdout
|
||||
// OutOrStdout returns output to stdout.
|
||||
func (c *Command) OutOrStdout() io.Writer {
|
||||
return c.getOut(os.Stdout)
|
||||
}
|
||||
@@ -342,19 +345,19 @@ Aliases:
|
||||
{{end}}{{if .HasExample}}
|
||||
|
||||
Examples:
|
||||
{{ .Example }}{{end}}{{ if .HasAvailableSubCommands}}
|
||||
{{ .Example }}{{end}}{{if .HasAvailableSubCommands}}
|
||||
|
||||
Available Commands:{{range .Commands}}{{if .IsAvailableCommand}}
|
||||
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}}
|
||||
Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
|
||||
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
|
||||
|
||||
Flags:
|
||||
{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasAvailableInheritedFlags}}
|
||||
{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasAvailableInheritedFlags}}
|
||||
|
||||
Global Flags:
|
||||
{{.InheritedFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasHelpSubCommands}}
|
||||
|
||||
Additional help topics:{{range .Commands}}{{if .IsHelpCommand}}
|
||||
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableSubCommands }}
|
||||
Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
|
||||
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
|
||||
|
||||
Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
|
||||
`
|
||||
@@ -381,20 +384,18 @@ func (c *Command) resetChildrensParents() {
|
||||
}
|
||||
}
|
||||
|
||||
// Test if the named flag is a boolean flag.
|
||||
func isBooleanFlag(name string, f *flag.FlagSet) bool {
|
||||
func hasNoOptDefVal(name string, f *flag.FlagSet) bool {
|
||||
flag := f.Lookup(name)
|
||||
if flag == nil {
|
||||
return false
|
||||
}
|
||||
return flag.Value.Type() == "bool"
|
||||
return len(flag.NoOptDefVal) > 0
|
||||
}
|
||||
|
||||
// Test if the named flag is a boolean flag.
|
||||
func isBooleanShortFlag(name string, f *flag.FlagSet) bool {
|
||||
func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
|
||||
result := false
|
||||
f.VisitAll(func(f *flag.Flag) {
|
||||
if f.Shorthand == name && f.Value.Type() == "bool" {
|
||||
fs.VisitAll(func(flag *flag.Flag) {
|
||||
if flag.Shorthand == name && len(flag.NoOptDefVal) > 0 {
|
||||
result = true
|
||||
}
|
||||
})
|
||||
@@ -420,8 +421,8 @@ func stripFlags(args []string, c *Command) []string {
|
||||
inQuote = true
|
||||
case strings.HasPrefix(y, "--") && !strings.Contains(y, "="):
|
||||
// TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
|
||||
inFlag = !isBooleanFlag(y[2:], c.Flags())
|
||||
case strings.HasPrefix(y, "-") && !strings.Contains(y, "=") && len(y) == 2 && !isBooleanShortFlag(y[1:], c.Flags()):
|
||||
inFlag = !hasNoOptDefVal(y[2:], c.Flags())
|
||||
case strings.HasPrefix(y, "-") && !strings.Contains(y, "=") && len(y) == 2 && !shortHasNoOptDefVal(y[1:], c.Flags()):
|
||||
inFlag = true
|
||||
case inFlag:
|
||||
inFlag = false
|
||||
@@ -455,7 +456,7 @@ func argsMinusFirstX(args []string, x string) []string {
|
||||
return args
|
||||
}
|
||||
|
||||
// Find finds the target command given the args and command tree
|
||||
// Find the target command given the args and command tree
|
||||
// Meant to be run on the highest node. Only searches down.
|
||||
func (c *Command) Find(args []string) (*Command, []string, error) {
|
||||
if c == nil {
|
||||
@@ -695,7 +696,6 @@ func (c *Command) Execute() error {
|
||||
|
||||
// ExecuteC executes the command.
|
||||
func (c *Command) ExecuteC() (cmd *Command, err error) {
|
||||
|
||||
// Regardless of what command execute is called on, run on Root only
|
||||
if c.HasParent() {
|
||||
return c.Root().ExecuteC()
|
||||
@@ -780,7 +780,7 @@ func (c *Command) initHelpCmd() {
|
||||
Run: func(c *Command, args []string) {
|
||||
cmd, _, e := c.Root().Find(args)
|
||||
if cmd == nil || e != nil {
|
||||
c.Printf("Unknown help topic %#q.", args)
|
||||
c.Printf("Unknown help topic %#q\n", args)
|
||||
c.Root().Usage()
|
||||
} else {
|
||||
cmd.Help()
|
||||
@@ -969,7 +969,8 @@ func (c *Command) Name() string {
|
||||
if i >= 0 {
|
||||
name = name[:i]
|
||||
}
|
||||
return name
|
||||
c.name = name
|
||||
return c.name
|
||||
}
|
||||
|
||||
// HasAlias determines if a given string is an alias of the command.
|
||||
@@ -1020,11 +1021,12 @@ func (c *Command) IsAvailableCommand() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsHelpCommand determines if a command is a 'help' command; a help command is
|
||||
// determined by the fact that it is NOT runnable/hidden/deprecated, and has no
|
||||
// sub commands that are runnable/hidden/deprecated.
|
||||
func (c *Command) IsHelpCommand() bool {
|
||||
|
||||
// IsAdditionalHelpTopicCommand determines if a command is an additional
|
||||
// help topic command; additional help topic command is determined by the
|
||||
// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
|
||||
// are runnable/hidden/deprecated.
|
||||
// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
|
||||
func (c *Command) IsAdditionalHelpTopicCommand() bool {
|
||||
// if a command is runnable, deprecated, or hidden it is not a 'help' command
|
||||
if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden {
|
||||
return false
|
||||
@@ -1032,7 +1034,7 @@ func (c *Command) IsHelpCommand() bool {
|
||||
|
||||
// if any non-help sub commands are found, the command is not a 'help' command
|
||||
for _, sub := range c.commands {
|
||||
if !sub.IsHelpCommand() {
|
||||
if !sub.IsAdditionalHelpTopicCommand() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -1045,10 +1047,9 @@ func (c *Command) IsHelpCommand() bool {
|
||||
// that need to be shown in the usage/help default template under 'additional help
|
||||
// topics'.
|
||||
func (c *Command) HasHelpSubCommands() bool {
|
||||
|
||||
// return true on the first found available 'help' sub command
|
||||
for _, sub := range c.commands {
|
||||
if sub.IsHelpCommand() {
|
||||
if sub.IsAdditionalHelpTopicCommand() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -1060,7 +1061,6 @@ func (c *Command) HasHelpSubCommands() bool {
|
||||
// HasAvailableSubCommands determines if a command has available sub commands that
|
||||
// need to be shown in the usage/help default template under 'available commands'.
|
||||
func (c *Command) HasAvailableSubCommands() bool {
|
||||
|
||||
// return true on the first found available (non deprecated/help/hidden)
|
||||
// sub command
|
||||
for _, sub := range c.commands {
|
||||
|
||||
4
vendor/github.com/spf13/cobra/doc/man_docs.go
generated
vendored
4
vendor/github.com/spf13/cobra/doc/man_docs.go
generated
vendored
@@ -49,7 +49,7 @@ func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error {
|
||||
header = &GenManHeader{}
|
||||
}
|
||||
for _, c := range cmd.Commands() {
|
||||
if !c.IsAvailableCommand() || c.IsHelpCommand() {
|
||||
if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
|
||||
continue
|
||||
}
|
||||
if err := GenManTreeFromOpts(c, opts); err != nil {
|
||||
@@ -216,7 +216,7 @@ func genMan(cmd *cobra.Command, header *GenManHeader) []byte {
|
||||
children := cmd.Commands()
|
||||
sort.Sort(byName(children))
|
||||
for _, c := range children {
|
||||
if !c.IsAvailableCommand() || c.IsHelpCommand() {
|
||||
if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
|
||||
continue
|
||||
}
|
||||
seealso := fmt.Sprintf("**%s-%s(%s)**", dashCommandName, c.Name(), header.Section)
|
||||
|
||||
4
vendor/github.com/spf13/cobra/doc/md_docs.go
generated
vendored
4
vendor/github.com/spf13/cobra/doc/md_docs.go
generated
vendored
@@ -119,7 +119,7 @@ func GenMarkdownCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string)
|
||||
sort.Sort(byName(children))
|
||||
|
||||
for _, child := range children {
|
||||
if !child.IsAvailableCommand() || child.IsHelpCommand() {
|
||||
if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {
|
||||
continue
|
||||
}
|
||||
cname := name + " " + child.Name()
|
||||
@@ -149,7 +149,7 @@ func GenMarkdownTree(cmd *cobra.Command, dir string) error {
|
||||
|
||||
func GenMarkdownTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error {
|
||||
for _, c := range cmd.Commands() {
|
||||
if !c.IsAvailableCommand() || c.IsHelpCommand() {
|
||||
if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
|
||||
continue
|
||||
}
|
||||
if err := GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler); err != nil {
|
||||
|
||||
2
vendor/github.com/spf13/cobra/doc/util.go
generated
vendored
2
vendor/github.com/spf13/cobra/doc/util.go
generated
vendored
@@ -27,7 +27,7 @@ func hasSeeAlso(cmd *cobra.Command) bool {
|
||||
return true
|
||||
}
|
||||
for _, c := range cmd.Commands() {
|
||||
if !c.IsAvailableCommand() || c.IsHelpCommand() {
|
||||
if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
|
||||
continue
|
||||
}
|
||||
return true
|
||||
|
||||
4
vendor/github.com/spf13/cobra/doc/yaml_docs.go
generated
vendored
4
vendor/github.com/spf13/cobra/doc/yaml_docs.go
generated
vendored
@@ -57,7 +57,7 @@ func GenYamlTree(cmd *cobra.Command, dir string) error {
|
||||
// GenYamlTreeCustom creates yaml structured ref files
|
||||
func GenYamlTreeCustom(cmd *cobra.Command, dir string, filePrepender, linkHandler func(string) string) error {
|
||||
for _, c := range cmd.Commands() {
|
||||
if !c.IsAvailableCommand() || c.IsHelpCommand() {
|
||||
if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
|
||||
continue
|
||||
}
|
||||
if err := GenYamlTreeCustom(c, dir, filePrepender, linkHandler); err != nil {
|
||||
@@ -117,7 +117,7 @@ func GenYamlCustom(cmd *cobra.Command, w io.Writer, linkHandler func(string) str
|
||||
children := cmd.Commands()
|
||||
sort.Sort(byName(children))
|
||||
for _, child := range children {
|
||||
if !child.IsAvailableCommand() || child.IsHelpCommand() {
|
||||
if !child.IsAvailableCommand() || child.IsAdditionalHelpTopicCommand() {
|
||||
continue
|
||||
}
|
||||
result = append(result, child.Name()+" - "+child.Short)
|
||||
|
||||
5
vendor/github.com/tylerb/graceful/README.md
generated
vendored
5
vendor/github.com/tylerb/graceful/README.md
generated
vendored
@@ -3,6 +3,11 @@ graceful [](htt
|
||||
|
||||
Graceful is a Go 1.3+ package enabling graceful shutdown of http.Handler servers.
|
||||
|
||||
## Using Go 1.8?
|
||||
|
||||
If you are using Go 1.8, you may not need to use this library! Consider using `http.Server`'s built-in [Shutdown()](https://golang.org/pkg/net/http/#Server.Shutdown)
|
||||
method for graceful shutdowns.
|
||||
|
||||
## Installation
|
||||
|
||||
To install, simply execute:
|
||||
|
||||
1
vendor/github.com/tylerb/graceful/graceful.go
generated
vendored
1
vendor/github.com/tylerb/graceful/graceful.go
generated
vendored
@@ -366,6 +366,7 @@ func (srv *Server) manageConnections(add, idle, active, remove chan net.Conn, sh
|
||||
select {
|
||||
case conn := <-add:
|
||||
srv.connections[conn] = struct{}{}
|
||||
srv.idleConnections[conn] = struct{}{} // Newly-added connections are considered idle until they become active.
|
||||
case conn := <-idle:
|
||||
srv.idleConnections[conn] = struct{}{}
|
||||
case conn := <-active:
|
||||
|
||||
6
vendor/github.com/xenolf/lego/README.md
generated
vendored
6
vendor/github.com/xenolf/lego/README.md
generated
vendored
@@ -23,7 +23,11 @@ To build lego inside a Docker container, just run
|
||||
```
|
||||
docker build -t lego .
|
||||
```
|
||||
|
||||
##### From the package manager
|
||||
- [ArchLinux (AUR)](https://aur.archlinux.org/packages/lego-git):
|
||||
```
|
||||
yaourt -S lego-git
|
||||
```
|
||||
#### Features
|
||||
|
||||
- Register with CA
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user