Mirror of https://github.com/grafana/grafana.git (synced 2025-02-25 18:55:37 -06:00)
Backend image rendering as plugin (#11966)
* rendering: headless Chrome progress
* renderer: minor change
* grpc: version hell
* updated grpc libs
* wip: minor progress
* rendering: new image rendering plugin is starting to work
* feat: PhantomJS now works as well; updated alerting to use the new rendering service
* refactor: renamed the renderer package and service to rendering to make the renderer name less confusing (rendering is the internal service that handles the renderer plugin now)
* rendering: the render key is now passed and render auth works in plugin mode
* removed unneeded lines from .gitignore
* rendering: plugin mode now supports waiting for all panels to complete rendering
* fix: LastSeenAt was not set for render calls, which caused many updates to Last Seen At during rendering; this should fix the sqlite "db locked" issues seen in previous releases
* change: the render tz URL parameter now uses a proper timezone name, as Chrome does not handle UTC-offset TZ values
* fix: another update to tz param generation
* renderer: added an HTTP mode to the renderer service via the new ini setting [rendering] server_url
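As a rough sketch of the new option named in the last bullet above: only the [rendering] section and the server_url key come from this commit; the example value and the fallback behaviour described in the comment are assumptions, not something this diff documents.

[rendering]
# URL of an external image renderer that speaks the HTTP mode added in this commit.
# Example value only; if left empty, Grafana presumably keeps using the bundled
# renderer plugin / PhantomJS path described in the commit message.
server_url = http://localhost:8081/render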
vendor/cloud.google.com/go/CONTRIBUTORS (1 changed line, generated, vendored)
@@ -22,6 +22,7 @@ David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
James Hall <james.hall@shopify.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Kunpei Sakai <namusyaka@gmail.com>
vendor/github.com/apache/thrift/LICENSE (239 changed lines, generated, vendored)
@@ -1,239 +0,0 @@
[Removed: the full Apache License 2.0 text, followed by the third-party notices shipped with Thrift (MIT-licensed portions of lib/erl/src/Makefile.am, the ax_boost_base.m4 files copyright 2007 Thomas Porschberg, and Douglas Crockford's public-domain json_parse.js).]
vendor/github.com/apache/thrift/NOTICE (5 changed lines, generated, vendored)
@@ -1,5 +0,0 @@
Apache Thrift
Copyright 2006-2010 The Apache Software Foundation.

This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
vendor/github.com/apache/thrift/contrib/fb303/LICENSE (16 changed lines, generated, vendored)
@@ -1,16 +0,0 @@
[Removed: the standard 16-line Apache License 2.0 source header.]
vendor/github.com/apache/thrift/debian/copyright (129 changed lines, generated, vendored)
@@ -1,129 +0,0 @@
[Removed: the Debian copyright file, which restates the Thrift LICENSE information: Apache 2.0 for the package, MIT-licensed portions of lib/erl/src/Makefile.am, a long list of compiler and C# library files relicensed from the old Thrift Software License to Apache 2.0, the ax_boost_base.m4 notice by Thomas Porschberg, the Aladdin Enterprises notice for compiler/cpp/src/md5.[ch], and LGPL 2.1 terms for lib/rb/setup.rb and lib/ocaml/OCamlMakefile.]
vendor/github.com/apache/thrift/lib/dart/LICENSE_HEADER (16 changed lines, generated, vendored)
@@ -1,16 +0,0 @@
[Removed: the standard 16-line Apache License 2.0 source header.]
vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go (91 changed lines, generated, vendored)
@@ -1,91 +0,0 @@
[Removed: Go source for TBufferedTransport and TBufferedTransportFactory, a bufio.ReadWriter wrapper around an underlying TTransport, preceded by the Apache 2.0 license header.]
vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go (269 changed lines, generated, vendored)
@@ -1,269 +0,0 @@
[Removed: Go source for TDebugProtocol and TDebugProtocolFactory, a TProtocol decorator that delegates every Write*/Read* call and logs the arguments and result via log.Printf.]
vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go (58 changed lines, generated, vendored)
@@ -1,58 +0,0 @@
[Removed: Go source for TDeserializer, which reads a TStruct from a string or byte slice through a TMemoryBuffer and the default binary protocol.]
vendor/github.com/apache/thrift/lib/go/thrift/field.go (79 changed lines, generated, vendored)
@@ -1,79 +0,0 @@
[Removed: Go source for the internal field metadata helper (the field struct, its accessors, and the sortable fieldSlice type).]
vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go (167 changed lines, generated, vendored)
@@ -1,167 +0,0 @@
[Removed: Go source for TFramedTransport and its factory, which add a 4-byte big-endian length prefix per frame and enforce a maximum frame length (DEFAULT_MAX_LENGTH = 16384000).]
vendor/github.com/apache/thrift/lib/go/thrift/http_client.go (258 changed lines, generated, vendored)
@@ -1,258 +0,0 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Default to using the shared http client. Library users are
|
||||
// free to change this global client or specify one through
|
||||
// THttpClientOptions.
|
||||
var DefaultHttpClient *http.Client = http.DefaultClient
|
||||
|
||||
type THttpClient struct {
|
||||
client *http.Client
|
||||
response *http.Response
|
||||
url *url.URL
|
||||
requestBuffer *bytes.Buffer
|
||||
header http.Header
|
||||
nsecConnectTimeout int64
|
||||
nsecReadTimeout int64
|
||||
}
|
||||
|
||||
type THttpClientTransportFactory struct {
|
||||
options THttpClientOptions
|
||||
url string
|
||||
isPost bool
|
||||
}
|
||||
|
||||
func (p *THttpClientTransportFactory) GetTransport(trans TTransport) TTransport {
|
||||
if trans != nil {
|
||||
t, ok := trans.(*THttpClient)
|
||||
if ok && t.url != nil {
|
||||
if t.requestBuffer != nil {
|
||||
t2, _ := NewTHttpPostClientWithOptions(t.url.String(), p.options)
|
||||
return t2
|
||||
}
|
||||
t2, _ := NewTHttpClientWithOptions(t.url.String(), p.options)
|
||||
return t2
|
||||
}
|
||||
}
|
||||
if p.isPost {
|
||||
s, _ := NewTHttpPostClientWithOptions(p.url, p.options)
|
||||
return s
|
||||
}
|
||||
s, _ := NewTHttpClientWithOptions(p.url, p.options)
|
||||
return s
|
||||
}
|
||||
|
||||
type THttpClientOptions struct {
|
||||
// If nil, DefaultHttpClient is used
|
||||
Client *http.Client
|
||||
}
|
||||
|
||||
func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory {
|
||||
return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
|
||||
}
|
||||
|
||||
func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
|
||||
return &THttpClientTransportFactory{url: url, isPost: false, options: options}
|
||||
}
|
||||
|
||||
func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory {
|
||||
return NewTHttpPostClientTransportFactoryWithOptions(url, THttpClientOptions{})
|
||||
}
|
||||
|
||||
func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
|
||||
return &THttpClientTransportFactory{url: url, isPost: true, options: options}
|
||||
}
|
||||
|
||||
func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
|
||||
parsedURL, err := url.Parse(urlstr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response, err := http.Get(urlstr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := options.Client
|
||||
if client == nil {
|
||||
client = DefaultHttpClient
|
||||
}
|
||||
httpHeader := map[string][]string{"Content-Type": []string{"application/x-thrift"}}
|
||||
return &THttpClient{client: client, response: response, url: parsedURL, header: httpHeader}, nil
|
||||
}
|
||||
|
||||
func NewTHttpClient(urlstr string) (TTransport, error) {
|
||||
return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
|
||||
}
|
||||
|
||||
func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
|
||||
parsedURL, err := url.Parse(urlstr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf := make([]byte, 0, 1024)
|
||||
client := options.Client
|
||||
if client == nil {
|
||||
client = DefaultHttpClient
|
	}
	httpHeader := map[string][]string{"Content-Type": []string{"application/x-thrift"}}
	return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil
}

func NewTHttpPostClient(urlstr string) (TTransport, error) {
	return NewTHttpPostClientWithOptions(urlstr, THttpClientOptions{})
}

// Set the HTTP Header for this specific Thrift Transport
// It is important that you first assert the TTransport as a THttpClient type
// like so:
//
// httpTrans := trans.(THttpClient)
// httpTrans.SetHeader("User-Agent","Thrift Client 1.0")
func (p *THttpClient) SetHeader(key string, value string) {
	p.header.Add(key, value)
}

// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport
// It is important that you first assert the TTransport as a THttpClient type
// like so:
//
// httpTrans := trans.(THttpClient)
// hdrValue := httpTrans.GetHeader("User-Agent")
func (p *THttpClient) GetHeader(key string) string {
	return p.header.Get(key)
}

// Deletes the HTTP Header given a Header Key for this specific Thrift Transport
// It is important that you first assert the TTransport as a THttpClient type
// like so:
//
// httpTrans := trans.(THttpClient)
// httpTrans.DelHeader("User-Agent")
func (p *THttpClient) DelHeader(key string) {
	p.header.Del(key)
}

func (p *THttpClient) Open() error {
	// do nothing
	return nil
}

func (p *THttpClient) IsOpen() bool {
	return p.response != nil || p.requestBuffer != nil
}

func (p *THttpClient) closeResponse() error {
	var err error
	if p.response != nil && p.response.Body != nil {
		// The docs specify that if keepalive is enabled and the response body is not
		// read to completion the connection will never be returned to the pool and
		// reused. Errors are being ignored here because if the connection is invalid
		// and this fails for some reason, the Close() method will do any remaining
		// cleanup.
		io.Copy(ioutil.Discard, p.response.Body)

		err = p.response.Body.Close()
	}

	p.response = nil
	return err
}

func (p *THttpClient) Close() error {
	if p.requestBuffer != nil {
		p.requestBuffer.Reset()
		p.requestBuffer = nil
	}
	return p.closeResponse()
}

func (p *THttpClient) Read(buf []byte) (int, error) {
	if p.response == nil {
		return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
	}
	n, err := p.response.Body.Read(buf)
	if n > 0 && (err == nil || err == io.EOF) {
		return n, nil
	}
	return n, NewTTransportExceptionFromError(err)
}

func (p *THttpClient) ReadByte() (c byte, err error) {
	return readByte(p.response.Body)
}

func (p *THttpClient) Write(buf []byte) (int, error) {
	n, err := p.requestBuffer.Write(buf)
	return n, err
}

func (p *THttpClient) WriteByte(c byte) error {
	return p.requestBuffer.WriteByte(c)
}

func (p *THttpClient) WriteString(s string) (n int, err error) {
	return p.requestBuffer.WriteString(s)
}

func (p *THttpClient) Flush() error {
	// Close any previous response body to avoid leaking connections.
	p.closeResponse()

	req, err := http.NewRequest("POST", p.url.String(), p.requestBuffer)
	if err != nil {
		return NewTTransportExceptionFromError(err)
	}
	req.Header = p.header
	response, err := p.client.Do(req)
	if err != nil {
		return NewTTransportExceptionFromError(err)
	}
	if response.StatusCode != http.StatusOK {
		// Close the response to avoid leaking file descriptors. closeResponse does
		// more than just call Close(), so temporarily assign it and reuse the logic.
		p.response = response
		p.closeResponse()

		// TODO(pomack) log bad response
		return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode))
	}
	p.response = response
	return nil
}

func (p *THttpClient) RemainingBytes() (num_bytes uint64) {
	len := p.response.ContentLength
	if len >= 0 {
		return uint64(len)
	}

	const maxSize = ^uint64(0)
	return maxSize // the truth is, we just don't know unless framed is used
}
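For readers skimming this removal, a minimal usage sketch of the THttpClient transport shown above (not part of the diff; the server URL is illustrative, and NewTBinaryProtocolTransport is the binary-protocol constructor this library provides):

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	// NewTHttpPostClient returns a TTransport whose concrete type is *THttpClient.
	trans, err := thrift.NewTHttpPostClient("http://localhost:9090/service") // URL is illustrative
	if err != nil {
		fmt.Println("error creating transport:", err)
		return
	}
	// Assert the concrete type to set per-request HTTP headers, as the comments above describe.
	httpTrans := trans.(*thrift.THttpClient)
	httpTrans.SetHeader("User-Agent", "Thrift Client 1.0")

	// A protocol writes the call into the request buffer; in a real client the generated
	// stub writes the request via proto before Flush(), which performs the HTTP POST and
	// exposes the response body through trans.Read.
	proto := thrift.NewTBinaryProtocolTransport(trans)
	_ = proto
	if err := trans.Flush(); err != nil {
		fmt.Println("flush failed:", err)
	}
}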
34  vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go  generated  vendored
@@ -1,34 +0,0 @@  [entire vendored file deleted; it provided NewThriftHandlerFunc, which wraps a TProcessor plus input/output TProtocolFactory as an http.HandlerFunc serving "application/x-thrift" over a StreamTransport built from the request body and response writer]
214  vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go  generated  vendored
@@ -1,214 +0,0 @@  [entire vendored file deleted; it provided StreamTransport and StreamTransportFactory, a TTransport backed by an io.Reader and/or io.Writer (buffered via bufio) with Open/Close/Flush/Read/Write/ReadByte/WriteByte/WriteString/RemainingBytes]
583  vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go  generated  vendored
@@ -1,583 +0,0 @@  [entire vendored file deleted; it provided TJSONProtocol and TJSONProtocolFactory, the typed JSON protocol built on TSimpleJSONProtocol: messages are JSON lists carrying a protocol version, every value is tagged with a type string ("tf", "i8", "i16", "i32", "i64", "dbl", "str", "rec", "map", "set", "lst"), binary is base64-encoded, and the full TProtocol read/write surface plus TypeIdToString/StringToTypeId is implemented]
169  vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go  generated  vendored
@@ -1,169 +0,0 @@  [entire vendored file deleted; it provided TMultiplexedProtocol, the client-side decorator that prefixes "ServiceName:" onto CALL/ONEWAY message names, and TMultiplexedProcessor, the server-side router that splits on ":" and dispatches to registered per-service processors (with an optional default processor), plus the internal storedMessageProtocol used to replay ReadMessageBegin]
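A brief sketch, condensed from the removed file's own doc comment, of how two services would share one framed socket on the client side (the service names and address are the illustrative ones from that comment, not packages in this repo):

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	socket, err := thrift.NewTSocket("localhost:9090") // illustrative address
	if err != nil {
		fmt.Println("error resolving address:", err)
		return
	}
	transport := thrift.NewTFramedTransport(socket)
	protocol := thrift.NewTBinaryProtocolTransport(transport)

	// One decorator per service; CALL/ONEWAY names go out as "Calculator:add" etc.,
	// which TMultiplexedProcessor splits on ":" server-side to pick the right processor.
	calcProto := thrift.NewTMultiplexedProtocol(protocol, "Calculator")
	weatherProto := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport")

	// Pass calcProto / weatherProto to the generated NewXxxClient constructors,
	// then open the shared transport before issuing calls.
	_ = calcProto
	_ = weatherProto
	if err := transport.Open(); err != nil {
		fmt.Println("unable to open client socket:", err)
	}
}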
50  vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go  generated  vendored
@@ -1,50 +0,0 @@  [entire vendored file deleted; it provided the pointer helpers (BoolPtr, IntPtr, Int32Ptr, Int64Ptr, Float32Ptr, Float64Ptr, StringPtr, Uint32Ptr, Uint64Ptr, ByteSlicePtr) used to take the address of a literal value for optional fields in generated structs]
58  vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go  generated  vendored
@@ -1,58 +0,0 @@  [entire vendored file deleted; it provided the TProcessorFactory and TProcessorFunctionFactory interfaces and their default singleton-returning implementations]
35  vendor/github.com/apache/thrift/lib/go/thrift/server.go  generated  vendored
@@ -1,35 +0,0 @@  [entire vendored file deleted; it provided the TServer interface: accessors for the processor, transport, and protocol factories, plus Serve and an optional Stop]
122  vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go  generated  vendored
@@ -1,122 +0,0 @@  [entire vendored file deleted; it provided TServerSocket, the TCP server transport with Listen/Accept/Open/Close/Interrupt/Addr and a mutex-protected interrupted flag]
34  vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go  generated  vendored
@@ -1,34 +0,0 @@  [entire vendored file deleted; it provided the TServerTransport interface: Listen, Accept, Close, and the optional thread-safe Interrupt]
196  vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go  generated  vendored
@@ -1,196 +0,0 @@  [entire vendored file deleted; it provided TSimpleServer, the simple non-concurrent server, with the NewTSimpleServer2/4/6 and NewTSimpleServerFactory2/4/6 constructors, an AcceptLoop that handles each accepted client in its own goroutine, and Stop guarded by a sync.Once]
166  vendor/github.com/apache/thrift/lib/go/thrift/socket.go  generated  vendored
@@ -1,166 +0,0 @@  [entire vendored file deleted; it provided TSocket, the net.Conn-backed client transport, with the NewTSocket/NewTSocketTimeout/NewTSocketFromAddrTimeout/NewTSocketFromConnTimeout constructors, deadline-based timeouts, and Open/Close/Read/Write/Flush/Interrupt/RemainingBytes]
109
vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go
generated
vendored
@@ -1,109 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package thrift

import (
"net"
"time"
"crypto/tls"
)

type TSSLServerSocket struct {
listener net.Listener
addr net.Addr
clientTimeout time.Duration
interrupted bool
cfg *tls.Config
}

func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) {
return NewTSSLServerSocketTimeout(listenAddr, cfg, 0)
}

func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) {
addr, err := net.ResolveTCPAddr("tcp", listenAddr)
if err != nil {
return nil, err
}
return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil
}

func (p *TSSLServerSocket) Listen() error {
if p.IsListening() {
return nil
}
l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg)
if err != nil {
return err
}
p.listener = l
return nil
}

func (p *TSSLServerSocket) Accept() (TTransport, error) {
if p.interrupted {
return nil, errTransportInterrupted
}
if p.listener == nil {
return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
}
conn, err := p.listener.Accept()
if err != nil {
return nil, NewTTransportExceptionFromError(err)
}
return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil
}

// Checks whether the socket is listening.
func (p *TSSLServerSocket) IsListening() bool {
return p.listener != nil
}

// Connects the socket, creating a new socket object if necessary.
func (p *TSSLServerSocket) Open() error {
if p.IsListening() {
return NewTTransportException(ALREADY_OPEN, "Server socket already open")
}
if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil {
return err
} else {
p.listener = l
}
return nil
}

func (p *TSSLServerSocket) Addr() net.Addr {
return p.addr
}

func (p *TSSLServerSocket) Close() error {
defer func() {
p.listener = nil
}()
if p.IsListening() {
return p.listener.Close()
}
return nil
}

func (p *TSSLServerSocket) Interrupt() error {
p.interrupted = true
return nil
}
171
vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go
generated
vendored
@@ -1,171 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package thrift

import (
"crypto/tls"
"net"
"time"
)

type TSSLSocket struct {
conn net.Conn
// hostPort contains host:port (e.g. "asdf.com:12345"). The field is
// only valid if addr is nil.
hostPort string
// addr is nil when hostPort is not "", and is only used when the
// TSSLSocket is constructed from a net.Addr.
addr net.Addr
timeout time.Duration
cfg *tls.Config
}

// NewTSSLSocket creates a net.Conn-backed TTransport, given a host and port and tls Configuration
//
// Example:
// trans, err := thrift.NewTSSLSocket("localhost:9090", nil)
func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) {
return NewTSSLSocketTimeout(hostPort, cfg, 0)
}

// NewTSSLSocketTimeout creates a net.Conn-backed TTransport, given a host and port
// it also accepts a tls Configuration and a timeout as a time.Duration
func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, timeout time.Duration) (*TSSLSocket, error) {
return &TSSLSocket{hostPort: hostPort, timeout: timeout, cfg: cfg}, nil
}

// Creates a TSSLSocket from a net.Addr
func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, timeout time.Duration) *TSSLSocket {
return &TSSLSocket{addr: addr, timeout: timeout, cfg: cfg}
}

// Creates a TSSLSocket from an existing net.Conn
func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, timeout time.Duration) *TSSLSocket {
return &TSSLSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout, cfg: cfg}
}

// Sets the socket timeout
func (p *TSSLSocket) SetTimeout(timeout time.Duration) error {
p.timeout = timeout
return nil
}

func (p *TSSLSocket) pushDeadline(read, write bool) {
var t time.Time
if p.timeout > 0 {
t = time.Now().Add(time.Duration(p.timeout))
}
if read && write {
p.conn.SetDeadline(t)
} else if read {
p.conn.SetReadDeadline(t)
} else if write {
p.conn.SetWriteDeadline(t)
}
}

// Connects the socket, creating a new socket object if necessary.
func (p *TSSLSocket) Open() error {
var err error
// If we have a hostname, we need to pass the hostname to tls.Dial for
// certificate hostname checks.
if p.hostPort != "" {
if p.conn, err = tls.Dial("tcp", p.hostPort, p.cfg); err != nil {
return NewTTransportException(NOT_OPEN, err.Error())
}
} else {
if p.IsOpen() {
return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
}
if p.addr == nil {
return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
}
if len(p.addr.Network()) == 0 {
return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
}
if len(p.addr.String()) == 0 {
return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
}
if p.conn, err = tls.Dial(p.addr.Network(), p.addr.String(), p.cfg); err != nil {
return NewTTransportException(NOT_OPEN, err.Error())
}
}
return nil
}

// Retrieve the underlying net.Conn
func (p *TSSLSocket) Conn() net.Conn {
return p.conn
}

// Returns true if the connection is open
func (p *TSSLSocket) IsOpen() bool {
if p.conn == nil {
return false
}
return true
}

// Closes the socket.
func (p *TSSLSocket) Close() error {
// Close the socket
if p.conn != nil {
err := p.conn.Close()
if err != nil {
return err
}
p.conn = nil
}
return nil
}

func (p *TSSLSocket) Read(buf []byte) (int, error) {
if !p.IsOpen() {
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
}
p.pushDeadline(true, false)
n, err := p.conn.Read(buf)
return n, NewTTransportExceptionFromError(err)
}

func (p *TSSLSocket) Write(buf []byte) (int, error) {
if !p.IsOpen() {
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
}
p.pushDeadline(false, true)
return p.conn.Write(buf)
}

func (p *TSSLSocket) Flush() error {
return nil
}

func (p *TSSLSocket) Interrupt() error {
if !p.IsOpen() {
return nil
}
return p.conn.Close()
}

func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) {
const maxSize = ^uint64(0)
return maxSize // the thruth is, we just don't know unless framed is used
}
117
vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go
generated
vendored
@@ -1,117 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package thrift

import (
"compress/zlib"
"io"
"log"
)

// TZlibTransportFactory is a factory for TZlibTransport instances
type TZlibTransportFactory struct {
level int
}

// TZlibTransport is a TTransport implementation that makes use of zlib compression.
type TZlibTransport struct {
reader io.ReadCloser
transport TTransport
writer *zlib.Writer
}

// GetTransport constructs a new instance of NewTZlibTransport
func (p *TZlibTransportFactory) GetTransport(trans TTransport) TTransport {
t, _ := NewTZlibTransport(trans, p.level)
return t
}

// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory
func NewTZlibTransportFactory(level int) *TZlibTransportFactory {
return &TZlibTransportFactory{level: level}
}

// NewTZlibTransport constructs a new instance of TZlibTransport
func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) {
w, err := zlib.NewWriterLevel(trans, level)
if err != nil {
log.Println(err)
return nil, err
}

return &TZlibTransport{
writer: w,
transport: trans,
}, nil
}

// Close closes the reader and writer (flushing any unwritten data) and closes
// the underlying transport.
func (z *TZlibTransport) Close() error {
if z.reader != nil {
if err := z.reader.Close(); err != nil {
return err
}
}
if err := z.writer.Close(); err != nil {
return err
}
return z.transport.Close()
}

// Flush flushes the writer and its underlying transport.
func (z *TZlibTransport) Flush() error {
if err := z.writer.Flush(); err != nil {
return err
}
return z.transport.Flush()
}

// IsOpen returns true if the transport is open
func (z *TZlibTransport) IsOpen() bool {
return z.transport.IsOpen()
}

// Open opens the transport for communication
func (z *TZlibTransport) Open() error {
return z.transport.Open()
}

func (z *TZlibTransport) Read(p []byte) (int, error) {
if z.reader == nil {
r, err := zlib.NewReader(z.transport)
if err != nil {
return 0, NewTTransportExceptionFromError(err)
}
z.reader = r
}

return z.reader.Read(p)
}

// RemainingBytes returns the size in bytes of the data that is still to be
// read.
func (z *TZlibTransport) RemainingBytes() uint64 {
return z.transport.RemainingBytes()
}

func (z *TZlibTransport) Write(p []byte) (int, error) {
return z.writer.Write(p)
}
1
vendor/github.com/apache/thrift/tutorial/erl/client.sh
generated
vendored
@@ -1 +0,0 @@
server.sh
6
vendor/github.com/aws/aws-sdk-go/aws/client/client.go
generated
vendored
@@ -15,6 +15,12 @@ type Config struct {
Endpoint string
SigningRegion string
SigningName string

// States that the signing name did not come from a modeled source but
// was derived based on other data. Used by service client constructors
// to determine if the signin name can be overriden based on metadata the
// service has.
SigningNameDerived bool
}

// ConfigProvider provides a generic way for a service client to receive
28
vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
generated
vendored
@@ -1,12 +1,11 @@
package client

import (
"math/rand"
"strconv"
"sync"
"time"

"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkrand"
)

// DefaultRetryer implements basic retry logic using exponential backoff for
@@ -31,8 +30,6 @@ func (d DefaultRetryer) MaxRetries() int {
return d.NumMaxRetries
}

var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})

// RetryRules returns the delay duration before retrying this request again
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
// Set the upper limit of delay in retrying at ~five minutes
@@ -53,7 +50,7 @@ func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
retryCount = 13
}

delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
return time.Duration(delay) * time.Millisecond
}

@@ -65,7 +62,7 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
return *r.Retryable
}

if r.HTTPResponse.StatusCode >= 500 {
if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
return true
}
return r.IsErrorRetryable() || d.shouldThrottle(r)
@@ -117,22 +114,3 @@ func canUseRetryAfterHeader(r *request.Request) bool {

return true
}

// lockedSource is a thread-safe implementation of rand.Source
type lockedSource struct {
lk sync.Mutex
src rand.Source
}

func (r *lockedSource) Int63() (n int64) {
r.lk.Lock()
n = r.src.Int63()
r.lk.Unlock()
return
}

func (r *lockedSource) Seed(seed int64) {
r.lk.Lock()
r.src.Seed(seed)
r.lk.Unlock()
}
4
vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
generated
vendored
@@ -46,6 +46,7 @@ func (reader *teeReaderCloser) Close() error {

func logRequest(r *request.Request) {
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
bodySeekable := aws.IsReaderSeekable(r.Body)
dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
if err != nil {
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
@@ -53,6 +54,9 @@ func logRequest(r *request.Request) {
}

if logBody {
if !bodySeekable {
r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
}
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
// Body as a NoOpCloser and will not be reset after read by the HTTP
// client reader.
22
vendor/github.com/aws/aws-sdk-go/aws/config.go
generated
vendored
@@ -151,6 +151,15 @@ type Config struct {
// with accelerate.
S3UseAccelerate *bool

// S3DisableContentMD5Validation config option is temporarily disabled,
// For S3 GetObject API calls, #1837.
//
// Set this to `true` to disable the S3 service client from automatically
// adding the ContentMD5 to S3 Object Put and Upload API calls. This option
// will also disable the SDK from performing object ContentMD5 validation
// on GetObject API calls.
S3DisableContentMD5Validation *bool

// Set this to `true` to disable the EC2Metadata client from overriding the
// default http.Client's Timeout. This is helpful if you do not want the
// EC2Metadata client to create a new http.Client. This options is only
@@ -336,6 +345,15 @@ func (c *Config) WithS3Disable100Continue(disable bool) *Config {
func (c *Config) WithS3UseAccelerate(enable bool) *Config {
c.S3UseAccelerate = &enable
return c

}

// WithS3DisableContentMD5Validation sets a config
// S3DisableContentMD5Validation value returning a Config pointer for chaining.
func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
c.S3DisableContentMD5Validation = &enable
return c

}

// WithUseDualStack sets a config UseDualStack value returning a Config
@@ -435,6 +453,10 @@ func mergeInConfig(dst *Config, other *Config) {
dst.S3UseAccelerate = other.S3UseAccelerate
}

if other.S3DisableContentMD5Validation != nil {
dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
}

if other.UseDualStack != nil {
dst.UseDualStack = other.UseDualStack
}
28
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
generated
vendored
@@ -3,12 +3,10 @@ package corehandlers
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"runtime"
"strconv"
"time"

@@ -36,18 +34,13 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen
if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
length, _ = strconv.ParseInt(slength, 10, 64)
} else {
switch body := r.Body.(type) {
case nil:
length = 0
case lener:
length = int64(body.Len())
case io.Seeker:
r.BodyStart, _ = body.Seek(0, 1)
end, _ := body.Seek(0, 2)
body.Seek(r.BodyStart, 0) // make sure to seek back to original location
length = end - r.BodyStart
default:
panic("Cannot get length of body, must provide `ContentLength`")
if r.Body != nil {
var err error
length, err = aws.SeekerLen(r.Body)
if err != nil {
r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
return
}
}
}

@@ -60,13 +53,6 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen
}
}}

// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
var SDKVersionUserAgentHandler = request.NamedHandler{
Name: "core.SDKVersionUserAgentHandler",
Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
runtime.Version(), runtime.GOOS, runtime.GOARCH),
}

var reStatusCode = regexp.MustCompile(`^(\d{3})`)

// ValidateReqSigHandler is a request handler to ensure that the request's
37
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
package corehandlers

import (
"os"
"runtime"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)

// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
// to the user agent.
var SDKVersionUserAgentHandler = request.NamedHandler{
Name: "core.SDKVersionUserAgentHandler",
Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
runtime.Version(), runtime.GOOS, runtime.GOARCH),
}

const execEnvVar = `AWS_EXECUTION_ENV`
const execEnvUAKey = `exec_env`

// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
// execution environment to the user agent.
//
// If the environment variable AWS_EXECUTION_ENV is set, its value will be
// appended to the user agent string.
var AddHostExecEnvUserAgentHander = request.NamedHandler{
Name: "core.AddHostExecEnvUserAgentHander",
Fn: func(r *request.Request) {
v := os.Getenv(execEnvVar)
if len(v) == 0 {
return
}

request.AddToUserAgent(r, execEnvUAKey+"/"+v)
},
}
1
vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
generated
vendored
@@ -73,6 +73,7 @@ func Handlers() request.Handlers {
handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
handlers.Validate.AfterEachFn = request.HandlerListStopOnError
handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
handlers.Build.AfterEachFn = request.HandlerListStopOnError
handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
24
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
generated
vendored
@@ -1,5 +1,10 @@
// Package ec2metadata provides the client for making API calls to the
// EC2 Metadata service.
//
// This package's client can be disabled completely by setting the environment
// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
// true instructs the SDK to disable the EC2 Metadata client. The client cannot
// be used while the environemnt variable is set to true, (case insensitive).
package ec2metadata

import (
@@ -7,17 +12,21 @@ import (
"errors"
"io"
"net/http"
"os"
"strings"
"time"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/request"
)

// ServiceName is the name of the service.
const ServiceName = "ec2metadata"
const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"

// A EC2Metadata is an EC2 Metadata service Client.
type EC2Metadata struct {
@@ -75,6 +84,21 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
svc.Handlers.Validate.Clear()
svc.Handlers.Validate.PushBack(validateEndpointHandler)

// Disable the EC2 Metadata service if the environment variable is set.
// This shortcirctes the service's functionality to always fail to send
// requests.
if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
svc.Handlers.Send.SwapNamed(request.NamedHandler{
Name: corehandlers.SendHandler.Name,
Fn: func(r *request.Request) {
r.Error = awserr.New(
request.CanceledErrorCode,
"EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
nil)
},
})
}

// Add additional options to the service config
for _, option := range opts {
option(svc.Client)
494
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
@@ -45,15 +45,20 @@ const (
|
||||
|
||||
// Service identifiers
|
||||
const (
|
||||
A4bServiceID = "a4b" // A4b.
|
||||
AcmServiceID = "acm" // Acm.
|
||||
AcmPcaServiceID = "acm-pca" // AcmPca.
|
||||
ApiPricingServiceID = "api.pricing" // ApiPricing.
|
||||
ApigatewayServiceID = "apigateway" // Apigateway.
|
||||
ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
|
||||
Appstream2ServiceID = "appstream2" // Appstream2.
|
||||
AthenaServiceID = "athena" // Athena.
|
||||
AutoscalingServiceID = "autoscaling" // Autoscaling.
|
||||
AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
|
||||
BatchServiceID = "batch" // Batch.
|
||||
BudgetsServiceID = "budgets" // Budgets.
|
||||
CeServiceID = "ce" // Ce.
|
||||
Cloud9ServiceID = "cloud9" // Cloud9.
|
||||
ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
|
||||
CloudformationServiceID = "cloudformation" // Cloudformation.
|
||||
CloudfrontServiceID = "cloudfront" // Cloudfront.
|
||||
@@ -69,6 +74,7 @@ const (
|
||||
CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
|
||||
CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
|
||||
CognitoSyncServiceID = "cognito-sync" // CognitoSync.
|
||||
ComprehendServiceID = "comprehend" // Comprehend.
|
||||
ConfigServiceID = "config" // Config.
|
||||
CurServiceID = "cur" // Cur.
|
||||
DatapipelineServiceID = "datapipeline" // Datapipeline.
|
||||
@@ -94,10 +100,12 @@ const (
|
||||
EsServiceID = "es" // Es.
|
||||
EventsServiceID = "events" // Events.
|
||||
FirehoseServiceID = "firehose" // Firehose.
|
||||
FmsServiceID = "fms" // Fms.
|
||||
GameliftServiceID = "gamelift" // Gamelift.
|
||||
GlacierServiceID = "glacier" // Glacier.
|
||||
GlueServiceID = "glue" // Glue.
|
||||
GreengrassServiceID = "greengrass" // Greengrass.
|
||||
GuarddutyServiceID = "guardduty" // Guardduty.
|
||||
HealthServiceID = "health" // Health.
|
||||
IamServiceID = "iam" // Iam.
|
||||
ImportexportServiceID = "importexport" // Importexport.
|
||||
@@ -105,12 +113,17 @@ const (
|
||||
IotServiceID = "iot" // Iot.
|
||||
KinesisServiceID = "kinesis" // Kinesis.
|
||||
KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
|
||||
KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
|
||||
KmsServiceID = "kms" // Kms.
|
||||
LambdaServiceID = "lambda" // Lambda.
|
||||
LightsailServiceID = "lightsail" // Lightsail.
|
||||
LogsServiceID = "logs" // Logs.
|
||||
MachinelearningServiceID = "machinelearning" // Machinelearning.
|
||||
MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
|
||||
MediaconvertServiceID = "mediaconvert" // Mediaconvert.
|
||||
MedialiveServiceID = "medialive" // Medialive.
|
||||
MediapackageServiceID = "mediapackage" // Mediapackage.
|
||||
MediastoreServiceID = "mediastore" // Mediastore.
|
||||
MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
|
||||
MghServiceID = "mgh" // Mgh.
|
||||
MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
|
||||
@@ -125,12 +138,18 @@ const (
|
||||
RdsServiceID = "rds" // Rds.
|
||||
RedshiftServiceID = "redshift" // Redshift.
|
||||
RekognitionServiceID = "rekognition" // Rekognition.
|
||||
ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
|
||||
Route53ServiceID = "route53" // Route53.
|
||||
Route53domainsServiceID = "route53domains" // Route53domains.
|
||||
RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
|
||||
RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
|
||||
S3ServiceID = "s3" // S3.
|
||||
SagemakerServiceID = "sagemaker" // Sagemaker.
|
||||
SdbServiceID = "sdb" // Sdb.
|
||||
SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
|
||||
ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
|
||||
ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
|
||||
ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
|
||||
ShieldServiceID = "shield" // Shield.
|
||||
SmsServiceID = "sms" // Sms.
|
||||
SnowballServiceID = "snowball" // Snowball.
|
||||
@@ -144,9 +163,11 @@ const (
|
||||
SupportServiceID = "support" // Support.
|
||||
SwfServiceID = "swf" // Swf.
|
||||
TaggingServiceID = "tagging" // Tagging.
|
||||
TranslateServiceID = "translate" // Translate.
|
||||
WafServiceID = "waf" // Waf.
|
||||
WafRegionalServiceID = "waf-regional" // WafRegional.
|
||||
WorkdocsServiceID = "workdocs" // Workdocs.
|
||||
WorkmailServiceID = "workmail" // Workmail.
|
||||
WorkspacesServiceID = "workspaces" // Workspaces.
|
||||
XrayServiceID = "xray" // Xray.
|
||||
)
|
||||
@@ -244,6 +265,12 @@ var awsPartition = partition{
|
||||
},
|
||||
},
|
||||
Services: services{
|
||||
"a4b": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"acm": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -264,6 +291,22 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"acm-pca": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"api.pricing": service{
|
||||
Defaults: endpoint{
|
||||
CredentialScope: credentialScope{
|
||||
@@ -370,17 +413,36 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"autoscaling-plans": service{
|
||||
Defaults: endpoint{
|
||||
Hostname: "autoscaling.{region}.amazonaws.com",
|
||||
Protocols: []string{"http", "https"},
|
||||
CredentialScope: credentialScope{
|
||||
Service: "autoscaling-plans",
|
||||
},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"ap-southeast-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"batch": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
@@ -397,11 +459,35 @@ var awsPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"ce": service{
|
||||
PartitionEndpoint: "aws-global",
|
||||
IsRegionalized: boxedFalse,
|
||||
|
||||
Endpoints: endpoints{
|
||||
"aws-global": endpoint{
|
||||
Hostname: "ce.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"cloud9": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-southeast-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"clouddirectory": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
@@ -459,7 +545,11 @@ var awsPartition = partition{
|
||||
},
|
||||
},
|
||||
"cloudhsmv2": service{
|
||||
|
||||
Defaults: endpoint{
|
||||
CredentialScope: credentialScope{
|
||||
Service: "cloudhsm",
|
||||
},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
@@ -514,16 +604,43 @@ var awsPartition = partition{
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"us-east-1-fips": endpoint{
|
||||
Hostname: "codebuild-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"us-east-2": endpoint{},
|
||||
"us-east-2-fips": endpoint{
|
||||
Hostname: "codebuild-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-1-fips": endpoint{
|
||||
Hostname: "codebuild-fips.us-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-1",
|
||||
},
|
||||
},
|
||||
"us-west-2": endpoint{},
|
||||
"us-west-2-fips": endpoint{
|
||||
Hostname: "codebuild-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"codecommit": service{
|
||||
@@ -538,6 +655,7 @@ var awsPartition = partition{
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
@@ -577,6 +695,7 @@ var awsPartition = partition{
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
@@ -588,6 +707,7 @@ var awsPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
@@ -648,6 +768,17 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"comprehend": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"config": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -689,9 +820,12 @@ var awsPartition = partition{
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
@@ -917,6 +1051,7 @@ var awsPartition = partition{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
@@ -1054,6 +1189,15 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"fms": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"gamelift": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -1098,6 +1242,10 @@ var awsPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
@@ -1117,6 +1265,29 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"guardduty": service{
|
||||
IsRegionalized: boxedTrue,
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"health": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -1161,6 +1332,7 @@ var awsPartition = partition{
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
@@ -1174,6 +1346,7 @@ var awsPartition = partition{
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
@@ -1212,6 +1385,16 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"kinesisvideo": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"kms": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -1256,12 +1439,15 @@ var awsPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
@@ -1300,6 +1486,62 @@ var awsPartition = partition{
|
||||
"us-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"mediaconvert": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"medialive": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"mediapackage": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"mediastore": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"metering.marketplace": service{
|
||||
Defaults: endpoint{
|
||||
CredentialScope: credentialScope{
|
||||
@@ -1316,6 +1558,7 @@ var awsPartition = partition{
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
@@ -1342,7 +1585,9 @@ var awsPartition = partition{
|
||||
},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"monitoring": service{
|
||||
@@ -1385,6 +1630,7 @@ var awsPartition = partition{
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
@@ -1399,9 +1645,15 @@ var awsPartition = partition{
|
||||
"opsworks-cm": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"organizations": service{
|
||||
@@ -1492,10 +1744,31 @@ var awsPartition = partition{
|
||||
"rekognition": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"resource-groups": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"route53": service{
|
||||
@@ -1526,6 +1799,16 @@ var awsPartition = partition{
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"runtime.sagemaker": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"s3": service{
|
||||
@@ -1587,6 +1870,15 @@ var awsPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"sagemaker": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"sdb": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
@@ -1605,6 +1897,74 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"secretsmanager": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"serverlessrepo": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"ap-northeast-2": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"ap-south-1": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"ap-southeast-1": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"ap-southeast-2": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"ca-central-1": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"eu-central-1": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"eu-west-1": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"eu-west-2": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"sa-east-1": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"us-east-1": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"us-east-2": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"us-west-1": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"us-west-2": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"servicecatalog": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -1625,6 +1985,15 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"servicediscovery": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"shield": service{
|
||||
IsRegionalized: boxedFalse,
|
||||
Defaults: endpoint{
|
||||
@@ -1641,11 +2010,14 @@ var awsPartition = partition{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
@@ -1657,7 +2029,9 @@ var awsPartition = partition{
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
@@ -1740,12 +2114,16 @@ var awsPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
@@ -1895,6 +2273,7 @@ var awsPartition = partition{
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
@@ -1902,6 +2281,17 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"translate": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"waf": service{
|
||||
PartitionEndpoint: "aws-global",
|
||||
IsRegionalized: boxedFalse,
|
||||
@@ -1919,8 +2309,11 @@ var awsPartition = partition{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
@@ -1936,15 +2329,28 @@ var awsPartition = partition{
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"workmail": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"workspaces": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
@@ -2240,6 +2646,13 @@ var awscnPartition = partition{
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"sms": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"snowball": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -2307,7 +2720,8 @@ var awscnPartition = partition{
|
||||
"tagging": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -2371,6 +2785,16 @@ var awsusgovPartition = partition{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"cloudhsmv2": service{
|
||||
Defaults: endpoint{
|
||||
CredentialScope: credentialScope{
|
||||
Service: "cloudhsm",
|
||||
},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"cloudtrail": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -2430,6 +2854,18 @@ var awsusgovPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"ecr": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"ecs": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"elasticache": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -2458,6 +2894,12 @@ var awsusgovPartition = partition{
|
||||
},
|
||||
},
|
||||
},
|
||||
"es": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"events": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -2509,12 +2951,28 @@ var awsusgovPartition = partition{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"metering.marketplace": service{
|
||||
Defaults: endpoint{
|
||||
CredentialScope: credentialScope{
|
||||
Service: "aws-marketplace",
|
||||
},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"monitoring": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"polly": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"rds": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
@@ -2585,6 +3043,12 @@ var awsusgovPartition = partition{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"storagegateway": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"streams.dynamodb": service{
|
||||
Defaults: endpoint{
|
||||
CredentialScope: credentialScope{
|
||||
@@ -2609,6 +3073,12 @@ var awsusgovPartition = partition{
|
||||
},
|
||||
"swf": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"tagging": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
|
||||
22
vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
generated
vendored
@@ -206,10 +206,11 @@ func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (
// enumerating over the regions in a partition.
|
||||
func (p Partition) Regions() map[string]Region {
|
||||
rs := map[string]Region{}
|
||||
for id := range p.p.Regions {
|
||||
for id, r := range p.p.Regions {
|
||||
rs[id] = Region{
|
||||
id: id,
|
||||
p: p.p,
|
||||
id: id,
|
||||
desc: r.Description,
|
||||
p: p.p,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -240,6 +241,10 @@ type Region struct {
|
||||
// ID returns the region's identifier.
|
||||
func (r Region) ID() string { return r.id }
|
||||
|
||||
// Description returns the region's description. The region description
|
||||
// is free text, it can be empty, and it may change between SDK releases.
|
||||
func (r Region) Description() string { return r.desc }
|
||||
|
||||
// ResolveEndpoint resolves an endpoint from the context of the region given
|
||||
// a service. See Partition.EndpointFor for usage and errors that can be returned.
|
||||
func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
|
||||
@@ -284,10 +289,11 @@ func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (Resolve
|
||||
func (s Service) Regions() map[string]Region {
|
||||
rs := map[string]Region{}
|
||||
for id := range s.p.Services[s.id].Endpoints {
|
||||
if _, ok := s.p.Regions[id]; ok {
|
||||
if r, ok := s.p.Regions[id]; ok {
|
||||
rs[id] = Region{
|
||||
id: id,
|
||||
p: s.p,
|
||||
id: id,
|
||||
desc: r.Description,
|
||||
p: s.p,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -347,6 +353,10 @@ type ResolvedEndpoint struct {
|
||||
// The service name that should be used for signing requests.
|
||||
SigningName string
|
||||
|
||||
// States that the signing name for this endpoint was derived from metadata
|
||||
// passed in, but was not explicitly modeled.
|
||||
SigningNameDerived bool
|
||||
|
||||
// The signing method that should be used for signing requests.
|
||||
SigningMethod string
|
||||
}
|
||||
|
||||
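The hunks above thread each region's description through Partition.Regions() and Service.Regions() and expose it via the new Region.Description() accessor. A minimal sketch of a caller reading it, assuming the package's standard AwsPartition() helper:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Enumerate the regions of the AWS standard partition and print the
	// free-text description that Region.Description() now exposes.
	p := endpoints.AwsPartition()
	for id, r := range p.Regions() {
		fmt.Printf("%s: %s\n", id, r.Description())
	}
}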
12 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go generated vendored
@@ -226,16 +226,20 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op
if len(signingRegion) == 0 {
signingRegion = region
}

signingName := e.CredentialScope.Service
var signingNameDerived bool
if len(signingName) == 0 {
signingName = service
signingNameDerived = true
}

return ResolvedEndpoint{
URL: u,
SigningRegion: signingRegion,
SigningName: signingName,
SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
URL: u,
SigningRegion: signingRegion,
SigningName: signingName,
SigningNameDerived: signingNameDerived,
SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
}
}
4 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go generated vendored
@@ -3,6 +3,8 @@ package request
import (
"io"
"sync"

"github.com/aws/aws-sdk-go/internal/sdkio"
)

// offsetReader is a thread-safe io.ReadCloser to prevent racing
@@ -15,7 +17,7 @@ type offsetReader struct {

func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
reader := &offsetReader{}
buf.Seek(offset, 0)
buf.Seek(offset, sdkio.SeekStart)

reader.buf = buf
return reader
53 vendor/github.com/aws/aws-sdk-go/aws/request/request.go generated vendored
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkio"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -224,6 +225,9 @@ func (r *Request) SetContext(ctx aws.Context) {
|
||||
|
||||
// WillRetry returns if the request's can be retried.
|
||||
func (r *Request) WillRetry() bool {
|
||||
if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
|
||||
return false
|
||||
}
|
||||
return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
|
||||
}
|
||||
|
||||
@@ -255,6 +259,7 @@ func (r *Request) SetStringBody(s string) {
|
||||
// SetReaderBody will set the request's body reader.
|
||||
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
|
||||
r.Body = reader
|
||||
r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset.
|
||||
r.ResetBody()
|
||||
}
|
||||
|
||||
@@ -292,6 +297,11 @@ func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, err
|
||||
return getPresignedURL(r, expire)
|
||||
}
|
||||
|
||||
// IsPresigned returns true if the request represents a presigned API url.
|
||||
func (r *Request) IsPresigned() bool {
|
||||
return r.ExpireTime != 0
|
||||
}
|
||||
|
||||
func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) {
|
||||
if expire <= 0 {
|
||||
return "", nil, awserr.New(
|
||||
@@ -332,7 +342,7 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) {
|
||||
|
||||
// Build will build the request's object so it can be signed and sent
|
||||
// to the service. Build will also validate all the request's parameters.
|
||||
// Anny additional build Handlers set on this request will be run
|
||||
// Any additional build Handlers set on this request will be run
|
||||
// in the order they were set.
|
||||
//
|
||||
// The request will only be built once. Multiple calls to build will have
|
||||
@@ -393,7 +403,7 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
|
||||
// of the SDK if they used that field.
|
||||
//
|
||||
// Related golang/go#18257
|
||||
l, err := computeBodyLength(r.Body)
|
||||
l, err := aws.SeekerLen(r.Body)
|
||||
if err != nil {
|
||||
return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
|
||||
}
|
||||
@@ -411,7 +421,8 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
|
||||
// Transfer-Encoding: chunked bodies for these methods.
|
||||
//
|
||||
// This would only happen if a aws.ReaderSeekerCloser was used with
|
||||
// a io.Reader that was not also an io.Seeker.
|
||||
// a io.Reader that was not also an io.Seeker, or did not implement
|
||||
// Len() method.
|
||||
switch r.Operation.HTTPMethod {
|
||||
case "GET", "HEAD", "DELETE":
|
||||
body = NoBody
|
||||
@@ -423,42 +434,6 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
|
||||
return body, nil
|
||||
}
|
||||
|
||||
// Attempts to compute the length of the body of the reader using the
|
||||
// io.Seeker interface. If the value is not seekable because of being
|
||||
// a ReaderSeekerCloser without an unerlying Seeker -1 will be returned.
|
||||
// If no error occurs the length of the body will be returned.
|
||||
func computeBodyLength(r io.ReadSeeker) (int64, error) {
|
||||
seekable := true
|
||||
// Determine if the seeker is actually seekable. ReaderSeekerCloser
|
||||
// hides the fact that a io.Readers might not actually be seekable.
|
||||
switch v := r.(type) {
|
||||
case aws.ReaderSeekerCloser:
|
||||
seekable = v.IsSeeker()
|
||||
case *aws.ReaderSeekerCloser:
|
||||
seekable = v.IsSeeker()
|
||||
}
|
||||
if !seekable {
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
curOffset, err := r.Seek(0, 1)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
endOffset, err := r.Seek(0, 2)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
_, err = r.Seek(curOffset, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return endOffset - curOffset, nil
|
||||
}
|
||||
|
||||
// GetBody will return an io.ReadSeeker of the Request's underlying
|
||||
// input body with a concurrency safe wrapper.
|
||||
func (r *Request) GetBody() io.ReadSeeker {
|
||||
|
||||
25 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go generated vendored
@@ -142,13 +142,28 @@ func (r *Request) nextPageTokens() []interface{} {
tokens := []interface{}{}
tokenAdded := false
for _, outToken := range r.Operation.OutputTokens {
v, _ := awsutil.ValuesAtPath(r.Data, outToken)
if len(v) > 0 {
tokens = append(tokens, v[0])
tokenAdded = true
} else {
vs, _ := awsutil.ValuesAtPath(r.Data, outToken)
if len(vs) == 0 {
tokens = append(tokens, nil)
continue
}
v := vs[0]

switch tv := v.(type) {
case *string:
if len(aws.StringValue(tv)) == 0 {
tokens = append(tokens, nil)
continue
}
case string:
if len(tv) == 0 {
tokens = append(tokens, nil)
continue
}
}

tokenAdded = true
tokens = append(tokens, v)
}
if !tokenAdded {
return nil
8 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go generated vendored
@@ -5,6 +5,7 @@ import (
"strconv"

"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/defaults"
)

// EnvProviderName provides a name of the provider when config is loaded from environment.
@@ -176,6 +177,13 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)

if len(cfg.SharedCredentialsFile) == 0 {
cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
}
if len(cfg.SharedConfigFile) == 0 {
cfg.SharedConfigFile = defaults.SharedConfigFilename()
}

cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")

return cfg
38 vendor/github.com/aws/aws-sdk-go/aws/session/session.go generated vendored
@@ -26,7 +26,7 @@ import (
|
||||
// Sessions are safe to create service clients concurrently, but it is not safe
|
||||
// to mutate the Session concurrently.
|
||||
//
|
||||
// The Session satisfies the service client's client.ClientConfigProvider.
|
||||
// The Session satisfies the service client's client.ConfigProvider.
|
||||
type Session struct {
|
||||
Config *aws.Config
|
||||
Handlers request.Handlers
|
||||
@@ -58,7 +58,12 @@ func New(cfgs ...*aws.Config) *Session {
|
||||
envCfg := loadEnvConfig()
|
||||
|
||||
if envCfg.EnableSharedConfig {
|
||||
s, err := newSession(Options{}, envCfg, cfgs...)
|
||||
var cfg aws.Config
|
||||
cfg.MergeIn(cfgs...)
|
||||
s, err := NewSessionWithOptions(Options{
|
||||
Config: cfg,
|
||||
SharedConfigState: SharedConfigEnable,
|
||||
})
|
||||
if err != nil {
|
||||
// Old session.New expected all errors to be discovered when
|
||||
// a request is made, and would report the errors then. This
|
||||
@@ -243,13 +248,6 @@ func NewSessionWithOptions(opts Options) (*Session, error) {
|
||||
envCfg.EnableSharedConfig = true
|
||||
}
|
||||
|
||||
if len(envCfg.SharedCredentialsFile) == 0 {
|
||||
envCfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
|
||||
}
|
||||
if len(envCfg.SharedConfigFile) == 0 {
|
||||
envCfg.SharedConfigFile = defaults.SharedConfigFilename()
|
||||
}
|
||||
|
||||
// Only use AWS_CA_BUNDLE if session option is not provided.
|
||||
if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
|
||||
f, err := os.Open(envCfg.CustomCABundle)
|
||||
@@ -573,11 +571,12 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (
|
||||
}
|
||||
|
||||
return client.Config{
|
||||
Config: s.Config,
|
||||
Handlers: s.Handlers,
|
||||
Endpoint: resolved.URL,
|
||||
SigningRegion: resolved.SigningRegion,
|
||||
SigningName: resolved.SigningName,
|
||||
Config: s.Config,
|
||||
Handlers: s.Handlers,
|
||||
Endpoint: resolved.URL,
|
||||
SigningRegion: resolved.SigningRegion,
|
||||
SigningNameDerived: resolved.SigningNameDerived,
|
||||
SigningName: resolved.SigningName,
|
||||
}, err
|
||||
}
|
||||
|
||||
@@ -597,10 +596,11 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf
|
||||
}
|
||||
|
||||
return client.Config{
|
||||
Config: s.Config,
|
||||
Handlers: s.Handlers,
|
||||
Endpoint: resolved.URL,
|
||||
SigningRegion: resolved.SigningRegion,
|
||||
SigningName: resolved.SigningName,
|
||||
Config: s.Config,
|
||||
Handlers: s.Handlers,
|
||||
Endpoint: resolved.URL,
|
||||
SigningRegion: resolved.SigningRegion,
|
||||
SigningNameDerived: resolved.SigningNameDerived,
|
||||
SigningName: resolved.SigningName,
|
||||
}
|
||||
}
|
||||
|
||||
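The session.New change above now routes through NewSessionWithOptions with SharedConfigEnable when AWS_SDK_LOAD_CONFIG is set. A short sketch of the equivalent explicit call an application could make; the region value is only illustrative:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Force loading of the shared credentials and config files regardless of
	// AWS_SDK_LOAD_CONFIG, mirroring the path session.New now takes when that
	// environment variable is set.
	sess, err := session.NewSessionWithOptions(session.Options{
		Config:            aws.Config{Region: aws.String("us-west-2")},
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = sess // hand the session to service clients as usual
}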
24 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go generated vendored
@@ -71,6 +71,7 @@ import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkio"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/rest"
|
||||
)
|
||||
|
||||
@@ -341,7 +342,9 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
|
||||
|
||||
ctx.sanitizeHostForHeader()
|
||||
ctx.assignAmzQueryValues()
|
||||
ctx.build(v4.DisableHeaderHoisting)
|
||||
if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the request is not presigned the body should be attached to it. This
|
||||
// prevents the confusion of wanting to send a signed request without
|
||||
@@ -503,11 +506,13 @@ func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
|
||||
v4.Logger.Log(msg)
|
||||
}
|
||||
|
||||
func (ctx *signingCtx) build(disableHeaderHoisting bool) {
|
||||
func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
|
||||
ctx.buildTime() // no depends
|
||||
ctx.buildCredentialString() // no depends
|
||||
|
||||
ctx.buildBodyDigest()
|
||||
if err := ctx.buildBodyDigest(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unsignedHeaders := ctx.Request.Header
|
||||
if ctx.isPresign {
|
||||
@@ -535,6 +540,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) {
|
||||
}
|
||||
ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ctx *signingCtx) buildTime() {
|
||||
@@ -661,7 +668,7 @@ func (ctx *signingCtx) buildSignature() {
|
||||
ctx.signature = hex.EncodeToString(signature)
|
||||
}
|
||||
|
||||
func (ctx *signingCtx) buildBodyDigest() {
|
||||
func (ctx *signingCtx) buildBodyDigest() error {
|
||||
hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
|
||||
if hash == "" {
|
||||
if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") {
|
||||
@@ -669,6 +676,9 @@ func (ctx *signingCtx) buildBodyDigest() {
|
||||
} else if ctx.Body == nil {
|
||||
hash = emptyStringSHA256
|
||||
} else {
|
||||
if !aws.IsReaderSeekable(ctx.Body) {
|
||||
return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
|
||||
}
|
||||
hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
|
||||
}
|
||||
if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
|
||||
@@ -676,6 +686,8 @@ func (ctx *signingCtx) buildBodyDigest() {
|
||||
}
|
||||
}
|
||||
ctx.bodyDigest = hash
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// isRequestSigned returns if the request is currently signed or presigned
|
||||
@@ -715,8 +727,8 @@ func makeSha256(data []byte) []byte {
|
||||
|
||||
func makeSha256Reader(reader io.ReadSeeker) []byte {
|
||||
hash := sha256.New()
|
||||
start, _ := reader.Seek(0, 1)
|
||||
defer reader.Seek(start, 0)
|
||||
start, _ := reader.Seek(0, sdkio.SeekCurrent)
|
||||
defer reader.Seek(start, sdkio.SeekStart)
|
||||
|
||||
io.Copy(hash, reader)
|
||||
return hash.Sum(nil)
|
||||
|
||||
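With build and buildBodyDigest now returning errors, signing a request whose body cannot be seeked surfaces an error instead of producing a bad digest. A rough sketch of signing a request with a seekable body; the credentials and endpoint are placeholders, not values from this change:

package main

import (
	"bytes"
	"log"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	// Placeholder static credentials, for illustration only.
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
	signer := v4.NewSigner(creds)

	body := bytes.NewReader([]byte("Action=GetCallerIdentity&Version=2011-06-15"))
	req, _ := http.NewRequest("POST", "https://sts.us-east-1.amazonaws.com/", body)

	// Sign requires an io.ReadSeeker body; with this change an unseekable
	// body is reported as an error rather than silently mis-signed.
	if _, err := signer.Sign(req, body, "sts", "us-east-1", time.Now()); err != nil {
		log.Fatal(err)
	}
}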
83 vendor/github.com/aws/aws-sdk-go/aws/types.go generated vendored
@@ -3,6 +3,8 @@ package aws
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/aws/aws-sdk-go/internal/sdkio"
|
||||
)
|
||||
|
||||
// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should
|
||||
@@ -22,6 +24,22 @@ type ReaderSeekerCloser struct {
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
// IsReaderSeekable returns if the underlying reader type can be seeked. A
|
||||
// io.Reader might not actually be seekable if it is the ReaderSeekerCloser
|
||||
// type.
|
||||
func IsReaderSeekable(r io.Reader) bool {
|
||||
switch v := r.(type) {
|
||||
case ReaderSeekerCloser:
|
||||
return v.IsSeeker()
|
||||
case *ReaderSeekerCloser:
|
||||
return v.IsSeeker()
|
||||
case io.ReadSeeker:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Read reads from the reader up to size of p. The number of bytes read, and
|
||||
// error if it occurred will be returned.
|
||||
//
|
||||
@@ -56,6 +74,71 @@ func (r ReaderSeekerCloser) IsSeeker() bool {
|
||||
return ok
|
||||
}
|
||||
|
||||
// HasLen returns the length of the underlying reader if the value implements
|
||||
// the Len() int method.
|
||||
func (r ReaderSeekerCloser) HasLen() (int, bool) {
|
||||
type lenner interface {
|
||||
Len() int
|
||||
}
|
||||
|
||||
if lr, ok := r.r.(lenner); ok {
|
||||
return lr.Len(), true
|
||||
}
|
||||
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// GetLen returns the length of the bytes remaining in the underlying reader.
|
||||
// Checks first for Len(), then io.Seeker to determine the size of the
|
||||
// underlying reader.
|
||||
//
|
||||
// Will return -1 if the length cannot be determined.
|
||||
func (r ReaderSeekerCloser) GetLen() (int64, error) {
|
||||
if l, ok := r.HasLen(); ok {
|
||||
return int64(l), nil
|
||||
}
|
||||
|
||||
if s, ok := r.r.(io.Seeker); ok {
|
||||
return seekerLen(s)
|
||||
}
|
||||
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
// SeekerLen attempts to get the number of bytes remaining at the seeker's
|
||||
// current position. Returns the number of bytes remaining or error.
|
||||
func SeekerLen(s io.Seeker) (int64, error) {
|
||||
// Determine if the seeker is actually seekable. ReaderSeekerCloser
|
||||
// hides the fact that a io.Readers might not actually be seekable.
|
||||
switch v := s.(type) {
|
||||
case ReaderSeekerCloser:
|
||||
return v.GetLen()
|
||||
case *ReaderSeekerCloser:
|
||||
return v.GetLen()
|
||||
}
|
||||
|
||||
return seekerLen(s)
|
||||
}
|
||||
|
||||
func seekerLen(s io.Seeker) (int64, error) {
|
||||
curOffset, err := s.Seek(0, sdkio.SeekCurrent)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
endOffset, err := s.Seek(0, sdkio.SeekEnd)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
_, err = s.Seek(curOffset, sdkio.SeekStart)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return endOffset - curOffset, nil
|
||||
}
|
||||
|
||||
// Close closes the ReaderSeekerCloser.
|
||||
//
|
||||
// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
|
||||
|
||||
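The IsReaderSeekable, GetLen, and SeekerLen helpers added above are what request.go now uses to size a body without consuming it. A small sketch using only identifiers from this hunk plus the existing aws.ReadSeekCloser wrapper:

package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// A strings.Reader is an io.ReadSeeker, so its remaining length can be
	// computed without reading it.
	body := strings.NewReader("example payload")
	n, err := aws.SeekerLen(body)
	fmt.Println(n, err) // 15 <nil>

	// Wrapping a plain io.Reader can hide the seeker; IsReaderSeekable
	// reports whether the underlying value can still be seeked.
	wrapped := aws.ReadSeekCloser(strings.NewReader("example payload"))
	fmt.Println(aws.IsReaderSeekable(wrapped)) // true
}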
2 vendor/github.com/aws/aws-sdk-go/aws/version.go generated vendored
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.12.67"
const SDKVersion = "1.13.49"
10 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go generated vendored Normal file
@@ -0,0 +1,10 @@
// +build !go1.7

package sdkio

// Copy of Go 1.7 io package's Seeker constants.
const (
SeekStart = 0 // seek relative to the origin of the file
SeekCurrent = 1 // seek relative to the current offset
SeekEnd = 2 // seek relative to the end
)
12 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go generated vendored Normal file
@@ -0,0 +1,12 @@
// +build go1.7

package sdkio

import "io"

// Alias for Go 1.7 io package Seeker constants
const (
SeekStart = io.SeekStart // seek relative to the origin of the file
SeekCurrent = io.SeekCurrent // seek relative to the current offset
SeekEnd = io.SeekEnd // seek relative to the end
)
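Both sdkio files exist so the SDK can use named seek whence values on Go versions before 1.7, where io.SeekStart and friends are unavailable. The save/seek/restore pattern they support, which recurs throughout this change set, looks roughly like the following sketch (a bytes.Reader stands in for a request body):

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
)

// hashAndRewind hashes a seekable body and restores its original offset,
// the same save/seek/restore pattern the SDK now expresses with the
// sdkio.SeekStart and sdkio.SeekCurrent constants.
func hashAndRewind(body io.ReadSeeker) (string, error) {
	start, err := body.Seek(0, io.SeekCurrent) // remember the current offset
	if err != nil {
		return "", err
	}
	h := sha256.New()
	if _, err := io.Copy(h, body); err != nil {
		return "", err
	}
	if _, err := body.Seek(start, io.SeekStart); err != nil { // rewind for transmission
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	sum, err := hashAndRewind(bytes.NewReader([]byte("payload")))
	fmt.Println(sum, err)
}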
29 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go generated vendored Normal file
@@ -0,0 +1,29 @@
package sdkrand

import (
"math/rand"
"sync"
"time"
)

// lockedSource is a thread-safe implementation of rand.Source
type lockedSource struct {
lk sync.Mutex
src rand.Source
}

func (r *lockedSource) Int63() (n int64) {
r.lk.Lock()
n = r.src.Int63()
r.lk.Unlock()
return
}

func (r *lockedSource) Seed(seed int64) {
r.lk.Lock()
r.src.Seed(seed)
r.lk.Unlock()
}

// SeededRand is a new RNG using a thread safe implementation of rand.Source
var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
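SeededRand is consumed later in this change set by service/ec2/customizations.go to add jitter to custom retry delays. A rough standalone sketch of that backoff computation, using math/rand directly because sdkrand is an internal package:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// customRetryDelay mirrors the EC2 customRetryRule in this change set:
// pick a base delay for the attempt, then add up to the same amount of
// random jitter so concurrent retries do not synchronize.
func customRetryDelay(retryCount int, rng *rand.Rand) time.Duration {
	retryTimes := []time.Duration{time.Second, 3 * time.Second, 5 * time.Second}
	if retryCount >= len(retryTimes) {
		retryCount = len(retryTimes) - 1
	}
	minTime := int(retryTimes[retryCount])
	return time.Duration(rng.Intn(minTime) + minTime)
}

func main() {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	for attempt := 0; attempt < 4; attempt++ {
		fmt.Println(attempt, customRetryDelay(attempt, rng))
	}
}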
2 vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go generated vendored
@@ -24,7 +24,7 @@ func Build(r *request.Request) {
r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err)
}

if r.ExpireTime == 0 {
if !r.IsPresigned() {
r.HTTPRequest.Method = "POST"
r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
r.SetBufferBody([]byte(body.Encode()))
2 vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go generated vendored
@@ -25,7 +25,7 @@ func Build(r *request.Request) {
return
}

if r.ExpireTime == 0 {
if !r.IsPresigned() {
r.HTTPRequest.Method = "POST"
r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
r.SetBufferBody([]byte(body.Encode()))
10 vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go generated vendored
@@ -52,9 +52,15 @@ func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
if t == "" {
switch rtype.Kind() {
case reflect.Struct:
t = "structure"
// also it can't be a time object
if _, ok := r.Interface().(*time.Time); !ok {
t = "structure"
}
case reflect.Slice:
t = "list"
// also it can't be a byte slice
if _, ok := r.Interface().([]byte); !ok {
t = "list"
}
case reflect.Map:
t = "map"
}
674 vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go generated vendored
File diff suppressed because it is too large
4560 vendor/github.com/aws/aws-sdk-go/service/ec2/api.go generated vendored
File diff suppressed because it is too large
53 vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go generated vendored
@@ -5,11 +5,64 @@ import (
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/endpoints"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkrand"
|
||||
)
|
||||
|
||||
type retryer struct {
|
||||
client.DefaultRetryer
|
||||
}
|
||||
|
||||
func (d retryer) RetryRules(r *request.Request) time.Duration {
|
||||
switch r.Operation.Name {
|
||||
case opModifyNetworkInterfaceAttribute:
|
||||
fallthrough
|
||||
case opAssignPrivateIpAddresses:
|
||||
return customRetryRule(r)
|
||||
default:
|
||||
return d.DefaultRetryer.RetryRules(r)
|
||||
}
|
||||
}
|
||||
|
||||
func customRetryRule(r *request.Request) time.Duration {
|
||||
retryTimes := []time.Duration{
|
||||
time.Second,
|
||||
3 * time.Second,
|
||||
5 * time.Second,
|
||||
}
|
||||
|
||||
count := r.RetryCount
|
||||
if count >= len(retryTimes) {
|
||||
count = len(retryTimes) - 1
|
||||
}
|
||||
|
||||
minTime := int(retryTimes[count])
|
||||
return time.Duration(sdkrand.SeededRand.Intn(minTime) + minTime)
|
||||
}
|
||||
|
||||
func setCustomRetryer(c *client.Client) {
|
||||
maxRetries := aws.IntValue(c.Config.MaxRetries)
|
||||
if c.Config.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
|
||||
maxRetries = 3
|
||||
}
|
||||
|
||||
c.Retryer = retryer{
|
||||
DefaultRetryer: client.DefaultRetryer{
|
||||
NumMaxRetries: maxRetries,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
initClient = func(c *client.Client) {
|
||||
if c.Config.Retryer == nil {
|
||||
// Only override the retryer with a custom one if the config
|
||||
// does not already contain a retryer
|
||||
setCustomRetryer(c)
|
||||
}
|
||||
}
|
||||
initRequest = func(r *request.Request) {
|
||||
if r.Operation.Name == opCopySnapshot { // fill the PresignedURL parameter
|
||||
r.Handlers.Build.PushFront(fillPresignedURL)
|
||||
|
||||
2 vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go generated vendored
@@ -4,7 +4,7 @@
// requests to Amazon Elastic Compute Cloud.
//
// Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity
// in the AWS Cloud. Using Amazon EC2 eliminates your need to invest in hardware
// in the AWS Cloud. Using Amazon EC2 eliminates the need to invest in hardware
// up front, so you can develop and deploy applications faster.
//
// See https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15 for more information on this service.
32 vendor/github.com/aws/aws-sdk-go/service/ec2/ec2iface/interface.go generated vendored
@@ -208,6 +208,10 @@ type EC2API interface {
|
||||
CreateEgressOnlyInternetGatewayWithContext(aws.Context, *ec2.CreateEgressOnlyInternetGatewayInput, ...request.Option) (*ec2.CreateEgressOnlyInternetGatewayOutput, error)
|
||||
CreateEgressOnlyInternetGatewayRequest(*ec2.CreateEgressOnlyInternetGatewayInput) (*request.Request, *ec2.CreateEgressOnlyInternetGatewayOutput)
|
||||
|
||||
CreateFleet(*ec2.CreateFleetInput) (*ec2.CreateFleetOutput, error)
|
||||
CreateFleetWithContext(aws.Context, *ec2.CreateFleetInput, ...request.Option) (*ec2.CreateFleetOutput, error)
|
||||
CreateFleetRequest(*ec2.CreateFleetInput) (*request.Request, *ec2.CreateFleetOutput)
|
||||
|
||||
CreateFlowLogs(*ec2.CreateFlowLogsInput) (*ec2.CreateFlowLogsOutput, error)
|
||||
CreateFlowLogsWithContext(aws.Context, *ec2.CreateFlowLogsInput, ...request.Option) (*ec2.CreateFlowLogsOutput, error)
|
||||
CreateFlowLogsRequest(*ec2.CreateFlowLogsInput) (*request.Request, *ec2.CreateFlowLogsOutput)
|
||||
@@ -344,6 +348,10 @@ type EC2API interface {
|
||||
DeleteEgressOnlyInternetGatewayWithContext(aws.Context, *ec2.DeleteEgressOnlyInternetGatewayInput, ...request.Option) (*ec2.DeleteEgressOnlyInternetGatewayOutput, error)
|
||||
DeleteEgressOnlyInternetGatewayRequest(*ec2.DeleteEgressOnlyInternetGatewayInput) (*request.Request, *ec2.DeleteEgressOnlyInternetGatewayOutput)
|
||||
|
||||
DeleteFleets(*ec2.DeleteFleetsInput) (*ec2.DeleteFleetsOutput, error)
|
||||
DeleteFleetsWithContext(aws.Context, *ec2.DeleteFleetsInput, ...request.Option) (*ec2.DeleteFleetsOutput, error)
|
||||
DeleteFleetsRequest(*ec2.DeleteFleetsInput) (*request.Request, *ec2.DeleteFleetsOutput)
|
||||
|
||||
DeleteFlowLogs(*ec2.DeleteFlowLogsInput) (*ec2.DeleteFlowLogsOutput, error)
|
||||
DeleteFlowLogsWithContext(aws.Context, *ec2.DeleteFlowLogsInput, ...request.Option) (*ec2.DeleteFlowLogsOutput, error)
|
||||
DeleteFlowLogsRequest(*ec2.DeleteFlowLogsInput) (*request.Request, *ec2.DeleteFlowLogsOutput)
|
||||
@@ -468,6 +476,10 @@ type EC2API interface {
|
||||
DescribeAddressesWithContext(aws.Context, *ec2.DescribeAddressesInput, ...request.Option) (*ec2.DescribeAddressesOutput, error)
|
||||
DescribeAddressesRequest(*ec2.DescribeAddressesInput) (*request.Request, *ec2.DescribeAddressesOutput)
|
||||
|
||||
DescribeAggregateIdFormat(*ec2.DescribeAggregateIdFormatInput) (*ec2.DescribeAggregateIdFormatOutput, error)
|
||||
DescribeAggregateIdFormatWithContext(aws.Context, *ec2.DescribeAggregateIdFormatInput, ...request.Option) (*ec2.DescribeAggregateIdFormatOutput, error)
|
||||
DescribeAggregateIdFormatRequest(*ec2.DescribeAggregateIdFormatInput) (*request.Request, *ec2.DescribeAggregateIdFormatOutput)
|
||||
|
||||
DescribeAvailabilityZones(*ec2.DescribeAvailabilityZonesInput) (*ec2.DescribeAvailabilityZonesOutput, error)
|
||||
DescribeAvailabilityZonesWithContext(aws.Context, *ec2.DescribeAvailabilityZonesInput, ...request.Option) (*ec2.DescribeAvailabilityZonesOutput, error)
|
||||
DescribeAvailabilityZonesRequest(*ec2.DescribeAvailabilityZonesInput) (*request.Request, *ec2.DescribeAvailabilityZonesOutput)
|
||||
@@ -504,6 +516,18 @@ type EC2API interface {
|
||||
DescribeExportTasksWithContext(aws.Context, *ec2.DescribeExportTasksInput, ...request.Option) (*ec2.DescribeExportTasksOutput, error)
|
||||
DescribeExportTasksRequest(*ec2.DescribeExportTasksInput) (*request.Request, *ec2.DescribeExportTasksOutput)
|
||||
|
||||
DescribeFleetHistory(*ec2.DescribeFleetHistoryInput) (*ec2.DescribeFleetHistoryOutput, error)
|
||||
DescribeFleetHistoryWithContext(aws.Context, *ec2.DescribeFleetHistoryInput, ...request.Option) (*ec2.DescribeFleetHistoryOutput, error)
|
||||
DescribeFleetHistoryRequest(*ec2.DescribeFleetHistoryInput) (*request.Request, *ec2.DescribeFleetHistoryOutput)
|
||||
|
||||
DescribeFleetInstances(*ec2.DescribeFleetInstancesInput) (*ec2.DescribeFleetInstancesOutput, error)
|
||||
DescribeFleetInstancesWithContext(aws.Context, *ec2.DescribeFleetInstancesInput, ...request.Option) (*ec2.DescribeFleetInstancesOutput, error)
|
||||
DescribeFleetInstancesRequest(*ec2.DescribeFleetInstancesInput) (*request.Request, *ec2.DescribeFleetInstancesOutput)
|
||||
|
||||
DescribeFleets(*ec2.DescribeFleetsInput) (*ec2.DescribeFleetsOutput, error)
|
||||
DescribeFleetsWithContext(aws.Context, *ec2.DescribeFleetsInput, ...request.Option) (*ec2.DescribeFleetsOutput, error)
|
||||
DescribeFleetsRequest(*ec2.DescribeFleetsInput) (*request.Request, *ec2.DescribeFleetsOutput)
|
||||
|
||||
DescribeFlowLogs(*ec2.DescribeFlowLogsInput) (*ec2.DescribeFlowLogsOutput, error)
|
||||
DescribeFlowLogsWithContext(aws.Context, *ec2.DescribeFlowLogsInput, ...request.Option) (*ec2.DescribeFlowLogsOutput, error)
|
||||
DescribeFlowLogsRequest(*ec2.DescribeFlowLogsInput) (*request.Request, *ec2.DescribeFlowLogsOutput)
|
||||
@@ -629,6 +653,10 @@ type EC2API interface {
|
||||
DescribePrefixListsWithContext(aws.Context, *ec2.DescribePrefixListsInput, ...request.Option) (*ec2.DescribePrefixListsOutput, error)
|
||||
DescribePrefixListsRequest(*ec2.DescribePrefixListsInput) (*request.Request, *ec2.DescribePrefixListsOutput)
|
||||
|
||||
DescribePrincipalIdFormat(*ec2.DescribePrincipalIdFormatInput) (*ec2.DescribePrincipalIdFormatOutput, error)
|
||||
DescribePrincipalIdFormatWithContext(aws.Context, *ec2.DescribePrincipalIdFormatInput, ...request.Option) (*ec2.DescribePrincipalIdFormatOutput, error)
|
||||
DescribePrincipalIdFormatRequest(*ec2.DescribePrincipalIdFormatInput) (*request.Request, *ec2.DescribePrincipalIdFormatOutput)
|
||||
|
||||
DescribeRegions(*ec2.DescribeRegionsInput) (*ec2.DescribeRegionsOutput, error)
|
||||
DescribeRegionsWithContext(aws.Context, *ec2.DescribeRegionsInput, ...request.Option) (*ec2.DescribeRegionsOutput, error)
|
||||
DescribeRegionsRequest(*ec2.DescribeRegionsInput) (*request.Request, *ec2.DescribeRegionsOutput)
|
||||
@@ -917,6 +945,10 @@ type EC2API interface {
|
||||
ImportVolumeWithContext(aws.Context, *ec2.ImportVolumeInput, ...request.Option) (*ec2.ImportVolumeOutput, error)
|
||||
ImportVolumeRequest(*ec2.ImportVolumeInput) (*request.Request, *ec2.ImportVolumeOutput)
|
||||
|
||||
ModifyFleet(*ec2.ModifyFleetInput) (*ec2.ModifyFleetOutput, error)
|
||||
ModifyFleetWithContext(aws.Context, *ec2.ModifyFleetInput, ...request.Option) (*ec2.ModifyFleetOutput, error)
|
||||
ModifyFleetRequest(*ec2.ModifyFleetInput) (*request.Request, *ec2.ModifyFleetOutput)
|
||||
|
||||
ModifyFpgaImageAttribute(*ec2.ModifyFpgaImageAttributeInput) (*ec2.ModifyFpgaImageAttributeOutput, error)
|
||||
ModifyFpgaImageAttributeWithContext(aws.Context, *ec2.ModifyFpgaImageAttributeInput, ...request.Option) (*ec2.ModifyFpgaImageAttributeOutput, error)
|
||||
ModifyFpgaImageAttributeRequest(*ec2.ModifyFpgaImageAttributeInput) (*request.Request, *ec2.ModifyFpgaImageAttributeOutput)
|
||||
|
||||
547 vendor/github.com/aws/aws-sdk-go/service/s3/api.go generated vendored
File diff suppressed because it is too large
249 vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go generated vendored Normal file
@@ -0,0 +1,249 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkio"
|
||||
)
|
||||
|
||||
const (
|
||||
contentMD5Header = "Content-Md5"
|
||||
contentSha256Header = "X-Amz-Content-Sha256"
|
||||
amzTeHeader = "X-Amz-Te"
|
||||
amzTxEncodingHeader = "X-Amz-Transfer-Encoding"
|
||||
|
||||
appendMD5TxEncoding = "append-md5"
|
||||
)
|
||||
|
||||
// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
|
||||
// require it.
|
||||
func contentMD5(r *request.Request) {
|
||||
h := md5.New()
|
||||
|
||||
if !aws.IsReaderSeekable(r.Body) {
|
||||
if r.Config.Logger != nil {
|
||||
r.Config.Logger.Log(fmt.Sprintf(
|
||||
"Unable to compute Content-MD5 for unseekable body, S3.%s",
|
||||
r.Operation.Name))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := copySeekableBody(h, r.Body); err != nil {
|
||||
r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
|
||||
return
|
||||
}
|
||||
|
||||
// encode the md5 checksum in base64 and set the request header.
|
||||
v := base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
r.HTTPRequest.Header.Set(contentMD5Header, v)
|
||||
}
|
||||
|
||||
// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the
|
||||
// request. If the body is not seekable or S3DisableContentMD5Validation set
|
||||
// this handler will be ignored.
|
||||
func computeBodyHashes(r *request.Request) {
|
||||
if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
|
||||
return
|
||||
}
|
||||
if r.IsPresigned() {
|
||||
return
|
||||
}
|
||||
if r.Error != nil || !aws.IsReaderSeekable(r.Body) {
|
||||
return
|
||||
}
|
||||
|
||||
var md5Hash, sha256Hash hash.Hash
|
||||
hashers := make([]io.Writer, 0, 2)
|
||||
|
||||
// Determine upfront which hashes can be set without overriding user
|
||||
// provide header data.
|
||||
if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 {
|
||||
md5Hash = md5.New()
|
||||
hashers = append(hashers, md5Hash)
|
||||
}
|
||||
|
||||
if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 {
|
||||
sha256Hash = sha256.New()
|
||||
hashers = append(hashers, sha256Hash)
|
||||
}
|
||||
|
||||
// Create the destination writer based on the hashes that are not already
|
||||
// provided by the user.
|
||||
var dst io.Writer
|
||||
switch len(hashers) {
|
||||
case 0:
|
||||
return
|
||||
case 1:
|
||||
dst = hashers[0]
|
||||
default:
|
||||
dst = io.MultiWriter(hashers...)
|
||||
}
|
||||
|
||||
if _, err := copySeekableBody(dst, r.Body); err != nil {
|
||||
r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
|
||||
return
|
||||
}
|
||||
|
||||
// For the hashes created, set the associated headers that the user did not
|
||||
// already provide.
|
||||
if md5Hash != nil {
|
||||
sum := make([]byte, md5.Size)
|
||||
encoded := make([]byte, md5Base64EncLen)
|
||||
|
||||
base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0]))
|
||||
r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)}
|
||||
}
|
||||
|
||||
if sha256Hash != nil {
|
||||
encoded := make([]byte, sha256HexEncLen)
|
||||
sum := make([]byte, sha256.Size)
|
||||
|
||||
hex.Encode(encoded, sha256Hash.Sum(sum[0:0]))
|
||||
r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)}
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen
|
||||
sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen
|
||||
)
|
||||
|
||||
func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
|
||||
curPos, err := src.Seek(0, sdkio.SeekCurrent)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// hash the body. seek back to the first position after reading to reset
|
||||
// the body for transmission. copy errors may be assumed to be from the
|
||||
// body.
|
||||
n, err := io.Copy(dst, src)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
_, err = src.Seek(curPos, sdkio.SeekStart)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Adds the x-amz-te: append_md5 header to the request. This requests the service
|
||||
// responds with a trailing MD5 checksum.
|
||||
//
|
||||
// Will not ask for append MD5 if disabled, the request is presigned or,
|
||||
// or the API operation does not support content MD5 validation.
|
||||
func askForTxEncodingAppendMD5(r *request.Request) {
|
||||
if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
|
||||
return
|
||||
}
|
||||
if r.IsPresigned() {
|
||||
return
|
||||
}
|
||||
r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding)
|
||||
}
|
||||
|
||||
func useMD5ValidationReader(r *request.Request) {
|
||||
if r.Error != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding {
|
||||
return
|
||||
}
|
||||
|
||||
var bodyReader *io.ReadCloser
|
||||
var contentLen int64
|
||||
switch tv := r.Data.(type) {
|
||||
case *GetObjectOutput:
|
||||
bodyReader = &tv.Body
|
||||
contentLen = aws.Int64Value(tv.ContentLength)
|
||||
// Update ContentLength hiden the trailing MD5 checksum.
|
||||
tv.ContentLength = aws.Int64(contentLen - md5.Size)
|
||||
tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range"))
|
||||
default:
|
||||
r.Error = awserr.New("ChecksumValidationError",
|
||||
fmt.Sprintf("%s: %s header received on unsupported API, %s",
|
||||
amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name,
|
||||
), nil)
|
||||
return
|
||||
}
|
||||
|
||||
if contentLen < md5.Size {
|
||||
r.Error = awserr.New("ChecksumValidationError",
|
||||
fmt.Sprintf("invalid Content-Length %d for %s %s",
|
||||
contentLen, appendMD5TxEncoding, amzTxEncodingHeader,
|
||||
), nil)
|
||||
return
|
||||
}
|
||||
|
||||
// Wrap and swap the response body reader with the validation reader.
|
||||
*bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size)
|
||||
}
|
||||
|
||||
type md5ValidationReader struct {
|
||||
rawReader io.ReadCloser
|
||||
payload io.Reader
|
||||
hash hash.Hash
|
||||
|
||||
payloadLen int64
|
||||
read int64
|
||||
}
|
||||
|
||||
func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader {
|
||||
h := md5.New()
|
||||
return &md5ValidationReader{
|
||||
rawReader: reader,
|
||||
payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h),
|
||||
hash: h,
|
||||
payloadLen: payloadLen,
|
||||
}
|
||||
}
|
||||
|
||||
func (v *md5ValidationReader) Read(p []byte) (n int, err error) {
|
||||
n, err = v.payload.Read(p)
|
||||
if err != nil && err != io.EOF {
|
||||
return n, err
|
||||
}
|
||||
|
||||
v.read += int64(n)
|
||||
|
||||
if err == io.EOF {
|
||||
if v.read != v.payloadLen {
|
||||
return n, io.ErrUnexpectedEOF
|
||||
}
|
||||
expectSum := make([]byte, md5.Size)
|
||||
actualSum := make([]byte, md5.Size)
|
||||
if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil {
|
||||
return n, sumReadErr
|
||||
}
|
||||
actualSum = v.hash.Sum(actualSum[0:0])
|
||||
if !bytes.Equal(expectSum, actualSum) {
|
||||
return n, awserr.New("InvalidChecksum",
|
||||
fmt.Sprintf("expected MD5 checksum %s, got %s",
|
||||
hex.EncodeToString(expectSum),
|
||||
hex.EncodeToString(actualSum),
|
||||
),
|
||||
nil)
|
||||
}
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (v *md5ValidationReader) Close() error {
|
||||
return v.rawReader.Close()
|
||||
}
|
||||
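computeBodyHashes above runs the seekable body through MD5 and SHA-256 in one pass and then rewinds it so the payload can still be transmitted. A self-contained sketch of that hashing step outside the SDK's handler machinery:

package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	body := strings.NewReader("object contents")

	// Hash the body once through both digests, as computeBodyHashes does
	// with io.MultiWriter, then rewind so the body can still be sent.
	md5Hash, sha256Hash := md5.New(), sha256.New()
	if _, err := io.Copy(io.MultiWriter(md5Hash, sha256Hash), body); err != nil {
		panic(err)
	}
	if _, err := body.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}

	fmt.Println("Content-Md5:", base64.StdEncoding.EncodeToString(md5Hash.Sum(nil)))
	fmt.Println("X-Amz-Content-Sha256:", hex.EncodeToString(sha256Hash.Sum(nil)))
}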
36 vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go generated vendored
@@ -1,36 +0,0 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
|
||||
// require it.
|
||||
func contentMD5(r *request.Request) {
|
||||
h := md5.New()
|
||||
|
||||
// hash the body. seek back to the first position after reading to reset
|
||||
// the body for transmission. copy errors may be assumed to be from the
|
||||
// body.
|
||||
_, err := io.Copy(h, r.Body)
|
||||
if err != nil {
|
||||
r.Error = awserr.New("ContentMD5", "failed to read body", err)
|
||||
return
|
||||
}
|
||||
_, err = r.Body.Seek(0, 0)
|
||||
if err != nil {
|
||||
r.Error = awserr.New("ContentMD5", "failed to seek body", err)
|
||||
return
|
||||
}
|
||||
|
||||
// encode the md5 checksum in base64 and set the request header.
|
||||
sum := h.Sum(nil)
|
||||
sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
|
||||
base64.StdEncoding.Encode(sum64, sum)
|
||||
r.HTTPRequest.Header.Set("Content-MD5", string(sum64))
|
||||
}
|
||||
6 vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go generated vendored
@@ -42,6 +42,12 @@ func defaultInitRequestFn(r *request.Request) {
r.Handlers.Validate.PushFront(populateLocationConstraint)
case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
case opPutObject, opUploadPart:
r.Handlers.Build.PushBack(computeBodyHashes)
// Disabled until #1837 root issue is resolved.
// case opGetObject:
// r.Handlers.Build.PushBack(askForTxEncodingAppendMD5)
// r.Handlers.Unmarshal.PushBack(useMD5ValidationReader)
}
}
3 vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go generated vendored
@@ -7,6 +7,7 @@ import (

"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkio"
)

func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
@@ -17,7 +18,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
}
body := bytes.NewReader(b)
r.HTTPResponse.Body = ioutil.NopCloser(body)
defer body.Seek(0, 0)
defer body.Seek(0, sdkio.SeekStart)

if body.Len() == 0 {
// If there is no body don't attempt to parse the body.
168 vendor/github.com/aws/aws-sdk-go/service/sts/api.go generated vendored
@@ -14,7 +14,7 @@ const opAssumeRole = "AssumeRole"
|
||||
|
||||
// AssumeRoleRequest generates a "aws/request.Request" representing the
|
||||
// client's request for the AssumeRole operation. The "output" return
|
||||
// value will be populated with the request's response once the request complets
|
||||
// value will be populated with the request's response once the request completes
|
||||
// successfuly.
|
||||
//
|
||||
// Use "Send" method on the returned Request to send the API call to the service.
|
||||
@@ -88,9 +88,18 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
|
||||
// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// The temporary security credentials are valid for the duration that you specified
|
||||
// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a
|
||||
// maximum of 3600 seconds (1 hour). The default is 1 hour.
|
||||
// By default, the temporary security credentials created by AssumeRole last
|
||||
// for one hour. However, you can use the optional DurationSeconds parameter
|
||||
// to specify the duration of your session. You can provide a value from 900
|
||||
// seconds (15 minutes) up to the maximum session duration setting for the role.
|
||||
// This setting can have a value from 1 hour to 12 hours. To learn how to view
|
||||
// the maximum value for your role, see View the Maximum Session Duration Setting
|
||||
// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
|
||||
// in the IAM User Guide. The maximum session duration limit applies when you
|
||||
// use the AssumeRole* API operations or the assume-role* CLI operations but
|
||||
// does not apply when you use those operations to create a console URL. For
|
||||
// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// The temporary security credentials created by AssumeRole can be used to make
|
||||
// API calls to any AWS service with the following exception: you cannot call
|
||||
@@ -121,7 +130,12 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
|
||||
// the user to call AssumeRole on the ARN of the role in the other account.
|
||||
// If the user is in the same account as the role, then you can either attach
|
||||
// a policy to the user (identical to the previous different account user),
|
||||
// or you can add the user as a principal directly in the role's trust policy
|
||||
// or you can add the user as a principal directly in the role's trust policy.
|
||||
// In this case, the trust policy acts as the only resource-based policy in
|
||||
// IAM, and users in the same account as the role do not need explicit permission
|
||||
// to assume the role. For more information about trust policies and resource-based
|
||||
// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// Using MFA with AssumeRole
|
||||
//
|
||||
@@ -194,7 +208,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
|
||||
|
||||
// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
|
||||
// client's request for the AssumeRoleWithSAML operation. The "output" return
|
||||
// value will be populated with the request's response once the request complets
|
||||
// value will be populated with the request's response once the request completes
|
||||
// successfuly.
|
||||
//
|
||||
// Use "Send" method on the returned Request to send the API call to the service.
|
||||
@@ -247,11 +261,20 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
|
||||
// an access key ID, a secret access key, and a security token. Applications
|
||||
// can use these temporary security credentials to sign calls to AWS services.
|
||||
//
|
||||
// The temporary security credentials are valid for the duration that you specified
|
||||
// when calling AssumeRole, or until the time specified in the SAML authentication
|
||||
// response's SessionNotOnOrAfter value, whichever is shorter. The duration
|
||||
// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour).
|
||||
// The default is 1 hour.
|
||||
// By default, the temporary security credentials created by AssumeRoleWithSAML
|
||||
// last for one hour. However, you can use the optional DurationSeconds parameter
|
||||
// to specify the duration of your session. Your role session lasts for the
|
||||
// duration that you specify, or until the time specified in the SAML authentication
|
||||
// response's SessionNotOnOrAfter value, whichever is shorter. You can provide
|
||||
// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
|
||||
// duration setting for the role. This setting can have a value from 1 hour
|
||||
// to 12 hours. To learn how to view the maximum value for your role, see View
|
||||
// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
|
||||
// in the IAM User Guide. The maximum session duration limit applies when you
|
||||
// use the AssumeRole* API operations or the assume-role* CLI operations but
|
||||
// does not apply when you use those operations to create a console URL. For
|
||||
// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// The temporary security credentials created by AssumeRoleWithSAML can be used
|
||||
// to make API calls to any AWS service with the following exception: you cannot
|
||||
@@ -367,7 +390,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
|
||||
|
||||
// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
|
||||
// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
|
||||
// value will be populated with the request's response once the request complets
|
||||
// value will be populated with the request's response once the request completes
|
||||
// successfuly.
|
||||
//
|
||||
// Use "Send" method on the returned Request to send the API call to the service.
|
||||
@@ -438,9 +461,18 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
|
||||
// key ID, a secret access key, and a security token. Applications can use these
|
||||
// temporary security credentials to sign calls to AWS service APIs.
|
||||
//
|
||||
// The credentials are valid for the duration that you specified when calling
|
||||
// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to
|
||||
// a maximum of 3600 seconds (1 hour). The default is 1 hour.
|
||||
// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
|
||||
// last for one hour. However, you can use the optional DurationSeconds parameter
|
||||
// to specify the duration of your session. You can provide a value from 900
|
||||
// seconds (15 minutes) up to the maximum session duration setting for the role.
|
||||
// This setting can have a value from 1 hour to 12 hours. To learn how to view
|
||||
// the maximum value for your role, see View the Maximum Session Duration Setting
|
||||
// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
|
||||
// in the IAM User Guide. The maximum session duration limit applies when you
|
||||
// use the AssumeRole* API operations or the assume-role* CLI operations but
|
||||
// does not apply when you use those operations to create a console URL. For
|
||||
// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// The temporary security credentials created by AssumeRoleWithWebIdentity can
|
||||
// be used to make API calls to any AWS service with the following exception:
|
||||
@@ -492,7 +524,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
|
||||
// the information from these providers to get and use temporary security
|
||||
// credentials.
|
||||
//
|
||||
// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313).
|
||||
// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
|
||||
// This article discusses web identity federation and shows an example of
|
||||
// how to use web identity federation to get access to content in Amazon
|
||||
// S3.
|
||||
@@ -569,7 +601,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
|
||||
|
||||
// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
|
||||
// client's request for the DecodeAuthorizationMessage operation. The "output" return
|
||||
// value will be populated with the request's response once the request complets
|
||||
// value will be populated with the request's response once the request completes
|
||||
// successfuly.
|
||||
//
|
||||
// Use "Send" method on the returned Request to send the API call to the service.
|
||||
@@ -681,7 +713,7 @@ const opGetCallerIdentity = "GetCallerIdentity"
|
||||
|
||||
// GetCallerIdentityRequest generates a "aws/request.Request" representing the
|
||||
// client's request for the GetCallerIdentity operation. The "output" return
|
||||
// value will be populated with the request's response once the request complets
|
||||
// value will be populated with the request's response once the request completes
|
||||
// successfuly.
|
||||
//
|
||||
// Use "Send" method on the returned Request to send the API call to the service.
|
||||
@@ -756,7 +788,7 @@ const opGetFederationToken = "GetFederationToken"
|
||||
|
||||
// GetFederationTokenRequest generates a "aws/request.Request" representing the
|
||||
// client's request for the GetFederationToken operation. The "output" return
|
||||
// value will be populated with the request's response once the request complets
|
||||
// value will be populated with the request's response once the request completes
|
||||
// successfuly.
|
||||
//
|
||||
// Use "Send" method on the returned Request to send the API call to the service.
|
||||
@@ -925,7 +957,7 @@ const opGetSessionToken = "GetSessionToken"
|
||||
|
||||
// GetSessionTokenRequest generates a "aws/request.Request" representing the
|
||||
// client's request for the GetSessionToken operation. The "output" return
|
||||
// value will be populated with the request's response once the request complets
|
||||
// value will be populated with the request's response once the request completes
|
||||
// successfuly.
|
||||
//
|
||||
// Use "Send" method on the returned Request to send the API call to the service.
|
||||
@@ -1049,20 +1081,27 @@ func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionToken
|
||||
return out, req.Send()
|
||||
}
|
||||
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest
|
||||
type AssumeRoleInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The duration, in seconds, of the role session. The value can range from 900
|
||||
// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
|
||||
// to 3600 seconds.
|
||||
// seconds (15 minutes) up to the maximum session duration setting for the role.
|
||||
// This setting can have a value from 1 hour to 12 hours. If you specify a value
|
||||
// higher than this setting, the operation fails. For example, if you specify
|
||||
// a session duration of 12 hours, but your administrator set the maximum session
|
||||
// duration to 6 hours, your operation fails. To learn how to view the maximum
|
||||
// value for your role, see View the Maximum Session Duration Setting for a
|
||||
// Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// This is separate from the duration of a console session that you might request
|
||||
// using the returned credentials. The request to the federation endpoint for
|
||||
// a console sign-in token takes a SessionDuration parameter that specifies
|
||||
// the maximum length of the console session, separately from the DurationSeconds
|
||||
// parameter on this API. For more information, see Creating a URL that Enables
|
||||
// Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
|
||||
// By default, the value is set to 3600 seconds.
|
||||
//
|
||||
// The DurationSeconds parameter is separate from the duration of a console
|
||||
// session that you might request using the returned credentials. The request
|
||||
// to the federation endpoint for a console sign-in token takes a SessionDuration
|
||||
// parameter that specifies the maximum length of the console session. For more
|
||||
// information, see Creating a URL that Enables Federated Users to Access the
|
||||
// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
|
||||
// in the IAM User Guide.
|
||||
DurationSeconds *int64 `min:"900" type:"integer"`
|
||||
|
||||
@@ -1241,7 +1280,6 @@ func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput {
|
||||
|
||||
// Contains the response to a successful AssumeRole request, including temporary
|
||||
// AWS credentials that can be used to make AWS requests.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse
|
||||
type AssumeRoleOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@@ -1295,22 +1333,30 @@ func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput {
|
||||
return s
|
||||
}
|
||||
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest
|
||||
type AssumeRoleWithSAMLInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The duration, in seconds, of the role session. The value can range from 900
|
||||
// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
|
||||
// to 3600 seconds. An expiration can also be specified in the SAML authentication
|
||||
// response's SessionNotOnOrAfter value. The actual expiration time is whichever
|
||||
// value is shorter.
|
||||
// The duration, in seconds, of the role session. Your role session lasts for
|
||||
// the duration that you specify for the DurationSeconds parameter, or until
|
||||
// the time specified in the SAML authentication response's SessionNotOnOrAfter
|
||||
// value, whichever is shorter. You can provide a DurationSeconds value from
|
||||
// 900 seconds (15 minutes) up to the maximum session duration setting for the
|
||||
// role. This setting can have a value from 1 hour to 12 hours. If you specify
|
||||
// a value higher than this setting, the operation fails. For example, if you
|
||||
// specify a session duration of 12 hours, but your administrator set the maximum
|
||||
// session duration to 6 hours, your operation fails. To learn how to view the
|
||||
// maximum value for your role, see View the Maximum Session Duration Setting
|
||||
// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// This is separate from the duration of a console session that you might request
|
||||
// using the returned credentials. The request to the federation endpoint for
|
||||
// a console sign-in token takes a SessionDuration parameter that specifies
|
||||
// the maximum length of the console session, separately from the DurationSeconds
|
||||
// parameter on this API. For more information, see Enabling SAML 2.0 Federated
|
||||
// Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html)
|
||||
// By default, the value is set to 3600 seconds.
|
||||
//
|
||||
// The DurationSeconds parameter is separate from the duration of a console
|
||||
// session that you might request using the returned credentials. The request
|
||||
// to the federation endpoint for a console sign-in token takes a SessionDuration
|
||||
// parameter that specifies the maximum length of the console session. For more
|
||||
// information, see Creating a URL that Enables Federated Users to Access the
|
||||
// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
|
||||
// in the IAM User Guide.
|
||||
DurationSeconds *int64 `min:"900" type:"integer"`
|
||||
|
||||
@@ -1436,7 +1482,6 @@ func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAML
|
||||
|
||||
// Contains the response to a successful AssumeRoleWithSAML request, including
|
||||
// temporary AWS credentials that can be used to make AWS requests.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse
|
||||
type AssumeRoleWithSAMLOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@@ -1548,20 +1593,27 @@ func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLO
|
||||
return s
|
||||
}
|
||||
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest
|
||||
type AssumeRoleWithWebIdentityInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
// The duration, in seconds, of the role session. The value can range from 900
|
||||
// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
|
||||
// to 3600 seconds.
|
||||
// seconds (15 minutes) up to the maximum session duration setting for the role.
|
||||
// This setting can have a value from 1 hour to 12 hours. If you specify a value
|
||||
// higher than this setting, the operation fails. For example, if you specify
|
||||
// a session duration of 12 hours, but your administrator set the maximum session
|
||||
// duration to 6 hours, your operation fails. To learn how to view the maximum
|
||||
// value for your role, see View the Maximum Session Duration Setting for a
|
||||
// Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
|
||||
// in the IAM User Guide.
|
||||
//
|
||||
// This is separate from the duration of a console session that you might request
|
||||
// using the returned credentials. The request to the federation endpoint for
|
||||
// a console sign-in token takes a SessionDuration parameter that specifies
|
||||
// the maximum length of the console session, separately from the DurationSeconds
|
||||
// parameter on this API. For more information, see Creating a URL that Enables
|
||||
// Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
|
||||
// By default, the value is set to 3600 seconds.
|
||||
//
|
||||
// The DurationSeconds parameter is separate from the duration of a console
|
||||
// session that you might request using the returned credentials. The request
|
||||
// to the federation endpoint for a console sign-in token takes a SessionDuration
|
||||
// parameter that specifies the maximum length of the console session. For more
|
||||
// information, see Creating a URL that Enables Federated Users to Access the
|
||||
// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
|
||||
// in the IAM User Guide.
|
||||
DurationSeconds *int64 `min:"900" type:"integer"`
|
||||
|
||||
@@ -1711,7 +1763,6 @@ func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRo
|
||||
|
||||
// Contains the response to a successful AssumeRoleWithWebIdentity request,
|
||||
// including temporary AWS credentials that can be used to make AWS requests.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse
|
||||
type AssumeRoleWithWebIdentityOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@@ -1804,7 +1855,6 @@ func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v strin
|
||||
|
||||
// The identifiers for the temporary security credentials that the operation
|
||||
// returns.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
|
||||
type AssumedRoleUser struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@@ -1847,7 +1897,6 @@ func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser {
|
||||
}
|
||||
|
||||
// AWS credentials for API authentication.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials
|
||||
type Credentials struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@@ -1906,7 +1955,6 @@ func (s *Credentials) SetSessionToken(v string) *Credentials {
|
||||
return s
|
||||
}
|
||||
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest
|
||||
type DecodeAuthorizationMessageInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@@ -1951,7 +1999,6 @@ func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAut
|
||||
// A document that contains additional information about the authorization status
|
||||
// of a request from an encoded message that is returned in response to an AWS
|
||||
// request.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse
|
||||
type DecodeAuthorizationMessageOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@@ -1976,7 +2023,6 @@ func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAu
|
||||
}
|
||||
|
||||
// Identifiers for the federated user that is associated with the credentials.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser
|
||||
type FederatedUser struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@@ -2017,7 +2063,6 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser {
|
||||
return s
|
||||
}
|
||||
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest
|
||||
type GetCallerIdentityInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
}
|
||||
@@ -2034,7 +2079,6 @@ func (s GetCallerIdentityInput) GoString() string {
|
||||
|
||||
// Contains the response to a successful GetCallerIdentity request, including
|
||||
// information about the entity making the request.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse
|
||||
type GetCallerIdentityOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@@ -2080,7 +2124,6 @@ func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput {
|
||||
return s
|
||||
}
|
||||
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest
|
||||
type GetFederationTokenInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@@ -2189,7 +2232,6 @@ func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput {
|
||||
|
||||
// Contains the response to a successful GetFederationToken request, including
|
||||
// temporary AWS credentials that can be used to make AWS requests.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse
|
||||
type GetFederationTokenOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@@ -2242,7 +2284,6 @@ func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTo
|
||||
return s
|
||||
}
|
||||
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest
|
||||
type GetSessionTokenInput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
@@ -2327,7 +2368,6 @@ func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
|
||||
|
||||
// Contains the response to a successful GetSessionToken request, including
|
||||
// temporary AWS credentials that can be used to make AWS requests.
|
||||
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse
|
||||
type GetSessionTokenOutput struct {
|
||||
_ struct{} `type:"structure"`
|
||||
|
||||
|
||||
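The STS api.go hunks above are generated documentation updates for the new "maximum session duration" behaviour: DurationSeconds on AssumeRole, AssumeRoleWithSAML and AssumeRoleWithWebIdentity may now range from 900 seconds up to the role's configured maximum (1 to 12 hours) instead of being capped at 3600 seconds. A minimal sketch of how a caller passes a longer duration with aws-sdk-go; the role ARN, session name and duration are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	// Request a 4 hour session; this only succeeds if the role's maximum
	// session duration has been raised above the old 1 hour default.
	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder
		RoleSessionName: aws.String("example-session"),
		DurationSeconds: aws.Int64(4 * 3600),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("credentials expire at", out.Credentials.Expiration)
}
```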
vendor/github.com/beorn7/perks/quantile/stream.go (generated, vendored, 34 changes)
@@ -77,15 +77,20 @@ func NewHighBiased(epsilon float64) *Stream {
|
||||
// is guaranteed to be within (Quantile±Epsilon).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
||||
func NewTargeted(targets map[float64]float64) *Stream {
|
||||
func NewTargeted(targetMap map[float64]float64) *Stream {
|
||||
// Convert map to slice to avoid slow iterations on a map.
|
||||
// ƒ is called on the hot path, so converting the map to a slice
|
||||
// beforehand results in significant CPU savings.
|
||||
targets := targetMapToSlice(targetMap)
|
||||
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
var m = math.MaxFloat64
|
||||
var f float64
|
||||
for quantile, epsilon := range targets {
|
||||
if quantile*s.n <= r {
|
||||
f = (2 * epsilon * r) / quantile
|
||||
for _, t := range targets {
|
||||
if t.quantile*s.n <= r {
|
||||
f = (2 * t.epsilon * r) / t.quantile
|
||||
} else {
|
||||
f = (2 * epsilon * (s.n - r)) / (1 - quantile)
|
||||
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
|
||||
}
|
||||
if f < m {
|
||||
m = f
|
||||
@@ -96,6 +101,25 @@ func NewTargeted(targets map[float64]float64) *Stream {
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
type target struct {
|
||||
quantile float64
|
||||
epsilon float64
|
||||
}
|
||||
|
||||
func targetMapToSlice(targetMap map[float64]float64) []target {
|
||||
targets := make([]target, 0, len(targetMap))
|
||||
|
||||
for quantile, epsilon := range targetMap {
|
||||
t := target{
|
||||
quantile: quantile,
|
||||
epsilon: epsilon,
|
||||
}
|
||||
targets = append(targets, t)
|
||||
}
|
||||
|
||||
return targets
|
||||
}
|
||||
|
||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
|
||||
// design. Take care when using across multiple goroutines.
|
||||
type Stream struct {
|
||||
|
||||
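The stream.go change converts the targeted-quantile map into a slice once inside NewTargeted, so the invariant function ƒ iterates a slice instead of ranging over a map during compression on the insert path. The public API is unchanged; a small sketch of how it is typically used, with arbitrary target values:

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile with different error tolerances.
	s := quantile.NewTargeted(map[float64]float64{
		0.50: 0.05,
		0.99: 0.001,
	})
	for i := 0; i < 10000; i++ {
		// ƒ runs repeatedly while samples are compressed, hence the
		// map-to-slice optimization above.
		s.Insert(rand.Float64())
	}
	fmt.Println("p50:", s.Query(0.50), "p99:", s.Query(0.99))
}
```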
vendor/github.com/fatih/color/color.go (generated, vendored, 5 changes)
@@ -24,6 +24,9 @@ var (
|
||||
// os.Stdout is used.
|
||||
Output = colorable.NewColorableStdout()
|
||||
|
||||
// Error defines a color supporting writer for os.Stderr.
|
||||
Error = colorable.NewColorableStderr()
|
||||
|
||||
// colorsCache is used to reduce the count of created Color objects and
|
||||
// allows to reuse already created objects with required Attribute.
|
||||
colorsCache = make(map[Attribute]*Color)
|
||||
@@ -341,7 +344,7 @@ func (c *Color) SprintlnFunc() func(a ...interface{}) string {
|
||||
}
|
||||
}
|
||||
|
||||
// sequence returns a formated SGR sequence to be plugged into a "\x1b[...m"
|
||||
// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
|
||||
// an example output might be: "1;36" -> bold cyan
|
||||
func (c *Color) sequence() string {
|
||||
format := make([]string, len(c.params))
|
||||
|
||||
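The color.go hunks add a color.Error writer wrapping os.Stderr, mirroring the existing Output writer for os.Stdout, and fix a comment typo. A short sketch of how the new writer can be used together with the package's existing string helpers:

```go
package main

import (
	"fmt"

	"github.com/fatih/color"
)

func main() {
	// Output wraps stdout and Error wraps stderr through go-colorable,
	// so ANSI sequences keep working on Windows consoles as well.
	fmt.Fprintln(color.Output, color.GreenString("render finished"))
	fmt.Fprintln(color.Error, color.RedString("render failed"))
}
```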
vendor/github.com/go-ini/ini/file.go (generated, vendored, 15 changes)
@@ -140,9 +140,14 @@ func (f *File) Section(name string) *Section {
|
||||
|
||||
// Section returns list of Section.
|
||||
func (f *File) Sections() []*Section {
|
||||
if f.BlockMode {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
}
|
||||
|
||||
sections := make([]*Section, len(f.sectionList))
|
||||
for i := range f.sectionList {
|
||||
sections[i] = f.Section(f.sectionList[i])
|
||||
for i, name := range f.sectionList {
|
||||
sections[i] = f.sections[name]
|
||||
}
|
||||
return sections
|
||||
}
|
||||
@@ -223,7 +228,7 @@ func (f *File) Append(source interface{}, others ...interface{}) error {
|
||||
|
||||
func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
|
||||
equalSign := "="
|
||||
if PrettyFormat {
|
||||
if PrettyFormat || PrettyEqual {
|
||||
equalSign = " = "
|
||||
}
|
||||
|
||||
@@ -300,6 +305,10 @@ func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
|
||||
} else {
|
||||
key.Comment = key.Comment[:1] + " " + strings.TrimSpace(key.Comment[1:])
|
||||
}
|
||||
|
||||
// Support multiline comments
|
||||
key.Comment = strings.Replace(key.Comment, "\n", "\n; ", -1)
|
||||
|
||||
if _, err := buf.WriteString(key.Comment + LineBreak); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
vendor/github.com/go-ini/ini/ini.go (generated, vendored, 10 changes)
@@ -32,7 +32,7 @@ const (
|
||||
|
||||
// Maximum allowed depth when recursively substituing variable names.
|
||||
_DEPTH_VALUES = 99
|
||||
_VERSION = "1.32.0"
|
||||
_VERSION = "1.36.0"
|
||||
)
|
||||
|
||||
// Version returns current package version literal.
|
||||
@@ -53,6 +53,9 @@ var (
|
||||
// or reduce all possible spaces for compact format.
|
||||
PrettyFormat = true
|
||||
|
||||
// Place spaces around "=" sign even when PrettyFormat is false
|
||||
PrettyEqual = false
|
||||
|
||||
// Explicitly write DEFAULT section header
|
||||
DefaultHeader = false
|
||||
|
||||
@@ -137,6 +140,11 @@ type LoadOptions struct {
|
||||
// AllowNestedValues indicates whether to allow AWS-like nested values.
|
||||
// Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
|
||||
AllowNestedValues bool
|
||||
// AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
|
||||
// Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
|
||||
// Relevant quote: Values can also span multiple lines, as long as they are indented deeper
|
||||
// than the first line of the value.
|
||||
AllowPythonMultilineValues bool
|
||||
// UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
|
||||
// when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
|
||||
UnescapeValueDoubleQuotes bool
|
||||
|
||||
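Besides the version bump, ini.go introduces the PrettyEqual package switch (spaces around "=" even when PrettyFormat is off) and the AllowPythonMultilineValues load option consumed by parser.go below. A sketch of how they are toggled; the inline config content here is only an example:

```go
package main

import (
	"log"
	"os"

	ini "github.com/go-ini/ini"
)

func main() {
	// Keep output compact but still write "key = value".
	ini.PrettyFormat = false
	ini.PrettyEqual = true

	cfg, err := ini.LoadSources(ini.LoadOptions{
		AllowPythonMultilineValues: true, // accept indented continuation lines
	}, []byte("app_mode = production"))
	if err != nil {
		log.Fatal(err)
	}
	cfg.Section("").Key("log_level").SetValue("info")
	if _, err := cfg.WriteTo(os.Stdout); err != nil {
		log.Fatal(err)
	}
}
```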
vendor/github.com/go-ini/ini/parser.go (generated, vendored, 84 changes)
@@ -19,11 +19,14 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
var pythonMultiline = regexp.MustCompile("^(\\s+)([^\n]+)")
|
||||
|
||||
type tokenType int
|
||||
|
||||
const (
|
||||
@@ -194,7 +197,8 @@ func hasSurroundedQuote(in string, quote byte) bool {
|
||||
}
|
||||
|
||||
func (p *parser) readValue(in []byte,
|
||||
ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes, unescapeValueCommentSymbols bool) (string, error) {
|
||||
parserBufferSize int,
|
||||
ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes, unescapeValueCommentSymbols, allowPythonMultilines bool) (string, error) {
|
||||
|
||||
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
|
||||
if len(line) == 0 {
|
||||
@@ -224,11 +228,13 @@ func (p *parser) readValue(in []byte,
|
||||
return line[startIdx : pos+startIdx], nil
|
||||
}
|
||||
|
||||
lastChar := line[len(line)-1]
|
||||
// Won't be able to reach here if value only contains whitespace
|
||||
line = strings.TrimSpace(line)
|
||||
trimmedLastChar := line[len(line)-1]
|
||||
|
||||
// Check continuation lines when desired
|
||||
if !ignoreContinuation && line[len(line)-1] == '\\' {
|
||||
if !ignoreContinuation && trimmedLastChar == '\\' {
|
||||
return p.readContinuationLines(line[:len(line)-1])
|
||||
}
|
||||
|
||||
@@ -252,7 +258,50 @@ func (p *parser) readValue(in []byte,
|
||||
if strings.Contains(line, `\#`) {
|
||||
line = strings.Replace(line, `\#`, "#", -1)
|
||||
}
|
||||
} else if allowPythonMultilines && lastChar == '\n' {
|
||||
parserBufferPeekResult, _ := p.buf.Peek(parserBufferSize)
|
||||
peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
|
||||
|
||||
identSize := -1
|
||||
val := line
|
||||
|
||||
for {
|
||||
peekData, peekErr := peekBuffer.ReadBytes('\n')
|
||||
if peekErr != nil {
|
||||
if peekErr == io.EOF {
|
||||
return val, nil
|
||||
}
|
||||
return "", peekErr
|
||||
}
|
||||
|
||||
peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
|
||||
if len(peekMatches) != 3 {
|
||||
return val, nil
|
||||
}
|
||||
|
||||
currentIdentSize := len(peekMatches[1])
|
||||
// NOTE: Return if not a python-ini multi-line value.
|
||||
if currentIdentSize < 0 {
|
||||
return val, nil
|
||||
}
|
||||
identSize = currentIdentSize
|
||||
|
||||
// NOTE: Just advance the parser reader (buffer) in-sync with the peek buffer.
|
||||
_, err := p.readUntil('\n')
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
val += fmt.Sprintf("\n%s", peekMatches[2])
|
||||
}
|
||||
|
||||
// NOTE: If it was a Python multi-line value,
|
||||
// return the appended value.
|
||||
if identSize > 0 {
|
||||
return val, nil
|
||||
}
|
||||
}
|
||||
|
||||
return line, nil
|
||||
}
|
||||
|
||||
@@ -276,6 +325,29 @@ func (f *File) parse(reader io.Reader) (err error) {
|
||||
|
||||
var line []byte
|
||||
var inUnparseableSection bool
|
||||
|
||||
// NOTE: Iterate and increase `currentPeekSize` until
|
||||
// the size of the parser buffer is found.
|
||||
// TODO: When Golang 1.10 is the lowest version supported,
|
||||
// replace with `parserBufferSize := p.buf.Size()`.
|
||||
parserBufferSize := 0
|
||||
// NOTE: Peek 1kb at a time.
|
||||
currentPeekSize := 1024
|
||||
|
||||
if f.options.AllowPythonMultilineValues {
|
||||
for {
|
||||
peekBytes, _ := p.buf.Peek(currentPeekSize)
|
||||
peekBytesLength := len(peekBytes)
|
||||
|
||||
if parserBufferSize >= peekBytesLength {
|
||||
break
|
||||
}
|
||||
|
||||
currentPeekSize *= 2
|
||||
parserBufferSize = peekBytesLength
|
||||
}
|
||||
}
|
||||
|
||||
for !p.isEOF {
|
||||
line, err = p.readUntil('\n')
|
||||
if err != nil {
|
||||
@@ -352,10 +424,12 @@ func (f *File) parse(reader io.Reader) (err error) {
|
||||
// Treat as boolean key when desired, and whole line is key name.
|
||||
if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
|
||||
kname, err := p.readValue(line,
|
||||
parserBufferSize,
|
||||
f.options.IgnoreContinuation,
|
||||
f.options.IgnoreInlineComment,
|
||||
f.options.UnescapeValueDoubleQuotes,
|
||||
f.options.UnescapeValueCommentSymbols)
|
||||
f.options.UnescapeValueCommentSymbols,
|
||||
f.options.AllowPythonMultilineValues)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -379,10 +453,12 @@ func (f *File) parse(reader io.Reader) (err error) {
|
||||
}
|
||||
|
||||
value, err := p.readValue(line[offset:],
|
||||
parserBufferSize,
|
||||
f.options.IgnoreContinuation,
|
||||
f.options.IgnoreInlineComment,
|
||||
f.options.UnescapeValueDoubleQuotes,
|
||||
f.options.UnescapeValueCommentSymbols)
|
||||
f.options.UnescapeValueCommentSymbols,
|
||||
f.options.AllowPythonMultilineValues)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
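parser.go implements the option above: when AllowPythonMultilineValues is enabled, readValue peeks ahead in the parser buffer and folds indented continuation lines into the current value, the way Python's configparser does, joining them with newlines. A small illustration of the input it now accepts; the section and key names are arbitrary:

```go
package main

import (
	"fmt"
	"log"

	ini "github.com/go-ini/ini"
)

const data = `[server]
; the value continues on indented lines, configparser style
hosts = alpha
    beta
    gamma
`

func main() {
	cfg, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true}, []byte(data))
	if err != nil {
		log.Fatal(err)
	}
	// The three lines are folded into one newline-separated value.
	fmt.Printf("%q\n", cfg.Section("server").Key("hosts").String())
}
```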
vendor/github.com/go-xorm/builder/builder_insert.go (generated, vendored, 6 changes)
@@ -15,7 +15,7 @@ func (b *Builder) insertWriteTo(w Writer) error {
|
||||
return errors.New("no table indicated")
|
||||
}
|
||||
if len(b.inserts) <= 0 {
|
||||
return errors.New("no column to be update")
|
||||
return errors.New("no column to be insert")
|
||||
}
|
||||
|
||||
if _, err := fmt.Fprintf(w, "INSERT INTO %s (", b.tableName); err != nil {
|
||||
@@ -26,7 +26,9 @@ func (b *Builder) insertWriteTo(w Writer) error {
|
||||
var bs []byte
|
||||
var valBuffer = bytes.NewBuffer(bs)
|
||||
var i = 0
|
||||
for col, value := range b.inserts {
|
||||
|
||||
for _, col := range b.inserts.sortedKeys() {
|
||||
value := b.inserts[col]
|
||||
fmt.Fprint(w, col)
|
||||
if e, ok := value.(expr); ok {
|
||||
fmt.Fprint(valBuffer, e.sql)
|
||||
|
||||
vendor/github.com/go-xorm/builder/cond_compare.go (generated, vendored, 8 changes)
@@ -10,7 +10,13 @@ import "fmt"
|
||||
func WriteMap(w Writer, data map[string]interface{}, op string) error {
|
||||
var args = make([]interface{}, 0, len(data))
|
||||
var i = 0
|
||||
for k, v := range data {
|
||||
keys := make([]string, 0, len(data))
|
||||
for k := range data {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
for _, k := range keys {
|
||||
v := data[k]
|
||||
switch v.(type) {
|
||||
case expr:
|
||||
if _, err := fmt.Fprintf(w, "%s%s(", k, op); err != nil {
|
||||
|
||||
vendor/github.com/go-xorm/builder/cond_eq.go (generated, vendored, 20 changes)
@@ -4,7 +4,10 @@
|
||||
|
||||
package builder
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Incr implements a type used by Eq
|
||||
type Incr int
|
||||
@@ -19,7 +22,8 @@ var _ Cond = Eq{}
|
||||
|
||||
func (eq Eq) opWriteTo(op string, w Writer) error {
|
||||
var i = 0
|
||||
for k, v := range eq {
|
||||
for _, k := range eq.sortedKeys() {
|
||||
v := eq[k]
|
||||
switch v.(type) {
|
||||
case []int, []int64, []string, []int32, []int16, []int8, []uint, []uint64, []uint32, []uint16, []interface{}:
|
||||
if err := In(k, v).WriteTo(w); err != nil {
|
||||
@@ -94,3 +98,15 @@ func (eq Eq) Or(conds ...Cond) Cond {
|
||||
func (eq Eq) IsValid() bool {
|
||||
return len(eq) > 0
|
||||
}
|
||||
|
||||
// sortedKeys returns all keys of this Eq sorted with sort.Strings.
|
||||
// It is used internally for consistent ordering when generating
|
||||
// SQL, see https://github.com/go-xorm/builder/issues/10
|
||||
func (eq Eq) sortedKeys() []string {
|
||||
keys := make([]string, 0, len(eq))
|
||||
for key := range eq {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
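The cond_eq.go change sorts the Eq map keys before writing, so the generated SQL is deterministic instead of depending on Go's randomized map iteration order (go-xorm/builder#10, cited in the comment). The same sortedKeys approach is used for the insert column order in builder_insert.go above and for Neq below. A sketch of the observable effect, assuming the package-level ToSQL helper:

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-xorm/builder"
)

func main() {
	// With sorted keys this always renders "a=? AND b=? AND c=?",
	// regardless of map iteration order.
	sql, args, err := builder.ToSQL(builder.Eq{"c": 3, "a": 1, "b": 2})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sql, args)
}
```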
vendor/github.com/go-xorm/builder/cond_like.go (generated, vendored, 2 changes)
@@ -16,7 +16,7 @@ func (like Like) WriteTo(w Writer) error {
|
||||
if _, err := fmt.Fprintf(w, "%s LIKE ?", like[0]); err != nil {
|
||||
return err
|
||||
}
|
||||
// FIXME: if use other regular express, this will be failed. but for compitable, keep this
|
||||
// FIXME: if use other regular express, this will be failed. but for compatible, keep this
|
||||
if like[1][0] == '%' || like[1][len(like[1])-1] == '%' {
|
||||
w.Append(like[1])
|
||||
} else {
|
||||
|
||||
vendor/github.com/go-xorm/builder/cond_neq.go (generated, vendored, 20 changes)
@@ -4,7 +4,10 @@
|
||||
|
||||
package builder
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Neq defines not equal conditions
|
||||
type Neq map[string]interface{}
|
||||
@@ -15,7 +18,8 @@ var _ Cond = Neq{}
|
||||
func (neq Neq) WriteTo(w Writer) error {
|
||||
var args = make([]interface{}, 0, len(neq))
|
||||
var i = 0
|
||||
for k, v := range neq {
|
||||
for _, k := range neq.sortedKeys() {
|
||||
v := neq[k]
|
||||
switch v.(type) {
|
||||
case []int, []int64, []string, []int32, []int16, []int8:
|
||||
if err := NotIn(k, v).WriteTo(w); err != nil {
|
||||
@@ -76,3 +80,15 @@ func (neq Neq) Or(conds ...Cond) Cond {
|
||||
func (neq Neq) IsValid() bool {
|
||||
return len(neq) > 0
|
||||
}
|
||||
|
||||
// sortedKeys returns all keys of this Neq sorted with sort.Strings.
|
||||
// It is used internally for consistent ordering when generating
|
||||
// SQL, see https://github.com/go-xorm/builder/issues/10
|
||||
func (neq Neq) sortedKeys() []string {
|
||||
keys := make([]string, 0, len(neq))
|
||||
for key := range neq {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
vendor/github.com/go-xorm/builder/cond_not.go (generated, vendored, 24 changes)
@@ -21,6 +21,18 @@ func (not Not) WriteTo(w Writer) error {
|
||||
if _, err := fmt.Fprint(w, "("); err != nil {
|
||||
return err
|
||||
}
|
||||
case Eq:
|
||||
if len(not[0].(Eq)) > 1 {
|
||||
if _, err := fmt.Fprint(w, "("); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case Neq:
|
||||
if len(not[0].(Neq)) > 1 {
|
||||
if _, err := fmt.Fprint(w, "("); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := not[0].WriteTo(w); err != nil {
|
||||
@@ -32,6 +44,18 @@ func (not Not) WriteTo(w Writer) error {
|
||||
if _, err := fmt.Fprint(w, ")"); err != nil {
|
||||
return err
|
||||
}
|
||||
case Eq:
|
||||
if len(not[0].(Eq)) > 1 {
|
||||
if _, err := fmt.Fprint(w, ")"); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case Neq:
|
||||
if len(not[0].(Neq)) > 1 {
|
||||
if _, err := fmt.Fprint(w, ")"); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
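cond_not.go now also wraps multi-key Eq and Neq conditions in parentheses when they appear under Not, so the negation applies to the whole conjunction rather than only its first clause. Roughly, assuming the same ToSQL helper as above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-xorm/builder"
)

func main() {
	// With the added parentheses this renders as "NOT (a=? AND b=?)"
	// instead of a negation that only covers the first clause.
	sql, args, err := builder.ToSQL(builder.Not{builder.Eq{"a": 1, "b": 2}})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sql, args)
}
```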
vendor/github.com/go-xorm/core/cache.go (generated, vendored, 10 changes)
@@ -1,11 +1,12 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -55,11 +56,10 @@ func encodeIds(ids []PK) (string, error) {
|
||||
return buf.String(), err
|
||||
}
|
||||
|
||||
|
||||
func decodeIds(s string) ([]PK, error) {
|
||||
pks := make([]PK, 0)
|
||||
|
||||
dec := gob.NewDecoder(bytes.NewBufferString(s))
|
||||
dec := gob.NewDecoder(strings.NewReader(s))
|
||||
err := dec.Decode(&pks)
|
||||
|
||||
return pks, err
|
||||
|
||||
vendor/github.com/go-xorm/core/column.go (generated, vendored, 16 changes)
@@ -79,6 +79,10 @@ func (col *Column) String(d Dialect) string {
|
||||
}
|
||||
}
|
||||
|
||||
if col.Default != "" {
|
||||
sql += "DEFAULT " + col.Default + " "
|
||||
}
|
||||
|
||||
if d.ShowCreateNull() {
|
||||
if col.Nullable {
|
||||
sql += "NULL "
|
||||
@@ -87,10 +91,6 @@ func (col *Column) String(d Dialect) string {
|
||||
}
|
||||
}
|
||||
|
||||
if col.Default != "" {
|
||||
sql += "DEFAULT " + col.Default + " "
|
||||
}
|
||||
|
||||
return sql
|
||||
}
|
||||
|
||||
@@ -99,6 +99,10 @@ func (col *Column) StringNoPk(d Dialect) string {
|
||||
|
||||
sql += d.SqlType(col) + " "
|
||||
|
||||
if col.Default != "" {
|
||||
sql += "DEFAULT " + col.Default + " "
|
||||
}
|
||||
|
||||
if d.ShowCreateNull() {
|
||||
if col.Nullable {
|
||||
sql += "NULL "
|
||||
@@ -107,10 +111,6 @@ func (col *Column) StringNoPk(d Dialect) string {
|
||||
}
|
||||
}
|
||||
|
||||
if col.Default != "" {
|
||||
sql += "DEFAULT " + col.Default + " "
|
||||
}
|
||||
|
||||
return sql
|
||||
}
|
||||
|
||||
|
||||
vendor/github.com/go-xorm/core/db.go (generated, vendored, 57 changes)
@@ -7,6 +7,11 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
DefaultCacheSize = 200
|
||||
)
|
||||
|
||||
func MapToSlice(query string, mp interface{}) (string, []interface{}, error) {
|
||||
@@ -58,9 +63,16 @@ func StructToSlice(query string, st interface{}) (string, []interface{}, error)
|
||||
return query, args, nil
|
||||
}
|
||||
|
||||
type cacheStruct struct {
|
||||
value reflect.Value
|
||||
idx int
|
||||
}
|
||||
|
||||
type DB struct {
|
||||
*sql.DB
|
||||
Mapper IMapper
|
||||
Mapper IMapper
|
||||
reflectCache map[reflect.Type]*cacheStruct
|
||||
reflectCacheMutex sync.RWMutex
|
||||
}
|
||||
|
||||
func Open(driverName, dataSourceName string) (*DB, error) {
|
||||
@@ -68,11 +80,32 @@ func Open(driverName, dataSourceName string) (*DB, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &DB{db, NewCacheMapper(&SnakeMapper{})}, nil
|
||||
return &DB{
|
||||
DB: db,
|
||||
Mapper: NewCacheMapper(&SnakeMapper{}),
|
||||
reflectCache: make(map[reflect.Type]*cacheStruct),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func FromDB(db *sql.DB) *DB {
|
||||
return &DB{db, NewCacheMapper(&SnakeMapper{})}
|
||||
return &DB{
|
||||
DB: db,
|
||||
Mapper: NewCacheMapper(&SnakeMapper{}),
|
||||
reflectCache: make(map[reflect.Type]*cacheStruct),
|
||||
}
|
||||
}
|
||||
|
||||
func (db *DB) reflectNew(typ reflect.Type) reflect.Value {
|
||||
db.reflectCacheMutex.Lock()
|
||||
defer db.reflectCacheMutex.Unlock()
|
||||
cs, ok := db.reflectCache[typ]
|
||||
if !ok || cs.idx+1 > DefaultCacheSize-1 {
|
||||
cs = &cacheStruct{reflect.MakeSlice(reflect.SliceOf(typ), DefaultCacheSize, DefaultCacheSize), 0}
|
||||
db.reflectCache[typ] = cs
|
||||
} else {
|
||||
cs.idx = cs.idx + 1
|
||||
}
|
||||
return cs.value.Index(cs.idx).Addr()
|
||||
}
|
||||
|
||||
func (db *DB) Query(query string, args ...interface{}) (*Rows, error) {
|
||||
@@ -83,7 +116,7 @@ func (db *DB) Query(query string, args ...interface{}) (*Rows, error) {
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return &Rows{rows, db.Mapper}, nil
|
||||
return &Rows{rows, db}, nil
|
||||
}
|
||||
|
||||
func (db *DB) QueryMap(query string, mp interface{}) (*Rows, error) {
|
||||
@@ -128,8 +161,8 @@ func (db *DB) QueryRowStruct(query string, st interface{}) *Row {
|
||||
|
||||
type Stmt struct {
|
||||
*sql.Stmt
|
||||
Mapper IMapper
|
||||
names map[string]int
|
||||
db *DB
|
||||
names map[string]int
|
||||
}
|
||||
|
||||
func (db *DB) Prepare(query string) (*Stmt, error) {
|
||||
@@ -145,7 +178,7 @@ func (db *DB) Prepare(query string) (*Stmt, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Stmt{stmt, db.Mapper, names}, nil
|
||||
return &Stmt{stmt, db, names}, nil
|
||||
}
|
||||
|
||||
func (s *Stmt) ExecMap(mp interface{}) (sql.Result, error) {
|
||||
@@ -179,7 +212,7 @@ func (s *Stmt) Query(args ...interface{}) (*Rows, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Rows{rows, s.Mapper}, nil
|
||||
return &Rows{rows, s.db}, nil
|
||||
}
|
||||
|
||||
func (s *Stmt) QueryMap(mp interface{}) (*Rows, error) {
|
||||
@@ -274,7 +307,7 @@ func (EmptyScanner) Scan(src interface{}) error {
|
||||
|
||||
type Tx struct {
|
||||
*sql.Tx
|
||||
Mapper IMapper
|
||||
db *DB
|
||||
}
|
||||
|
||||
func (db *DB) Begin() (*Tx, error) {
|
||||
@@ -282,7 +315,7 @@ func (db *DB) Begin() (*Tx, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Tx{tx, db.Mapper}, nil
|
||||
return &Tx{tx, db}, nil
|
||||
}
|
||||
|
||||
func (tx *Tx) Prepare(query string) (*Stmt, error) {
|
||||
@@ -298,7 +331,7 @@ func (tx *Tx) Prepare(query string) (*Stmt, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Stmt{stmt, tx.Mapper, names}, nil
|
||||
return &Stmt{stmt, tx.db, names}, nil
|
||||
}
|
||||
|
||||
func (tx *Tx) Stmt(stmt *Stmt) *Stmt {
|
||||
@@ -327,7 +360,7 @@ func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Rows{rows, tx.Mapper}, nil
|
||||
return &Rows{rows, tx.db}, nil
|
||||
}
|
||||
|
||||
func (tx *Tx) QueryMap(query string, mp interface{}) (*Rows, error) {
|
||||
|
||||
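The db.go change moves the reflection cache that used to live as a package-global in rows.go (removed further down) onto the DB struct itself, guarded by its own mutex, and threads the *DB through Stmt, Tx and Rows instead of only the IMapper. The public constructors keep their signatures; a brief sketch, with the SQLite driver chosen purely for the example:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/go-xorm/core"
	_ "github.com/mattn/go-sqlite3" // driver choice is only an example
)

func main() {
	// core.Open and core.FromDB both initialise the per-DB reflect cache now,
	// so separate DB instances no longer contend on one global cache lock.
	db, err := core.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var raw *sql.DB = db.DB // the embedded *sql.DB is still accessible
	_ = core.FromDB(raw)
}
```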
vendor/github.com/go-xorm/core/dialect.go (generated, vendored, 7 changes)
@@ -74,6 +74,7 @@ type Dialect interface {
|
||||
GetIndexes(tableName string) (map[string]*Index, error)
|
||||
|
||||
Filters() []Filter
|
||||
SetParams(params map[string]string)
|
||||
}
|
||||
|
||||
func OpenDialect(dialect Dialect) (*DB, error) {
|
||||
@@ -148,7 +149,8 @@ func (db *Base) SupportDropIfExists() bool {
|
||||
}
|
||||
|
||||
func (db *Base) DropTableSql(tableName string) string {
|
||||
return fmt.Sprintf("DROP TABLE IF EXISTS `%s`", tableName)
|
||||
quote := db.dialect.Quote
|
||||
return fmt.Sprintf("DROP TABLE IF EXISTS %s", quote(tableName))
|
||||
}
|
||||
|
||||
func (db *Base) HasRecords(query string, args ...interface{}) (bool, error) {
|
||||
@@ -289,6 +291,9 @@ func (b *Base) LogSQL(sql string, args []interface{}) {
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Base) SetParams(params map[string]string) {
|
||||
}
|
||||
|
||||
var (
|
||||
dialects = map[string]func() Dialect{}
|
||||
)
|
||||
|
||||
vendor/github.com/go-xorm/core/filter.go (generated, vendored, 6 changes)
@@ -37,9 +37,9 @@ func (q *Quoter) Quote(content string) string {
|
||||
func (i *IdFilter) Do(sql string, dialect Dialect, table *Table) string {
|
||||
quoter := NewQuoter(dialect)
|
||||
if table != nil && len(table.PrimaryKeys) == 1 {
|
||||
sql = strings.Replace(sql, "`(id)`", quoter.Quote(table.PrimaryKeys[0]), -1)
|
||||
sql = strings.Replace(sql, quoter.Quote("(id)"), quoter.Quote(table.PrimaryKeys[0]), -1)
|
||||
return strings.Replace(sql, "(id)", quoter.Quote(table.PrimaryKeys[0]), -1)
|
||||
sql = strings.Replace(sql, " `(id)` ", " "+quoter.Quote(table.PrimaryKeys[0])+" ", -1)
|
||||
sql = strings.Replace(sql, " "+quoter.Quote("(id)")+" ", " "+quoter.Quote(table.PrimaryKeys[0])+" ", -1)
|
||||
return strings.Replace(sql, " (id) ", " "+quoter.Quote(table.PrimaryKeys[0])+" ", -1)
|
||||
}
|
||||
return sql
|
||||
}
|
||||
|
||||
vendor/github.com/go-xorm/core/index.go (generated, vendored, 2 changes)
@@ -22,6 +22,8 @@ type Index struct {
|
||||
func (index *Index) XName(tableName string) string {
|
||||
if !strings.HasPrefix(index.Name, "UQE_") &&
|
||||
!strings.HasPrefix(index.Name, "IDX_") {
|
||||
tableName = strings.Replace(tableName, `"`, "", -1)
|
||||
tableName = strings.Replace(tableName, `.`, "_", -1)
|
||||
if index.Type == UniqueType {
|
||||
return fmt.Sprintf("UQE_%v_%v", tableName, index.Name)
|
||||
}
|
||||
|
||||
vendor/github.com/go-xorm/core/rows.go (generated, vendored, 64 changes)
@@ -9,7 +9,7 @@ import (
|
||||
|
||||
type Rows struct {
|
||||
*sql.Rows
|
||||
Mapper IMapper
|
||||
db *DB
|
||||
}
|
||||
|
||||
func (rs *Rows) ToMapString() ([]map[string]string, error) {
|
||||
@@ -105,7 +105,7 @@ func (rs *Rows) ScanStructByName(dest interface{}) error {
|
||||
newDest := make([]interface{}, len(cols))
|
||||
var v EmptyScanner
|
||||
for j, name := range cols {
|
||||
f := fieldByName(vv.Elem(), rs.Mapper.Table2Obj(name))
|
||||
f := fieldByName(vv.Elem(), rs.db.Mapper.Table2Obj(name))
|
||||
if f.IsValid() {
|
||||
newDest[j] = f.Addr().Interface()
|
||||
} else {
|
||||
@@ -116,36 +116,6 @@ func (rs *Rows) ScanStructByName(dest interface{}) error {
|
||||
return rs.Rows.Scan(newDest...)
|
||||
}
|
||||
|
||||
type cacheStruct struct {
|
||||
value reflect.Value
|
||||
idx int
|
||||
}
|
||||
|
||||
var (
|
||||
reflectCache = make(map[reflect.Type]*cacheStruct)
|
||||
reflectCacheMutex sync.RWMutex
|
||||
)
|
||||
|
||||
func ReflectNew(typ reflect.Type) reflect.Value {
|
||||
reflectCacheMutex.RLock()
|
||||
cs, ok := reflectCache[typ]
|
||||
reflectCacheMutex.RUnlock()
|
||||
|
||||
const newSize = 200
|
||||
|
||||
if !ok || cs.idx+1 > newSize-1 {
|
||||
cs = &cacheStruct{reflect.MakeSlice(reflect.SliceOf(typ), newSize, newSize), 0}
|
||||
reflectCacheMutex.Lock()
|
||||
reflectCache[typ] = cs
|
||||
reflectCacheMutex.Unlock()
|
||||
} else {
|
||||
reflectCacheMutex.Lock()
|
||||
cs.idx = cs.idx + 1
|
||||
reflectCacheMutex.Unlock()
|
||||
}
|
||||
return cs.value.Index(cs.idx).Addr()
|
||||
}
|
||||
|
||||
// scan data to a slice's pointer, slice's length should equal to columns' number
|
||||
func (rs *Rows) ScanSlice(dest interface{}) error {
|
||||
vv := reflect.ValueOf(dest)
|
||||
@@ -197,9 +167,7 @@ func (rs *Rows) ScanMap(dest interface{}) error {
|
||||
vvv := vv.Elem()
|
||||
|
||||
for i, _ := range cols {
|
||||
newDest[i] = ReflectNew(vvv.Type().Elem()).Interface()
|
||||
//v := reflect.New(vvv.Type().Elem())
|
||||
//newDest[i] = v.Interface()
|
||||
newDest[i] = rs.db.reflectNew(vvv.Type().Elem()).Interface()
|
||||
}
|
||||
|
||||
err = rs.Rows.Scan(newDest...)
|
||||
@@ -215,32 +183,6 @@ func (rs *Rows) ScanMap(dest interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
/*func (rs *Rows) ScanMap(dest interface{}) error {
|
||||
vv := reflect.ValueOf(dest)
|
||||
if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map {
|
||||
return errors.New("dest should be a map's pointer")
|
||||
}
|
||||
|
||||
cols, err := rs.Columns()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newDest := make([]interface{}, len(cols))
|
||||
err = rs.ScanSlice(newDest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vvv := vv.Elem()
|
||||
|
||||
for i, name := range cols {
|
||||
vname := reflect.ValueOf(name)
|
||||
vvv.SetMapIndex(vname, reflect.ValueOf(newDest[i]).Elem())
|
||||
}
|
||||
|
||||
return nil
|
||||
}*/
|
||||
type Row struct {
|
||||
rows *Rows
|
||||
// One of these two will be non-nil:
|
||||
|
||||
vendor/github.com/go-xorm/core/scan.go (generated, vendored, 3 changes)
@@ -44,6 +44,9 @@ func convertTime(dest *NullTime, src interface{}) error {
|
||||
}
|
||||
*dest = NullTime(t)
|
||||
return nil
|
||||
case time.Time:
|
||||
*dest = NullTime(s)
|
||||
return nil
|
||||
case nil:
|
||||
default:
|
||||
return fmt.Errorf("unsupported driver -> Scan pair: %T -> %T", src, dest)
|
||||
|
||||
vendor/github.com/go-xorm/core/type.go (generated, vendored, 36 changes)
@@ -69,15 +69,17 @@ var (
|
||||
Enum = "ENUM"
|
||||
Set = "SET"
|
||||
|
||||
Char = "CHAR"
|
||||
Varchar = "VARCHAR"
|
||||
NVarchar = "NVARCHAR"
|
||||
TinyText = "TINYTEXT"
|
||||
Text = "TEXT"
|
||||
Clob = "CLOB"
|
||||
MediumText = "MEDIUMTEXT"
|
||||
LongText = "LONGTEXT"
|
||||
Uuid = "UUID"
|
||||
Char = "CHAR"
|
||||
Varchar = "VARCHAR"
|
||||
NVarchar = "NVARCHAR"
|
||||
TinyText = "TINYTEXT"
|
||||
Text = "TEXT"
|
||||
Clob = "CLOB"
|
||||
MediumText = "MEDIUMTEXT"
|
||||
LongText = "LONGTEXT"
|
||||
Uuid = "UUID"
|
||||
UniqueIdentifier = "UNIQUEIDENTIFIER"
|
||||
SysName = "SYSNAME"
|
||||
|
||||
Date = "DATE"
|
||||
DateTime = "DATETIME"
|
||||
@@ -132,6 +134,7 @@ var (
|
||||
LongText: TEXT_TYPE,
|
||||
Uuid: TEXT_TYPE,
|
||||
Clob: TEXT_TYPE,
|
||||
SysName: TEXT_TYPE,
|
||||
|
||||
Date: TIME_TYPE,
|
||||
DateTime: TIME_TYPE,
|
||||
@@ -148,11 +151,12 @@ var (
|
||||
Binary: BLOB_TYPE,
|
||||
VarBinary: BLOB_TYPE,
|
||||
|
||||
TinyBlob: BLOB_TYPE,
|
||||
Blob: BLOB_TYPE,
|
||||
MediumBlob: BLOB_TYPE,
|
||||
LongBlob: BLOB_TYPE,
|
||||
Bytea: BLOB_TYPE,
|
||||
TinyBlob: BLOB_TYPE,
|
||||
Blob: BLOB_TYPE,
|
||||
MediumBlob: BLOB_TYPE,
|
||||
LongBlob: BLOB_TYPE,
|
||||
Bytea: BLOB_TYPE,
|
||||
UniqueIdentifier: BLOB_TYPE,
|
||||
|
||||
Bool: NUMERIC_TYPE,
|
||||
|
||||
@@ -289,9 +293,9 @@ func SQLType2Type(st SQLType) reflect.Type {
|
||||
return reflect.TypeOf(float32(1))
|
||||
case Double:
|
||||
return reflect.TypeOf(float64(1))
|
||||
case Char, Varchar, NVarchar, TinyText, Text, MediumText, LongText, Enum, Set, Uuid, Clob:
|
||||
case Char, Varchar, NVarchar, TinyText, Text, MediumText, LongText, Enum, Set, Uuid, Clob, SysName:
|
||||
return reflect.TypeOf("")
|
||||
case TinyBlob, Blob, LongBlob, Bytea, Binary, MediumBlob, VarBinary:
|
||||
case TinyBlob, Blob, LongBlob, Bytea, Binary, MediumBlob, VarBinary, UniqueIdentifier:
|
||||
return reflect.TypeOf([]byte{})
|
||||
case Bool:
|
||||
return reflect.TypeOf(true)
|
||||
|
||||
vendor/github.com/go-xorm/xorm/dialect_mysql.go (generated, vendored, 74 changes)
@@ -172,12 +172,33 @@ type mysql struct {
|
||||
allowAllFiles bool
|
||||
allowOldPasswords bool
|
||||
clientFoundRows bool
|
||||
rowFormat string
|
||||
}
|
||||
|
||||
func (db *mysql) Init(d *core.DB, uri *core.Uri, drivername, dataSourceName string) error {
|
||||
return db.Base.Init(d, db, uri, drivername, dataSourceName)
|
||||
}
|
||||
|
||||
func (db *mysql) SetParams(params map[string]string) {
|
||||
rowFormat, ok := params["rowFormat"]
|
||||
if ok {
|
||||
var t = strings.ToUpper(rowFormat)
|
||||
switch t {
|
||||
case "COMPACT":
|
||||
fallthrough
|
||||
case "REDUNDANT":
|
||||
fallthrough
|
||||
case "DYNAMIC":
|
||||
fallthrough
|
||||
case "COMPRESSED":
|
||||
db.rowFormat = t
|
||||
break
|
||||
default:
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (db *mysql) SqlType(c *core.Column) string {
|
||||
var res string
|
||||
switch t := c.SQLType.Name; t {
|
||||
@@ -487,6 +508,59 @@ func (db *mysql) GetIndexes(tableName string) (map[string]*core.Index, error) {
|
||||
return indexes, nil
|
||||
}
|
||||
|
||||
func (db *mysql) CreateTableSql(table *core.Table, tableName, storeEngine, charset string) string {
|
||||
var sql string
|
||||
sql = "CREATE TABLE IF NOT EXISTS "
|
||||
if tableName == "" {
|
||||
tableName = table.Name
|
||||
}
|
||||
|
||||
sql += db.Quote(tableName)
|
||||
sql += " ("
|
||||
|
||||
if len(table.ColumnsSeq()) > 0 {
|
||||
pkList := table.PrimaryKeys
|
||||
|
||||
for _, colName := range table.ColumnsSeq() {
|
||||
col := table.GetColumn(colName)
|
||||
if col.IsPrimaryKey && len(pkList) == 1 {
|
||||
sql += col.String(db)
|
||||
} else {
|
||||
sql += col.StringNoPk(db)
|
||||
}
|
||||
sql = strings.TrimSpace(sql)
|
||||
if len(col.Comment) > 0 {
|
||||
sql += " COMMENT '" + col.Comment + "'"
|
||||
}
|
||||
sql += ", "
|
||||
}
|
||||
|
||||
if len(pkList) > 1 {
|
||||
sql += "PRIMARY KEY ( "
|
||||
sql += db.Quote(strings.Join(pkList, db.Quote(",")))
|
||||
sql += " ), "
|
||||
}
|
||||
|
||||
sql = sql[:len(sql)-2]
|
||||
}
|
||||
sql += ")"
|
||||
|
||||
if storeEngine != "" {
|
||||
sql += " ENGINE=" + storeEngine
|
||||
}
|
||||
|
||||
if len(charset) == 0 {
|
||||
charset = db.URI().Charset
|
||||
} else if len(charset) > 0 {
|
||||
sql += " DEFAULT CHARSET " + charset
|
||||
}
|
||||
|
||||
if db.rowFormat != "" {
|
||||
sql += " ROW_FORMAT=" + db.rowFormat
|
||||
}
|
||||
return sql
|
||||
}
|
||||
|
||||
func (db *mysql) Filters() []core.Filter {
|
||||
return []core.Filter{&core.IdFilter{}}
|
||||
}
|
||||
|
||||
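dialect_mysql.go implements the new SetParams hook from core.Dialect: a "rowFormat" parameter (COMPACT, REDUNDANT, DYNAMIC or COMPRESSED) is remembered and appended as ROW_FORMAT=... by the new CreateTableSql. How the parameter reaches the dialect depends on the xorm version; a hedged sketch going through the engine's Dialect() accessor, with a placeholder DSN:

```go
package main

import (
	"log"

	_ "github.com/go-sql-driver/mysql" // driver import for the example
	"github.com/go-xorm/xorm"
)

func main() {
	engine, err := xorm.NewEngine("mysql", "user:pass@/grafana?charset=utf8") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	// Assumption: Dialect() exposes the core.Dialect so SetParams can be called directly.
	engine.Dialect().SetParams(map[string]string{"rowFormat": "DYNAMIC"})
	// CREATE TABLE statements generated afterwards carry ROW_FORMAT=DYNAMIC.
}
```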
vendor/github.com/go-xorm/xorm/dialect_postgres.go (generated, vendored, 96 changes)
@@ -764,14 +764,26 @@ var (
|
||||
"YES": true,
|
||||
"ZONE": true,
|
||||
}
|
||||
|
||||
// DefaultPostgresSchema default postgres schema
|
||||
DefaultPostgresSchema = "public"
|
||||
)
|
||||
|
||||
const postgresPublicSchema = "public"
|
||||
|
||||
type postgres struct {
|
||||
core.Base
|
||||
}
|
||||
|
||||
func (db *postgres) Init(d *core.DB, uri *core.Uri, drivername, dataSourceName string) error {
|
||||
return db.Base.Init(d, db, uri, drivername, dataSourceName)
|
||||
err := db.Base.Init(d, db, uri, drivername, dataSourceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if db.Schema == "" {
|
||||
db.Schema = DefaultPostgresSchema
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *postgres) SqlType(c *core.Column) string {
|
||||
@@ -868,32 +880,42 @@ func (db *postgres) IndexOnTable() bool {
|
||||
}
|
||||
|
||||
func (db *postgres) IndexCheckSql(tableName, idxName string) (string, []interface{}) {
|
||||
args := []interface{}{tableName, idxName}
|
||||
if len(db.Schema) == 0 {
|
||||
args := []interface{}{tableName, idxName}
|
||||
return `SELECT indexname FROM pg_indexes WHERE tablename = ? AND indexname = ?`, args
|
||||
}
|
||||
|
||||
args := []interface{}{db.Schema, tableName, idxName}
|
||||
return `SELECT indexname FROM pg_indexes ` +
|
||||
`WHERE tablename = ? AND indexname = ?`, args
|
||||
`WHERE schemaname = ? AND tablename = ? AND indexname = ?`, args
|
||||
}
|
||||
|
||||
func (db *postgres) TableCheckSql(tableName string) (string, []interface{}) {
|
||||
args := []interface{}{tableName}
|
||||
return `SELECT tablename FROM pg_tables WHERE tablename = ?`, args
|
||||
if len(db.Schema) == 0 {
|
||||
args := []interface{}{tableName}
|
||||
return `SELECT tablename FROM pg_tables WHERE tablename = ?`, args
|
||||
}
|
||||
|
||||
args := []interface{}{db.Schema, tableName}
|
||||
return `SELECT tablename FROM pg_tables WHERE schemaname = ? AND tablename = ?`, args
|
||||
}
|
||||
|
||||
/*func (db *postgres) ColumnCheckSql(tableName, colName string) (string, []interface{}) {
|
||||
args := []interface{}{tableName, colName}
|
||||
return "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = ?" +
|
||||
" AND column_name = ?", args
|
||||
}*/
|
||||
|
||||
func (db *postgres) ModifyColumnSql(tableName string, col *core.Column) string {
|
||||
return fmt.Sprintf("alter table %s ALTER COLUMN %s TYPE %s",
|
||||
tableName, col.Name, db.SqlType(col))
|
||||
if len(db.Schema) == 0 {
|
||||
return fmt.Sprintf("alter table %s ALTER COLUMN %s TYPE %s",
|
||||
tableName, col.Name, db.SqlType(col))
|
||||
}
|
||||
return fmt.Sprintf("alter table %s.%s ALTER COLUMN %s TYPE %s",
|
||||
db.Schema, tableName, col.Name, db.SqlType(col))
|
||||
}
|
||||
|
||||
func (db *postgres) DropIndexSql(tableName string, index *core.Index) string {
|
||||
//var unique string
|
||||
quote := db.Quote
|
||||
idxName := index.Name
|
||||
|
||||
tableName = strings.Replace(tableName, `"`, "", -1)
|
||||
tableName = strings.Replace(tableName, `.`, "_", -1)
|
||||
|
||||
if !strings.HasPrefix(idxName, "UQE_") &&
|
||||
!strings.HasPrefix(idxName, "IDX_") {
|
||||
if index.Type == core.UniqueType {
|
||||
@@ -902,13 +924,21 @@ func (db *postgres) DropIndexSql(tableName string, index *core.Index) string {
|
||||
idxName = fmt.Sprintf("IDX_%v_%v", tableName, index.Name)
|
||||
}
|
||||
}
|
||||
if db.Uri.Schema != "" {
|
||||
idxName = db.Uri.Schema + "." + idxName
|
||||
}
|
||||
return fmt.Sprintf("DROP INDEX %v", quote(idxName))
|
||||
}
|
||||
|
||||
func (db *postgres) IsColumnExist(tableName, colName string) (bool, error) {
|
||||
args := []interface{}{tableName, colName}
|
||||
query := "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = $1" +
|
||||
" AND column_name = $2"
|
||||
args := []interface{}{db.Schema, tableName, colName}
|
||||
query := "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = $1 AND table_name = $2" +
|
||||
" AND column_name = $3"
|
||||
if len(db.Schema) == 0 {
|
||||
args = []interface{}{tableName, colName}
|
||||
query = "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = $1" +
|
||||
" AND column_name = $2"
|
||||
}
|
||||
db.LogSQL(query, args)
|
||||
|
||||
rows, err := db.DB().Query(query, args...)
|
||||
@@ -921,8 +951,7 @@ func (db *postgres) IsColumnExist(tableName, colName string) (bool, error) {
|
||||
}
|
||||
|
||||
func (db *postgres) GetColumns(tableName string) ([]string, map[string]*core.Column, error) {
|
||||
// FIXME: the schema should be replaced by user custom's
|
||||
args := []interface{}{tableName, "public"}
|
||||
args := []interface{}{tableName}
|
||||
s := `SELECT column_name, column_default, is_nullable, data_type, character_maximum_length, numeric_precision, numeric_precision_radix ,
|
||||
CASE WHEN p.contype = 'p' THEN true ELSE false END AS primarykey,
|
||||
CASE WHEN p.contype = 'u' THEN true ELSE false END AS uniquekey
|
||||
@@ -933,7 +962,15 @@ FROM pg_attribute f
|
||||
LEFT JOIN pg_constraint p ON p.conrelid = c.oid AND f.attnum = ANY (p.conkey)
|
||||
LEFT JOIN pg_class AS g ON p.confrelid = g.oid
|
||||
LEFT JOIN INFORMATION_SCHEMA.COLUMNS s ON s.column_name=f.attname AND c.relname=s.table_name
|
||||
WHERE c.relkind = 'r'::char AND c.relname = $1 AND s.table_schema = $2 AND f.attnum > 0 ORDER BY f.attnum;`
|
||||
WHERE c.relkind = 'r'::char AND c.relname = $1%s AND f.attnum > 0 ORDER BY f.attnum;`
|
||||
|
||||
var f string
|
||||
if len(db.Schema) != 0 {
|
||||
args = append(args, db.Schema)
|
||||
f = " AND s.table_schema = $2"
|
||||
}
|
||||
s = fmt.Sprintf(s, f)
|
||||
|
||||
db.LogSQL(s, args)
|
||||
|
||||
rows, err := db.DB().Query(s, args...)
|
||||
@@ -1023,9 +1060,13 @@ WHERE c.relkind = 'r'::char AND c.relname = $1 AND s.table_schema = $2 AND f.att
|
||||
}
|
||||
|
||||
func (db *postgres) GetTables() ([]*core.Table, error) {
|
||||
// FIXME: replace public to user customrize schema
|
||||
args := []interface{}{"public"}
|
||||
s := fmt.Sprintf("SELECT tablename FROM pg_tables WHERE schemaname = $1")
|
||||
args := []interface{}{}
|
||||
s := "SELECT tablename FROM pg_tables"
|
||||
if len(db.Schema) != 0 {
|
||||
args = append(args, db.Schema)
|
||||
s = s + " WHERE schemaname = $1"
|
||||
}
|
||||
|
||||
db.LogSQL(s, args)
|
||||
|
||||
rows, err := db.DB().Query(s, args...)
|
||||
@@ -1049,9 +1090,12 @@ func (db *postgres) GetTables() ([]*core.Table, error) {
|
||||
}
|
||||
|
||||
func (db *postgres) GetIndexes(tableName string) (map[string]*core.Index, error) {
|
||||
// FIXME: replace the public schema to user specify schema
|
||||
args := []interface{}{"public", tableName}
|
||||
s := fmt.Sprintf("SELECT indexname, indexdef FROM pg_indexes WHERE schemaname=$1 AND tablename=$2")
|
||||
args := []interface{}{tableName}
|
||||
s := fmt.Sprintf("SELECT indexname, indexdef FROM pg_indexes WHERE tablename=$1")
|
||||
if len(db.Schema) != 0 {
|
||||
args = append(args, db.Schema)
|
||||
s = s + " AND schemaname=$2"
|
||||
}
|
||||
db.LogSQL(s, args)
|
||||
|
||||
rows, err := db.DB().Query(s, args...)
|
||||
|
||||
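dialect_postgres.go replaces the hard-coded "public" schema with db.Schema, defaulting to DefaultPostgresSchema when nothing is configured, and makes the index, table and column introspection queries schema-aware. A hedged sketch; whether SetSchema is the right entry point depends on the xorm version vendored here, and the DSN is a placeholder:

```go
package main

import (
	"log"

	"github.com/go-xorm/xorm"
	_ "github.com/lib/pq" // driver import for the example
)

func main() {
	engine, err := xorm.NewEngine("postgres", "postgres://user:pass@localhost/grafana?sslmode=disable") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	// Assumption: SetSchema routes the value into the dialect's Schema field,
	// so introspection queries filter on schemaname/table_schema accordingly.
	engine.SetSchema("grafana")
}
```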
vendor/github.com/go-xorm/xorm/engine.go (generated, vendored, 170 changes)
@@ -49,6 +49,35 @@ type Engine struct {
|
||||
tagHandlers map[string]tagHandler
|
||||
|
||||
engineGroup *EngineGroup
|
||||
|
||||
cachers map[string]core.Cacher
|
||||
cacherLock sync.RWMutex
|
||||
}
|
||||
|
||||
func (engine *Engine) setCacher(tableName string, cacher core.Cacher) {
|
||||
engine.cacherLock.Lock()
|
||||
engine.cachers[tableName] = cacher
|
||||
engine.cacherLock.Unlock()
|
||||
}
|
||||
|
||||
func (engine *Engine) SetCacher(tableName string, cacher core.Cacher) {
|
||||
engine.setCacher(tableName, cacher)
|
||||
}
|
||||
|
||||
func (engine *Engine) getCacher(tableName string) core.Cacher {
|
||||
var cacher core.Cacher
|
||||
var ok bool
|
||||
engine.cacherLock.RLock()
|
||||
cacher, ok = engine.cachers[tableName]
|
||||
engine.cacherLock.RUnlock()
|
||||
if !ok && !engine.disableGlobalCache {
|
||||
cacher = engine.Cacher
|
||||
}
|
||||
return cacher
|
||||
}
|
||||
|
||||
func (engine *Engine) GetCacher(tableName string) core.Cacher {
|
||||
return engine.getCacher(tableName)
|
||||
}
|
||||
|
||||
// BufferSize sets buffer size for iterate
|
||||
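This first engine.go hunk replaces the per-table Cacher field with a cachers map owned by the Engine, protected by cacherLock, and exposes it through SetCacher/GetCacher; MapCacher and the cache/nocache struct tags now populate that map instead of mutating core.Table. A sketch of the public surface, reusing the LRU cacher constructors that already exist in the package (the SQLite driver and table layout are only examples):

```go
package main

import (
	"log"
	"time"

	"github.com/go-xorm/xorm"
	_ "github.com/mattn/go-sqlite3" // driver import for the example
)

type User struct {
	Id   int64
	Name string
}

func main() {
	engine, err := xorm.NewEngine("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	// Cache lookups for the "user" table in an in-memory LRU for one hour.
	cacher := xorm.NewLRUCacher2(xorm.NewMemoryStore(), time.Hour, 1000)
	engine.SetCacher("user", cacher)

	// MapCacher does the same, deriving the table name from the bean.
	if err := engine.MapCacher(new(User), cacher); err != nil {
		log.Fatal(err)
	}
}
```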
@@ -245,13 +274,7 @@ func (engine *Engine) NoCascade() *Session {
|
||||
|
||||
// MapCacher Set a table use a special cacher
|
||||
func (engine *Engine) MapCacher(bean interface{}, cacher core.Cacher) error {
|
||||
v := rValue(bean)
|
||||
tb, err := engine.autoMapType(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tb.Cacher = cacher
|
||||
engine.setCacher(engine.TableName(bean, true), cacher)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -536,33 +559,6 @@ func (engine *Engine) dumpTables(tables []*core.Table, w io.Writer, tp ...core.D
|
||||
return nil
|
||||
}
|
||||
|
||||
func (engine *Engine) tableName(beanOrTableName interface{}) (string, error) {
|
||||
v := rValue(beanOrTableName)
|
||||
if v.Type().Kind() == reflect.String {
|
||||
return beanOrTableName.(string), nil
|
||||
} else if v.Type().Kind() == reflect.Struct {
|
||||
return engine.tbName(v), nil
|
||||
}
|
||||
return "", errors.New("bean should be a struct or struct's point")
|
||||
}
|
||||
|
||||
func (engine *Engine) tbName(v reflect.Value) string {
|
||||
if tb, ok := v.Interface().(TableName); ok {
|
||||
return tb.TableName()
|
||||
}
|
||||
|
||||
if v.Type().Kind() == reflect.Ptr {
|
||||
if tb, ok := reflect.Indirect(v).Interface().(TableName); ok {
|
||||
return tb.TableName()
|
||||
}
|
||||
} else if v.CanAddr() {
|
||||
if tb, ok := v.Addr().Interface().(TableName); ok {
|
||||
return tb.TableName()
|
||||
}
|
||||
}
|
||||
return engine.TableMapper.Obj2Table(reflect.Indirect(v).Type().Name())
|
||||
}
|
||||
|
||||
// Cascade use cascade or not
|
||||
func (engine *Engine) Cascade(trueOrFalse ...bool) *Session {
|
||||
session := engine.NewSession()
|
||||
@@ -846,7 +842,7 @@ func (engine *Engine) TableInfo(bean interface{}) *Table {
|
||||
if err != nil {
|
||||
engine.logger.Error(err)
|
||||
}
|
||||
return &Table{tb, engine.tbName(v)}
|
||||
return &Table{tb, engine.TableName(bean)}
|
||||
}
|
||||
|
||||
func addIndex(indexName string, table *core.Table, col *core.Column, indexType int) {
|
||||
@@ -861,15 +857,6 @@ func addIndex(indexName string, table *core.Table, col *core.Column, indexType i
|
||||
}
|
||||
}
|
||||
|
||||
func (engine *Engine) newTable() *core.Table {
|
||||
table := core.NewEmptyTable()
|
||||
|
||||
if !engine.disableGlobalCache {
|
||||
table.Cacher = engine.Cacher
|
||||
}
|
||||
return table
|
||||
}
|
||||
|
||||
// TableName table name interface to define customerize table name
|
||||
type TableName interface {
|
||||
TableName() string
|
||||
@@ -881,21 +868,9 @@ var (
|
||||
|
||||
func (engine *Engine) mapType(v reflect.Value) (*core.Table, error) {
|
||||
t := v.Type()
|
||||
table := engine.newTable()
|
||||
if tb, ok := v.Interface().(TableName); ok {
|
||||
table.Name = tb.TableName()
|
||||
} else {
|
||||
if v.CanAddr() {
|
||||
if tb, ok = v.Addr().Interface().(TableName); ok {
|
||||
table.Name = tb.TableName()
|
||||
}
|
||||
}
|
||||
if table.Name == "" {
|
||||
table.Name = engine.TableMapper.Obj2Table(t.Name())
|
||||
}
|
||||
}
|
||||
|
||||
table := core.NewEmptyTable()
|
||||
table.Type = t
|
||||
table.Name = engine.tbNameForMap(v)
|
||||
|
||||
var idFieldColName string
|
||||
var hasCacheTag, hasNoCacheTag bool
|
||||
@@ -1049,15 +1024,15 @@ func (engine *Engine) mapType(v reflect.Value) (*core.Table, error) {
|
||||
if hasCacheTag {
|
||||
if engine.Cacher != nil { // !nash! use engine's cacher if provided
|
||||
engine.logger.Info("enable cache on table:", table.Name)
|
||||
table.Cacher = engine.Cacher
|
||||
engine.setCacher(table.Name, engine.Cacher)
|
||||
} else {
|
||||
engine.logger.Info("enable LRU cache on table:", table.Name)
|
||||
table.Cacher = NewLRUCacher2(NewMemoryStore(), time.Hour, 10000) // !nashtsai! HACK use LRU cacher for now
|
||||
engine.setCacher(table.Name, NewLRUCacher2(NewMemoryStore(), time.Hour, 10000))
|
||||
}
|
||||
}
|
||||
if hasNoCacheTag {
|
||||
engine.logger.Info("no cache on table:", table.Name)
|
||||
table.Cacher = nil
|
||||
engine.logger.Info("disable cache on table:", table.Name)
|
||||
engine.setCacher(table.Name, nil)
|
||||
}
|
||||
|
||||
return table, nil
|
||||
@@ -1162,26 +1137,10 @@ func (engine *Engine) CreateUniques(bean interface{}) error {
|
||||
return session.CreateUniques(bean)
|
||||
}
|
||||
|
||||
func (engine *Engine) getCacher2(table *core.Table) core.Cacher {
|
||||
return table.Cacher
|
||||
}
|
||||
|
||||
// ClearCacheBean if enabled cache, clear the cache bean
|
||||
func (engine *Engine) ClearCacheBean(bean interface{}, id string) error {
|
||||
v := rValue(bean)
|
||||
t := v.Type()
|
||||
if t.Kind() != reflect.Struct {
|
||||
return errors.New("error params")
|
||||
}
|
||||
tableName := engine.tbName(v)
|
||||
table, err := engine.autoMapType(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cacher := table.Cacher
|
||||
if cacher == nil {
|
||||
cacher = engine.Cacher
|
||||
}
|
||||
tableName := engine.TableName(bean)
|
||||
cacher := engine.getCacher(tableName)
|
||||
if cacher != nil {
|
||||
cacher.ClearIds(tableName)
|
||||
cacher.DelBean(tableName, id)
|
||||
@@ -1192,21 +1151,8 @@ func (engine *Engine) ClearCacheBean(bean interface{}, id string) error {
|
||||
// ClearCache if enabled cache, clear some tables' cache
|
||||
func (engine *Engine) ClearCache(beans ...interface{}) error {
|
||||
for _, bean := range beans {
|
||||
v := rValue(bean)
|
||||
t := v.Type()
|
||||
if t.Kind() != reflect.Struct {
|
||||
return errors.New("error params")
|
||||
}
|
||||
tableName := engine.tbName(v)
|
||||
table, err := engine.autoMapType(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cacher := table.Cacher
|
||||
if cacher == nil {
|
||||
cacher = engine.Cacher
|
||||
}
|
||||
tableName := engine.TableName(bean)
|
||||
cacher := engine.getCacher(tableName)
|
||||
if cacher != nil {
|
||||
cacher.ClearIds(tableName)
|
||||
cacher.ClearBeans(tableName)
|
||||
@@ -1224,13 +1170,13 @@ func (engine *Engine) Sync(beans ...interface{}) error {
|
||||
|
||||
for _, bean := range beans {
|
||||
v := rValue(bean)
|
||||
tableName := engine.tbName(v)
|
||||
tableNameNoSchema := engine.tbNameNoSchema(v.Interface())
|
||||
table, err := engine.autoMapType(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
isExist, err := session.Table(bean).isTableExist(tableName)
|
||||
isExist, err := session.Table(bean).isTableExist(tableNameNoSchema)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1256,12 +1202,12 @@ func (engine *Engine) Sync(beans ...interface{}) error {
|
||||
}
|
||||
} else {
|
||||
for _, col := range table.Columns() {
|
||||
isExist, err := engine.dialect.IsColumnExist(tableName, col.Name)
|
||||
isExist, err := engine.dialect.IsColumnExist(tableNameNoSchema, col.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !isExist {
|
||||
if err := session.statement.setRefValue(v); err != nil {
|
||||
if err := session.statement.setRefBean(bean); err != nil {
|
||||
return err
|
||||
}
|
||||
err = session.addColumn(col.Name)
|
||||
@@ -1272,35 +1218,35 @@ func (engine *Engine) Sync(beans ...interface{}) error {
|
||||
}
|
||||
|
||||
for name, index := range table.Indexes {
|
||||
if err := session.statement.setRefValue(v); err != nil {
|
||||
if err := session.statement.setRefBean(bean); err != nil {
|
||||
return err
|
||||
}
|
||||
if index.Type == core.UniqueType {
|
||||
isExist, err := session.isIndexExist2(tableName, index.Cols, true)
|
||||
isExist, err := session.isIndexExist2(tableNameNoSchema, index.Cols, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !isExist {
|
||||
if err := session.statement.setRefValue(v); err != nil {
|
||||
if err := session.statement.setRefBean(bean); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = session.addUnique(tableName, name)
|
||||
err = session.addUnique(tableNameNoSchema, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else if index.Type == core.IndexType {
|
||||
isExist, err := session.isIndexExist2(tableName, index.Cols, false)
|
||||
isExist, err := session.isIndexExist2(tableNameNoSchema, index.Cols, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !isExist {
|
||||
if err := session.statement.setRefValue(v); err != nil {
|
||||
if err := session.statement.setRefBean(bean); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = session.addIndex(tableName, name)
|
||||
err = session.addIndex(tableNameNoSchema, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1453,6 +1399,13 @@ func (engine *Engine) Find(beans interface{}, condiBeans ...interface{}) error {
|
||||
return session.Find(beans, condiBeans...)
|
||||
}
|
||||
|
||||
// FindAndCount find the results and also return the counts
|
||||
func (engine *Engine) FindAndCount(rowsSlicePtr interface{}, condiBean ...interface{}) (int64, error) {
|
||||
session := engine.NewSession()
|
||||
defer session.Close()
|
||||
return session.FindAndCount(rowsSlicePtr, condiBean...)
|
||||
}
|
||||
|
||||
// Iterate record by record handle records from table, bean's non-empty fields
|
||||
// are conditions.
|
||||
func (engine *Engine) Iterate(bean interface{}, fun IterFunc) error {
|
||||
@@ -1629,6 +1582,11 @@ func (engine *Engine) SetTZDatabase(tz *time.Location) {
|
||||
engine.DatabaseTZ = tz
|
||||
}
|
||||
|
||||
// SetSchema sets the schema of database
|
||||
func (engine *Engine) SetSchema(schema string) {
|
||||
engine.dialect.URI().Schema = schema
|
||||
}
|
||||
|
||||
// Unscoped always disable struct tag "deleted"
|
||||
func (engine *Engine) Unscoped() *Session {
|
||||
session := engine.NewSession()
|
||||
|
||||
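The engine.go hunks above move per-table caching into a cachers map on the Engine, guarded by a sync.RWMutex through setCacher/getCacher, so registration and lookup are safe across goroutines. A minimal sketch of that registry pattern follows; the Cacher interface here is a placeholder standing in for core.Cacher.

```go
package main

import (
	"fmt"
	"sync"
)

// Cacher is a stand-in for core.Cacher; only the registry mechanics matter here.
type Cacher interface {
	Name() string
}

type namedCacher struct{ name string }

func (c namedCacher) Name() string { return c.name }

// registry maps table names to cachers; the RWMutex allows many concurrent readers.
type registry struct {
	mu       sync.RWMutex
	cachers  map[string]Cacher
	fallback Cacher // used when no per-table cacher is registered
}

func (r *registry) set(table string, c Cacher) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.cachers[table] = c
}

func (r *registry) get(table string) Cacher {
	r.mu.RLock()
	c, ok := r.cachers[table]
	r.mu.RUnlock()
	if !ok {
		return r.fallback
	}
	return c
}

func main() {
	r := &registry{cachers: map[string]Cacher{}, fallback: namedCacher{"global-lru"}}
	r.set("user", namedCacher{"user-lru"})
	fmt.Println(r.get("user").Name())      // user-lru
	fmt.Println(r.get("dashboard").Name()) // global-lru (fallback)
}
```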
5
vendor/github.com/go-xorm/xorm/engine_cond.go
generated
vendored
@@ -9,6 +9,7 @@ import (
"encoding/json"
"fmt"
"reflect"
"strings"
"time"

"github.com/go-xorm/builder"
@@ -51,7 +52,9 @@ func (engine *Engine) buildConds(table *core.Table, bean interface{},

fieldValuePtr, err := col.ValueOf(bean)
if err != nil {
engine.logger.Error(err)
if !strings.Contains(err.Error(), "is not valid") {
engine.logger.Warn(err)
}
continue
}

113
vendor/github.com/go-xorm/xorm/engine_table.go
generated
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
// Copyright 2018 The Xorm Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package xorm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/go-xorm/core"
|
||||
)
|
||||
|
||||
// TableNameWithSchema will automatically add schema prefix on table name
|
||||
func (engine *Engine) tbNameWithSchema(v string) string {
|
||||
// Add schema name as prefix of table name.
|
||||
// Only for postgres database.
|
||||
if engine.dialect.DBType() == core.POSTGRES &&
|
||||
engine.dialect.URI().Schema != "" &&
|
||||
engine.dialect.URI().Schema != postgresPublicSchema &&
|
||||
strings.Index(v, ".") == -1 {
|
||||
return engine.dialect.URI().Schema + "." + v
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// TableName returns table name with schema prefix if has
|
||||
func (engine *Engine) TableName(bean interface{}, includeSchema ...bool) string {
|
||||
tbName := engine.tbNameNoSchema(bean)
|
||||
if len(includeSchema) > 0 && includeSchema[0] {
|
||||
tbName = engine.tbNameWithSchema(tbName)
|
||||
}
|
||||
|
||||
return tbName
|
||||
}
|
||||
|
||||
// tbName get some table's table name
|
||||
func (session *Session) tbNameNoSchema(table *core.Table) string {
|
||||
if len(session.statement.AltTableName) > 0 {
|
||||
return session.statement.AltTableName
|
||||
}
|
||||
|
||||
return table.Name
|
||||
}
|
||||
|
||||
func (engine *Engine) tbNameForMap(v reflect.Value) string {
|
||||
if v.Type().Implements(tpTableName) {
|
||||
return v.Interface().(TableName).TableName()
|
||||
}
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
if v.Type().Implements(tpTableName) {
|
||||
return v.Interface().(TableName).TableName()
|
||||
}
|
||||
}
|
||||
|
||||
return engine.TableMapper.Obj2Table(v.Type().Name())
|
||||
}
|
||||
|
||||
func (engine *Engine) tbNameNoSchema(tablename interface{}) string {
|
||||
switch tablename.(type) {
|
||||
case []string:
|
||||
t := tablename.([]string)
|
||||
if len(t) > 1 {
|
||||
return fmt.Sprintf("%v AS %v", engine.Quote(t[0]), engine.Quote(t[1]))
|
||||
} else if len(t) == 1 {
|
||||
return engine.Quote(t[0])
|
||||
}
|
||||
case []interface{}:
|
||||
t := tablename.([]interface{})
|
||||
l := len(t)
|
||||
var table string
|
||||
if l > 0 {
|
||||
f := t[0]
|
||||
switch f.(type) {
|
||||
case string:
|
||||
table = f.(string)
|
||||
case TableName:
|
||||
table = f.(TableName).TableName()
|
||||
default:
|
||||
v := rValue(f)
|
||||
t := v.Type()
|
||||
if t.Kind() == reflect.Struct {
|
||||
table = engine.tbNameForMap(v)
|
||||
} else {
|
||||
table = engine.Quote(fmt.Sprintf("%v", f))
|
||||
}
|
||||
}
|
||||
}
|
||||
if l > 1 {
|
||||
return fmt.Sprintf("%v AS %v", engine.Quote(table),
|
||||
engine.Quote(fmt.Sprintf("%v", t[1])))
|
||||
} else if l == 1 {
|
||||
return engine.Quote(table)
|
||||
}
|
||||
case TableName:
|
||||
return tablename.(TableName).TableName()
|
||||
case string:
|
||||
return tablename.(string)
|
||||
case reflect.Value:
|
||||
v := tablename.(reflect.Value)
|
||||
return engine.tbNameForMap(v)
|
||||
default:
|
||||
v := rValue(tablename)
|
||||
t := v.Type()
|
||||
if t.Kind() == reflect.Struct {
|
||||
return engine.tbNameForMap(v)
|
||||
}
|
||||
return engine.Quote(fmt.Sprintf("%v", tablename))
|
||||
}
|
||||
return ""
|
||||
}
|
||||
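The new engine_table.go above centralizes table-name resolution and, for postgres only, prefixes the configured schema when the name is not the default schema and is not already qualified. A standalone sketch of that prefixing rule; the constant and parameter names are illustrative.

```go
package main

import (
	"fmt"
	"strings"
)

const defaultSchema = "public"

// withSchema prefixes a table name with the schema for postgres-style databases,
// but leaves it alone when no schema is set, the schema is the default one,
// or the name is already qualified with a dot.
func withSchema(dbType, schema, table string) string {
	if dbType != "postgres" {
		return table
	}
	if schema == "" || schema == defaultSchema || strings.Contains(table, ".") {
		return table
	}
	return schema + "." + table
}

func main() {
	fmt.Println(withSchema("postgres", "grafana", "dashboard")) // grafana.dashboard
	fmt.Println(withSchema("postgres", "public", "dashboard"))  // dashboard (default schema)
	fmt.Println(withSchema("mysql", "grafana", "dashboard"))    // dashboard (not postgres)
	fmt.Println(withSchema("postgres", "grafana", "a.b"))       // a.b (already qualified)
}
```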
13
vendor/github.com/go-xorm/xorm/error.go
generated
vendored
@@ -6,6 +6,7 @@ package xorm

import (
"errors"
"fmt"
)

var (
@@ -25,4 +26,16 @@ var (
ErrNotImplemented = errors.New("Not implemented")
// ErrConditionType condition type unsupported
ErrConditionType = errors.New("Unsupported conditon type")
// ErrColumnIsNotExist columns is not exist
ErrFieldIsNotExist = errors.New("Field is not exist")
)

// ErrFieldIsNotValid is not valid
type ErrFieldIsNotValid struct {
FieldName string
TableName string
}

func (e ErrFieldIsNotValid) Error() string {
return fmt.Sprintf("field %s is not valid on table %s", e.FieldName, e.TableName)
}

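The error.go hunk above replaces silent nil returns with a sentinel error (ErrFieldIsNotExist) and a typed error (ErrFieldIsNotValid), so callers can decide which failures deserve a warning. The sketch below shows the same pattern outside xorm, using the standard library's errors.Is/errors.As to branch on them; all names here are made up for illustration.

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinel error for a field that simply is not mapped on the table.
var errFieldNotExist = errors.New("field does not exist")

// Typed error carrying which field and table were rejected.
type fieldNotValidError struct {
	Field, Table string
}

func (e fieldNotValidError) Error() string {
	return fmt.Sprintf("field %s is not valid on table %s", e.Field, e.Table)
}

// lookupField is a toy stand-in for a column lookup that can fail in two ways.
func lookupField(table, field string) (string, error) {
	switch field {
	case "missing":
		return "", errFieldNotExist
	case "unsettable":
		return "", fieldNotValidError{Field: field, Table: table}
	default:
		return field, nil
	}
}

func main() {
	for _, f := range []string{"name", "missing", "unsettable"} {
		v, err := lookupField("user", f)
		var notValid fieldNotValidError
		switch {
		case err == nil:
			fmt.Println("found:", v)
		case errors.Is(err, errFieldNotExist):
			fmt.Println("skip silently:", f) // not worth a warning
		case errors.As(err, &notValid):
			fmt.Println("warn:", notValid) // log which field/table was invalid
		}
	}
}
```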
162
vendor/github.com/go-xorm/xorm/helpers.go
generated
vendored
@@ -11,7 +11,6 @@ import (
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-xorm/core"
|
||||
)
|
||||
@@ -293,19 +292,6 @@ func structName(v reflect.Type) string {
|
||||
return v.Name()
|
||||
}
|
||||
|
||||
func col2NewCols(columns ...string) []string {
|
||||
newColumns := make([]string, 0, len(columns))
|
||||
for _, col := range columns {
|
||||
col = strings.Replace(col, "`", "", -1)
|
||||
col = strings.Replace(col, `"`, "", -1)
|
||||
ccols := strings.Split(col, ",")
|
||||
for _, c := range ccols {
|
||||
newColumns = append(newColumns, strings.TrimSpace(c))
|
||||
}
|
||||
}
|
||||
return newColumns
|
||||
}
|
||||
|
||||
func sliceEq(left, right []string) bool {
|
||||
if len(left) != len(right) {
|
||||
return false
|
||||
@@ -320,154 +306,6 @@ func sliceEq(left, right []string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func setColumnInt(bean interface{}, col *core.Column, t int64) {
|
||||
v, err := col.ValueOf(bean)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if v.CanSet() {
|
||||
switch v.Type().Kind() {
|
||||
case reflect.Int, reflect.Int64, reflect.Int32:
|
||||
v.SetInt(t)
|
||||
case reflect.Uint, reflect.Uint64, reflect.Uint32:
|
||||
v.SetUint(uint64(t))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func setColumnTime(bean interface{}, col *core.Column, t time.Time) {
|
||||
v, err := col.ValueOf(bean)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if v.CanSet() {
|
||||
switch v.Type().Kind() {
|
||||
case reflect.Struct:
|
||||
v.Set(reflect.ValueOf(t).Convert(v.Type()))
|
||||
case reflect.Int, reflect.Int64, reflect.Int32:
|
||||
v.SetInt(t.Unix())
|
||||
case reflect.Uint, reflect.Uint64, reflect.Uint32:
|
||||
v.SetUint(uint64(t.Unix()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func genCols(table *core.Table, session *Session, bean interface{}, useCol bool, includeQuote bool) ([]string, []interface{}, error) {
|
||||
colNames := make([]string, 0, len(table.ColumnsSeq()))
|
||||
args := make([]interface{}, 0, len(table.ColumnsSeq()))
|
||||
|
||||
for _, col := range table.Columns() {
|
||||
if useCol && !col.IsVersion && !col.IsCreated && !col.IsUpdated {
|
||||
if _, ok := getFlagForColumn(session.statement.columnMap, col); !ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if col.MapType == core.ONLYFROMDB {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldValuePtr, err := col.ValueOf(bean)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
fieldValue := *fieldValuePtr
|
||||
|
||||
if col.IsAutoIncrement {
|
||||
switch fieldValue.Type().Kind() {
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int, reflect.Int64:
|
||||
if fieldValue.Int() == 0 {
|
||||
continue
|
||||
}
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint, reflect.Uint64:
|
||||
if fieldValue.Uint() == 0 {
|
||||
continue
|
||||
}
|
||||
case reflect.String:
|
||||
if len(fieldValue.String()) == 0 {
|
||||
continue
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if fieldValue.Pointer() == 0 {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if col.IsDeleted {
|
||||
continue
|
||||
}
|
||||
|
||||
if session.statement.ColumnStr != "" {
|
||||
if _, ok := getFlagForColumn(session.statement.columnMap, col); !ok {
|
||||
continue
|
||||
} else if _, ok := session.statement.incrColumns[col.Name]; ok {
|
||||
continue
|
||||
} else if _, ok := session.statement.decrColumns[col.Name]; ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if session.statement.OmitStr != "" {
|
||||
if _, ok := getFlagForColumn(session.statement.columnMap, col); ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// !evalphobia! set fieldValue as nil when column is nullable and zero-value
|
||||
if _, ok := getFlagForColumn(session.statement.nullableMap, col); ok {
|
||||
if col.Nullable && isZero(fieldValue.Interface()) {
|
||||
var nilValue *int
|
||||
fieldValue = reflect.ValueOf(nilValue)
|
||||
}
|
||||
}
|
||||
|
||||
if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime /*&& isZero(fieldValue.Interface())*/ {
|
||||
// if time is non-empty, then set to auto time
|
||||
val, t := session.engine.nowTime(col)
|
||||
args = append(args, val)
|
||||
|
||||
var colName = col.Name
|
||||
session.afterClosures = append(session.afterClosures, func(bean interface{}) {
|
||||
col := table.GetColumn(colName)
|
||||
setColumnTime(bean, col, t)
|
||||
})
|
||||
} else if col.IsVersion && session.statement.checkVersion {
|
||||
args = append(args, 1)
|
||||
} else {
|
||||
arg, err := session.value2Interface(col, fieldValue)
|
||||
if err != nil {
|
||||
return colNames, args, err
|
||||
}
|
||||
args = append(args, arg)
|
||||
}
|
||||
|
||||
if includeQuote {
|
||||
colNames = append(colNames, session.engine.Quote(col.Name)+" = ?")
|
||||
} else {
|
||||
colNames = append(colNames, col.Name)
|
||||
}
|
||||
}
|
||||
return colNames, args, nil
|
||||
}
|
||||
|
||||
func indexName(tableName, idxName string) string {
|
||||
return fmt.Sprintf("IDX_%v_%v", tableName, idxName)
|
||||
}
|
||||
|
||||
func getFlagForColumn(m map[string]bool, col *core.Column) (val bool, has bool) {
|
||||
if len(m) == 0 {
|
||||
return false, false
|
||||
}
|
||||
|
||||
n := len(col.Name)
|
||||
|
||||
for mk := range m {
|
||||
if len(mk) != n {
|
||||
continue
|
||||
}
|
||||
if strings.EqualFold(mk, col.Name) {
|
||||
return m[mk], true
|
||||
}
|
||||
}
|
||||
|
||||
return false, false
|
||||
}
|
||||
|
||||
6
vendor/github.com/go-xorm/xorm/interface.go
generated
vendored
@@ -30,6 +30,7 @@ type Interface interface {
Exec(string, ...interface{}) (sql.Result, error)
Exist(bean ...interface{}) (bool, error)
Find(interface{}, ...interface{}) error
FindAndCount(interface{}, ...interface{}) (int64, error)
Get(interface{}) (bool, error)
GroupBy(keys string) *Session
ID(interface{}) *Session
@@ -41,6 +42,7 @@ type Interface interface {
IsTableExist(beanOrTableName interface{}) (bool, error)
Iterate(interface{}, IterFunc) error
Limit(int, ...int) *Session
MustCols(columns ...string) *Session
NoAutoCondition(...bool) *Session
NotIn(string, ...interface{}) *Session
Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *Session
@@ -75,6 +77,7 @@ type EngineInterface interface {
Dialect() core.Dialect
DropTables(...interface{}) error
DumpAllToFile(fp string, tp ...core.DbType) error
GetCacher(string) core.Cacher
GetColumnMapper() core.IMapper
GetDefaultCacher() core.Cacher
GetTableMapper() core.IMapper
@@ -83,9 +86,11 @@ type EngineInterface interface {
NewSession() *Session
NoAutoTime() *Session
Quote(string) string
SetCacher(string, core.Cacher)
SetDefaultCacher(core.Cacher)
SetLogLevel(core.LogLevel)
SetMapper(core.IMapper)
SetSchema(string)
SetTZDatabase(tz *time.Location)
SetTZLocation(tz *time.Location)
ShowSQL(show ...bool)
@@ -93,6 +98,7 @@ type EngineInterface interface {
Sync2(...interface{}) error
StoreEngine(storeEngine string) *Session
TableInfo(bean interface{}) *Table
TableName(interface{}, ...bool) string
UnMapType(reflect.Type)
}

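The interface.go hunks above widen Interface and EngineInterface with the new methods (FindAndCount, GetCacher/SetCacher, SetSchema, TableName, ...). Whenever an interface grows like this, a compile-time assertion is a cheap way to catch a concrete type that no longer satisfies it; here is a sketch of that idiom with toy types, not xorm's.

```go
package main

import "fmt"

// Finder is a toy stand-in for an interface that just gained a method.
type Finder interface {
	Find(dest interface{}) error
	FindAndCount(dest interface{}) (int64, error) // newly added method
}

// engine is a toy implementation; if it ever misses a Finder method,
// the var declaration below fails to compile instead of failing at runtime.
type engine struct{}

func (engine) Find(dest interface{}) error                  { return nil }
func (engine) FindAndCount(dest interface{}) (int64, error) { return 0, nil }

// Compile-time check that engine satisfies Finder.
var _ Finder = engine{}

func main() {
	var f Finder = engine{}
	n, _ := f.FindAndCount(nil)
	fmt.Println("count:", n)
}
```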
6
vendor/github.com/go-xorm/xorm/rows.go
generated
vendored
@@ -32,7 +32,7 @@ func newRows(session *Session, bean interface{}) (*Rows, error) {
var args []interface{}
var err error

if err = rows.session.statement.setRefValue(rValue(bean)); err != nil {
if err = rows.session.statement.setRefBean(bean); err != nil {
return nil, err
}

@@ -94,8 +94,7 @@ func (rows *Rows) Scan(bean interface{}) error {
return fmt.Errorf("scan arg is incompatible type to [%v]", rows.beanType)
}

dataStruct := rValue(bean)
if err := rows.session.statement.setRefValue(dataStruct); err != nil {
if err := rows.session.statement.setRefBean(bean); err != nil {
return err
}

@@ -104,6 +103,7 @@ func (rows *Rows) Scan(bean interface{}) error {
return err
}

dataStruct := rValue(bean)
_, err = rows.session.slice2Bean(scanResults, rows.fields, bean, &dataStruct, rows.session.statement.RefTable)
if err != nil {
return err

761
vendor/github.com/go-xorm/xorm/session.go
generated
vendored
@@ -278,24 +278,22 @@ func (session *Session) doPrepare(db *core.DB, sqlStr string) (stmt *core.Stmt,
|
||||
return
|
||||
}
|
||||
|
||||
func (session *Session) getField(dataStruct *reflect.Value, key string, table *core.Table, idx int) *reflect.Value {
|
||||
func (session *Session) getField(dataStruct *reflect.Value, key string, table *core.Table, idx int) (*reflect.Value, error) {
|
||||
var col *core.Column
|
||||
if col = table.GetColumnIdx(key, idx); col == nil {
|
||||
//session.engine.logger.Warnf("table %v has no column %v. %v", table.Name, key, table.ColumnsSeq())
|
||||
return nil
|
||||
return nil, ErrFieldIsNotExist
|
||||
}
|
||||
|
||||
fieldValue, err := col.ValueOfV(dataStruct)
|
||||
if err != nil {
|
||||
session.engine.logger.Error(err)
|
||||
return nil
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !fieldValue.IsValid() || !fieldValue.CanSet() {
|
||||
session.engine.logger.Warnf("table %v's column %v is not valid or cannot set", table.Name, key)
|
||||
return nil
|
||||
return nil, ErrFieldIsNotValid{key, table.Name}
|
||||
}
|
||||
return fieldValue
|
||||
|
||||
return fieldValue, nil
|
||||
}
|
||||
|
||||
// Cell cell is a result of one column field
|
||||
@@ -407,409 +405,417 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b
|
||||
}
|
||||
tempMap[lKey] = idx
|
||||
|
||||
if fieldValue := session.getField(dataStruct, key, table, idx); fieldValue != nil {
|
||||
rawValue := reflect.Indirect(reflect.ValueOf(scanResults[ii]))
|
||||
|
||||
// if row is null then ignore
|
||||
if rawValue.Interface() == nil {
|
||||
continue
|
||||
fieldValue, err := session.getField(dataStruct, key, table, idx)
|
||||
if err != nil {
|
||||
if !strings.Contains(err.Error(), "is not valid") {
|
||||
session.engine.logger.Warn(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fieldValue == nil {
|
||||
continue
|
||||
}
|
||||
rawValue := reflect.Indirect(reflect.ValueOf(scanResults[ii]))
|
||||
|
||||
if fieldValue.CanAddr() {
|
||||
if structConvert, ok := fieldValue.Addr().Interface().(core.Conversion); ok {
|
||||
if data, err := value2Bytes(&rawValue); err == nil {
|
||||
if err := structConvert.FromDB(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// if row is null then ignore
|
||||
if rawValue.Interface() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if fieldValue.CanAddr() {
|
||||
if structConvert, ok := fieldValue.Addr().Interface().(core.Conversion); ok {
|
||||
if data, err := value2Bytes(&rawValue); err == nil {
|
||||
if err := structConvert.FromDB(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := fieldValue.Interface().(core.Conversion); ok {
|
||||
if data, err := value2Bytes(&rawValue); err == nil {
|
||||
if fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() {
|
||||
fieldValue.Set(reflect.New(fieldValue.Type().Elem()))
|
||||
}
|
||||
fieldValue.Interface().(core.Conversion).FromDB(data)
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
rawValueType := reflect.TypeOf(rawValue.Interface())
|
||||
vv := reflect.ValueOf(rawValue.Interface())
|
||||
col := table.GetColumnIdx(key, idx)
|
||||
if col.IsPrimaryKey {
|
||||
pk = append(pk, rawValue.Interface())
|
||||
if _, ok := fieldValue.Interface().(core.Conversion); ok {
|
||||
if data, err := value2Bytes(&rawValue); err == nil {
|
||||
if fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() {
|
||||
fieldValue.Set(reflect.New(fieldValue.Type().Elem()))
|
||||
}
|
||||
fieldValue.Interface().(core.Conversion).FromDB(data)
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
fieldType := fieldValue.Type()
|
||||
hasAssigned := false
|
||||
continue
|
||||
}
|
||||
|
||||
if col.SQLType.IsJson() {
|
||||
var bs []byte
|
||||
if rawValueType.Kind() == reflect.String {
|
||||
bs = []byte(vv.String())
|
||||
} else if rawValueType.ConvertibleTo(core.BytesType) {
|
||||
bs = vv.Bytes()
|
||||
rawValueType := reflect.TypeOf(rawValue.Interface())
|
||||
vv := reflect.ValueOf(rawValue.Interface())
|
||||
col := table.GetColumnIdx(key, idx)
|
||||
if col.IsPrimaryKey {
|
||||
pk = append(pk, rawValue.Interface())
|
||||
}
|
||||
fieldType := fieldValue.Type()
|
||||
hasAssigned := false
|
||||
|
||||
if col.SQLType.IsJson() {
|
||||
var bs []byte
|
||||
if rawValueType.Kind() == reflect.String {
|
||||
bs = []byte(vv.String())
|
||||
} else if rawValueType.ConvertibleTo(core.BytesType) {
|
||||
bs = vv.Bytes()
|
||||
} else {
|
||||
return nil, fmt.Errorf("unsupported database data type: %s %v", key, rawValueType.Kind())
|
||||
}
|
||||
|
||||
hasAssigned = true
|
||||
|
||||
if len(bs) > 0 {
|
||||
if fieldType.Kind() == reflect.String {
|
||||
fieldValue.SetString(string(bs))
|
||||
continue
|
||||
}
|
||||
if fieldValue.CanAddr() {
|
||||
err := json.Unmarshal(bs, fieldValue.Addr().Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("unsupported database data type: %s %v", key, rawValueType.Kind())
|
||||
}
|
||||
|
||||
hasAssigned = true
|
||||
|
||||
if len(bs) > 0 {
|
||||
if fieldType.Kind() == reflect.String {
|
||||
fieldValue.SetString(string(bs))
|
||||
continue
|
||||
}
|
||||
if fieldValue.CanAddr() {
|
||||
err := json.Unmarshal(bs, fieldValue.Addr().Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
x := reflect.New(fieldType)
|
||||
err := json.Unmarshal(bs, x.Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fieldValue.Set(x.Elem())
|
||||
x := reflect.New(fieldType)
|
||||
err := json.Unmarshal(bs, x.Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fieldValue.Set(x.Elem())
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
switch fieldType.Kind() {
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
// TODO: reimplement this
|
||||
var bs []byte
|
||||
if rawValueType.Kind() == reflect.String {
|
||||
bs = []byte(vv.String())
|
||||
} else if rawValueType.ConvertibleTo(core.BytesType) {
|
||||
bs = vv.Bytes()
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
hasAssigned = true
|
||||
if len(bs) > 0 {
|
||||
if fieldValue.CanAddr() {
|
||||
err := json.Unmarshal(bs, fieldValue.Addr().Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
x := reflect.New(fieldType)
|
||||
err := json.Unmarshal(bs, x.Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fieldValue.Set(x.Elem())
|
||||
switch fieldType.Kind() {
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
// TODO: reimplement this
|
||||
var bs []byte
|
||||
if rawValueType.Kind() == reflect.String {
|
||||
bs = []byte(vv.String())
|
||||
} else if rawValueType.ConvertibleTo(core.BytesType) {
|
||||
bs = vv.Bytes()
|
||||
}
|
||||
|
||||
hasAssigned = true
|
||||
if len(bs) > 0 {
|
||||
if fieldValue.CanAddr() {
|
||||
err := json.Unmarshal(bs, fieldValue.Addr().Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
x := reflect.New(fieldType)
|
||||
err := json.Unmarshal(bs, x.Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fieldValue.Set(x.Elem())
|
||||
}
|
||||
}
|
||||
case reflect.Slice, reflect.Array:
|
||||
switch rawValueType.Kind() {
|
||||
case reflect.Slice, reflect.Array:
|
||||
switch rawValueType.Kind() {
|
||||
case reflect.Slice, reflect.Array:
|
||||
switch rawValueType.Elem().Kind() {
|
||||
case reflect.Uint8:
|
||||
if fieldType.Elem().Kind() == reflect.Uint8 {
|
||||
hasAssigned = true
|
||||
if col.SQLType.IsText() {
|
||||
x := reflect.New(fieldType)
|
||||
err := json.Unmarshal(vv.Bytes(), x.Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fieldValue.Set(x.Elem())
|
||||
} else {
|
||||
if fieldValue.Len() > 0 {
|
||||
for i := 0; i < fieldValue.Len(); i++ {
|
||||
if i < vv.Len() {
|
||||
fieldValue.Index(i).Set(vv.Index(i))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < vv.Len(); i++ {
|
||||
fieldValue.Set(reflect.Append(*fieldValue, vv.Index(i)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.String:
|
||||
if rawValueType.Kind() == reflect.String {
|
||||
hasAssigned = true
|
||||
fieldValue.SetString(vv.String())
|
||||
}
|
||||
case reflect.Bool:
|
||||
if rawValueType.Kind() == reflect.Bool {
|
||||
hasAssigned = true
|
||||
fieldValue.SetBool(vv.Bool())
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
switch rawValueType.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
hasAssigned = true
|
||||
fieldValue.SetInt(vv.Int())
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
switch rawValueType.Kind() {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
hasAssigned = true
|
||||
fieldValue.SetFloat(vv.Float())
|
||||
}
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
switch rawValueType.Kind() {
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
hasAssigned = true
|
||||
fieldValue.SetUint(vv.Uint())
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
hasAssigned = true
|
||||
fieldValue.SetUint(uint64(vv.Int()))
|
||||
}
|
||||
case reflect.Struct:
|
||||
if fieldType.ConvertibleTo(core.TimeType) {
|
||||
dbTZ := session.engine.DatabaseTZ
|
||||
if col.TimeZone != nil {
|
||||
dbTZ = col.TimeZone
|
||||
}
|
||||
|
||||
if rawValueType == core.TimeType {
|
||||
switch rawValueType.Elem().Kind() {
|
||||
case reflect.Uint8:
|
||||
if fieldType.Elem().Kind() == reflect.Uint8 {
|
||||
hasAssigned = true
|
||||
|
||||
t := vv.Convert(core.TimeType).Interface().(time.Time)
|
||||
|
||||
z, _ := t.Zone()
|
||||
// set new location if database don't save timezone or give an incorrect timezone
|
||||
if len(z) == 0 || t.Year() == 0 || t.Location().String() != dbTZ.String() { // !nashtsai! HACK tmp work around for lib/pq doesn't properly time with location
|
||||
session.engine.logger.Debugf("empty zone key[%v] : %v | zone: %v | location: %+v\n", key, t, z, *t.Location())
|
||||
t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(),
|
||||
t.Minute(), t.Second(), t.Nanosecond(), dbTZ)
|
||||
}
|
||||
|
||||
t = t.In(session.engine.TZLocation)
|
||||
fieldValue.Set(reflect.ValueOf(t).Convert(fieldType))
|
||||
} else if rawValueType == core.IntType || rawValueType == core.Int64Type ||
|
||||
rawValueType == core.Int32Type {
|
||||
hasAssigned = true
|
||||
|
||||
t := time.Unix(vv.Int(), 0).In(session.engine.TZLocation)
|
||||
fieldValue.Set(reflect.ValueOf(t).Convert(fieldType))
|
||||
} else {
|
||||
if d, ok := vv.Interface().([]uint8); ok {
|
||||
hasAssigned = true
|
||||
t, err := session.byte2Time(col, d)
|
||||
if err != nil {
|
||||
session.engine.logger.Error("byte2Time error:", err.Error())
|
||||
hasAssigned = false
|
||||
} else {
|
||||
fieldValue.Set(reflect.ValueOf(t).Convert(fieldType))
|
||||
}
|
||||
} else if d, ok := vv.Interface().(string); ok {
|
||||
hasAssigned = true
|
||||
t, err := session.str2Time(col, d)
|
||||
if err != nil {
|
||||
session.engine.logger.Error("byte2Time error:", err.Error())
|
||||
hasAssigned = false
|
||||
} else {
|
||||
fieldValue.Set(reflect.ValueOf(t).Convert(fieldType))
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("rawValueType is %v, value is %v", rawValueType, vv.Interface())
|
||||
}
|
||||
}
|
||||
} else if nulVal, ok := fieldValue.Addr().Interface().(sql.Scanner); ok {
|
||||
// !<winxxp>! 增加支持sql.Scanner接口的结构,如sql.NullString
|
||||
hasAssigned = true
|
||||
if err := nulVal.Scan(vv.Interface()); err != nil {
|
||||
session.engine.logger.Error("sql.Sanner error:", err.Error())
|
||||
hasAssigned = false
|
||||
}
|
||||
} else if col.SQLType.IsJson() {
|
||||
if rawValueType.Kind() == reflect.String {
|
||||
hasAssigned = true
|
||||
x := reflect.New(fieldType)
|
||||
if len([]byte(vv.String())) > 0 {
|
||||
err := json.Unmarshal([]byte(vv.String()), x.Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fieldValue.Set(x.Elem())
|
||||
}
|
||||
} else if rawValueType.Kind() == reflect.Slice {
|
||||
hasAssigned = true
|
||||
x := reflect.New(fieldType)
|
||||
if len(vv.Bytes()) > 0 {
|
||||
if col.SQLType.IsText() {
|
||||
x := reflect.New(fieldType)
|
||||
err := json.Unmarshal(vv.Bytes(), x.Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fieldValue.Set(x.Elem())
|
||||
}
|
||||
}
|
||||
} else if session.statement.UseCascade {
|
||||
table, err := session.engine.autoMapType(*fieldValue)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hasAssigned = true
|
||||
if len(table.PrimaryKeys) != 1 {
|
||||
return nil, errors.New("unsupported non or composited primary key cascade")
|
||||
}
|
||||
var pk = make(core.PK, len(table.PrimaryKeys))
|
||||
pk[0], err = asKind(vv, rawValueType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !isPKZero(pk) {
|
||||
// !nashtsai! TODO for hasOne relationship, it's preferred to use join query for eager fetch
|
||||
// however, also need to consider adding a 'lazy' attribute to xorm tag which allow hasOne
|
||||
// property to be fetched lazily
|
||||
structInter := reflect.New(fieldValue.Type())
|
||||
has, err := session.ID(pk).NoCascade().get(structInter.Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if has {
|
||||
fieldValue.Set(structInter.Elem())
|
||||
} else {
|
||||
return nil, errors.New("cascade obj is not exist")
|
||||
if fieldValue.Len() > 0 {
|
||||
for i := 0; i < fieldValue.Len(); i++ {
|
||||
if i < vv.Len() {
|
||||
fieldValue.Index(i).Set(vv.Index(i))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < vv.Len(); i++ {
|
||||
fieldValue.Set(reflect.Append(*fieldValue, vv.Index(i)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Ptr:
|
||||
// !nashtsai! TODO merge duplicated codes above
|
||||
switch fieldType {
|
||||
// following types case matching ptr's native type, therefore assign ptr directly
|
||||
case core.PtrStringType:
|
||||
if rawValueType.Kind() == reflect.String {
|
||||
x := vv.String()
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrBoolType:
|
||||
if rawValueType.Kind() == reflect.Bool {
|
||||
x := vv.Bool()
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrTimeType:
|
||||
if rawValueType == core.PtrTimeType {
|
||||
hasAssigned = true
|
||||
var x = rawValue.Interface().(time.Time)
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrFloat64Type:
|
||||
if rawValueType.Kind() == reflect.Float64 {
|
||||
x := vv.Float()
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrUint64Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = uint64(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrInt64Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
x := vv.Int()
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrFloat32Type:
|
||||
if rawValueType.Kind() == reflect.Float64 {
|
||||
var x = float32(vv.Float())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrIntType:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = int(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrInt32Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = int32(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrInt8Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = int8(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrInt16Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = int16(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrUintType:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = uint(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrUint32Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = uint32(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.Uint8Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = uint8(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.Uint16Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = uint16(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.Complex64Type:
|
||||
var x complex64
|
||||
if len([]byte(vv.String())) > 0 {
|
||||
err := json.Unmarshal([]byte(vv.String()), &x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
hasAssigned = true
|
||||
case core.Complex128Type:
|
||||
var x complex128
|
||||
if len([]byte(vv.String())) > 0 {
|
||||
err := json.Unmarshal([]byte(vv.String()), &x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
hasAssigned = true
|
||||
} // switch fieldType
|
||||
} // switch fieldType.Kind()
|
||||
}
|
||||
case reflect.String:
|
||||
if rawValueType.Kind() == reflect.String {
|
||||
hasAssigned = true
|
||||
fieldValue.SetString(vv.String())
|
||||
}
|
||||
case reflect.Bool:
|
||||
if rawValueType.Kind() == reflect.Bool {
|
||||
hasAssigned = true
|
||||
fieldValue.SetBool(vv.Bool())
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
switch rawValueType.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
hasAssigned = true
|
||||
fieldValue.SetInt(vv.Int())
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
switch rawValueType.Kind() {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
hasAssigned = true
|
||||
fieldValue.SetFloat(vv.Float())
|
||||
}
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
switch rawValueType.Kind() {
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
hasAssigned = true
|
||||
fieldValue.SetUint(vv.Uint())
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
hasAssigned = true
|
||||
fieldValue.SetUint(uint64(vv.Int()))
|
||||
}
|
||||
case reflect.Struct:
|
||||
if fieldType.ConvertibleTo(core.TimeType) {
|
||||
dbTZ := session.engine.DatabaseTZ
|
||||
if col.TimeZone != nil {
|
||||
dbTZ = col.TimeZone
|
||||
}
|
||||
|
||||
// !nashtsai! for value can't be assigned directly fallback to convert to []byte then back to value
|
||||
if !hasAssigned {
|
||||
data, err := value2Bytes(&rawValue)
|
||||
if rawValueType == core.TimeType {
|
||||
hasAssigned = true
|
||||
|
||||
t := vv.Convert(core.TimeType).Interface().(time.Time)
|
||||
|
||||
z, _ := t.Zone()
|
||||
// set new location if database don't save timezone or give an incorrect timezone
|
||||
if len(z) == 0 || t.Year() == 0 || t.Location().String() != dbTZ.String() { // !nashtsai! HACK tmp work around for lib/pq doesn't properly time with location
|
||||
session.engine.logger.Debugf("empty zone key[%v] : %v | zone: %v | location: %+v\n", key, t, z, *t.Location())
|
||||
t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(),
|
||||
t.Minute(), t.Second(), t.Nanosecond(), dbTZ)
|
||||
}
|
||||
|
||||
t = t.In(session.engine.TZLocation)
|
||||
fieldValue.Set(reflect.ValueOf(t).Convert(fieldType))
|
||||
} else if rawValueType == core.IntType || rawValueType == core.Int64Type ||
|
||||
rawValueType == core.Int32Type {
|
||||
hasAssigned = true
|
||||
|
||||
t := time.Unix(vv.Int(), 0).In(session.engine.TZLocation)
|
||||
fieldValue.Set(reflect.ValueOf(t).Convert(fieldType))
|
||||
} else {
|
||||
if d, ok := vv.Interface().([]uint8); ok {
|
||||
hasAssigned = true
|
||||
t, err := session.byte2Time(col, d)
|
||||
if err != nil {
|
||||
session.engine.logger.Error("byte2Time error:", err.Error())
|
||||
hasAssigned = false
|
||||
} else {
|
||||
fieldValue.Set(reflect.ValueOf(t).Convert(fieldType))
|
||||
}
|
||||
} else if d, ok := vv.Interface().(string); ok {
|
||||
hasAssigned = true
|
||||
t, err := session.str2Time(col, d)
|
||||
if err != nil {
|
||||
session.engine.logger.Error("byte2Time error:", err.Error())
|
||||
hasAssigned = false
|
||||
} else {
|
||||
fieldValue.Set(reflect.ValueOf(t).Convert(fieldType))
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("rawValueType is %v, value is %v", rawValueType, vv.Interface())
|
||||
}
|
||||
}
|
||||
} else if nulVal, ok := fieldValue.Addr().Interface().(sql.Scanner); ok {
|
||||
// !<winxxp>! 增加支持sql.Scanner接口的结构,如sql.NullString
|
||||
hasAssigned = true
|
||||
if err := nulVal.Scan(vv.Interface()); err != nil {
|
||||
session.engine.logger.Error("sql.Sanner error:", err.Error())
|
||||
hasAssigned = false
|
||||
}
|
||||
} else if col.SQLType.IsJson() {
|
||||
if rawValueType.Kind() == reflect.String {
|
||||
hasAssigned = true
|
||||
x := reflect.New(fieldType)
|
||||
if len([]byte(vv.String())) > 0 {
|
||||
err := json.Unmarshal([]byte(vv.String()), x.Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fieldValue.Set(x.Elem())
|
||||
}
|
||||
} else if rawValueType.Kind() == reflect.Slice {
|
||||
hasAssigned = true
|
||||
x := reflect.New(fieldType)
|
||||
if len(vv.Bytes()) > 0 {
|
||||
err := json.Unmarshal(vv.Bytes(), x.Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fieldValue.Set(x.Elem())
|
||||
}
|
||||
}
|
||||
} else if session.statement.UseCascade {
|
||||
table, err := session.engine.autoMapType(*fieldValue)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = session.bytes2Value(col, fieldValue, data); err != nil {
|
||||
hasAssigned = true
|
||||
if len(table.PrimaryKeys) != 1 {
|
||||
return nil, errors.New("unsupported non or composited primary key cascade")
|
||||
}
|
||||
var pk = make(core.PK, len(table.PrimaryKeys))
|
||||
pk[0], err = asKind(vv, rawValueType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !isPKZero(pk) {
|
||||
// !nashtsai! TODO for hasOne relationship, it's preferred to use join query for eager fetch
|
||||
// however, also need to consider adding a 'lazy' attribute to xorm tag which allow hasOne
|
||||
// property to be fetched lazily
|
||||
structInter := reflect.New(fieldValue.Type())
|
||||
has, err := session.ID(pk).NoCascade().get(structInter.Interface())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if has {
|
||||
fieldValue.Set(structInter.Elem())
|
||||
} else {
|
||||
return nil, errors.New("cascade obj is not exist")
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Ptr:
|
||||
// !nashtsai! TODO merge duplicated codes above
|
||||
switch fieldType {
|
||||
// following types case matching ptr's native type, therefore assign ptr directly
|
||||
case core.PtrStringType:
|
||||
if rawValueType.Kind() == reflect.String {
|
||||
x := vv.String()
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrBoolType:
|
||||
if rawValueType.Kind() == reflect.Bool {
|
||||
x := vv.Bool()
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrTimeType:
|
||||
if rawValueType == core.PtrTimeType {
|
||||
hasAssigned = true
|
||||
var x = rawValue.Interface().(time.Time)
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrFloat64Type:
|
||||
if rawValueType.Kind() == reflect.Float64 {
|
||||
x := vv.Float()
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrUint64Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = uint64(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrInt64Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
x := vv.Int()
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrFloat32Type:
|
||||
if rawValueType.Kind() == reflect.Float64 {
|
||||
var x = float32(vv.Float())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrIntType:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = int(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrInt32Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = int32(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrInt8Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = int8(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrInt16Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = int16(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrUintType:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = uint(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.PtrUint32Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = uint32(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.Uint8Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = uint8(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.Uint16Type:
|
||||
if rawValueType.Kind() == reflect.Int64 {
|
||||
var x = uint16(vv.Int())
|
||||
hasAssigned = true
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
case core.Complex64Type:
|
||||
var x complex64
|
||||
if len([]byte(vv.String())) > 0 {
|
||||
err := json.Unmarshal([]byte(vv.String()), &x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
hasAssigned = true
|
||||
case core.Complex128Type:
|
||||
var x complex128
|
||||
if len([]byte(vv.String())) > 0 {
|
||||
err := json.Unmarshal([]byte(vv.String()), &x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fieldValue.Set(reflect.ValueOf(&x))
|
||||
}
|
||||
hasAssigned = true
|
||||
} // switch fieldType
|
||||
} // switch fieldType.Kind()
|
||||
|
||||
// !nashtsai! for value can't be assigned directly fallback to convert to []byte then back to value
|
||||
if !hasAssigned {
|
||||
data, err := value2Bytes(&rawValue)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = session.bytes2Value(col, fieldValue, data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -828,15 +834,6 @@ func (session *Session) LastSQL() (string, []interface{}) {
|
||||
return session.lastSQL, session.lastSQLArgs
|
||||
}
|
||||
|
||||
// tbName get some table's table name
|
||||
func (session *Session) tbNameNoSchema(table *core.Table) string {
|
||||
if len(session.statement.AltTableName) > 0 {
|
||||
return session.statement.AltTableName
|
||||
}
|
||||
|
||||
return table.Name
|
||||
}
|
||||
|
||||
// Unscoped always disable struct tag "deleted"
|
||||
func (session *Session) Unscoped() *Session {
|
||||
session.statement.Unscoped()
|
||||
|
||||
107
vendor/github.com/go-xorm/xorm/session_cols.go
generated
vendored
@@ -4,6 +4,113 @@
|
||||
|
||||
package xorm
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-xorm/core"
|
||||
)
|
||||
|
||||
type incrParam struct {
|
||||
colName string
|
||||
arg interface{}
|
||||
}
|
||||
|
||||
type decrParam struct {
|
||||
colName string
|
||||
arg interface{}
|
||||
}
|
||||
|
||||
type exprParam struct {
|
||||
colName string
|
||||
expr string
|
||||
}
|
||||
|
||||
type columnMap []string
|
||||
|
||||
func (m columnMap) contain(colName string) bool {
|
||||
if len(m) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
n := len(colName)
|
||||
for _, mk := range m {
|
||||
if len(mk) != n {
|
||||
continue
|
||||
}
|
||||
if strings.EqualFold(mk, colName) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func setColumnInt(bean interface{}, col *core.Column, t int64) {
|
||||
v, err := col.ValueOf(bean)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if v.CanSet() {
|
||||
switch v.Type().Kind() {
|
||||
case reflect.Int, reflect.Int64, reflect.Int32:
|
||||
v.SetInt(t)
|
||||
case reflect.Uint, reflect.Uint64, reflect.Uint32:
|
||||
v.SetUint(uint64(t))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func setColumnTime(bean interface{}, col *core.Column, t time.Time) {
|
||||
v, err := col.ValueOf(bean)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if v.CanSet() {
|
||||
switch v.Type().Kind() {
|
||||
case reflect.Struct:
|
||||
v.Set(reflect.ValueOf(t).Convert(v.Type()))
|
||||
case reflect.Int, reflect.Int64, reflect.Int32:
|
||||
v.SetInt(t.Unix())
|
||||
case reflect.Uint, reflect.Uint64, reflect.Uint32:
|
||||
v.SetUint(uint64(t.Unix()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getFlagForColumn(m map[string]bool, col *core.Column) (val bool, has bool) {
|
||||
if len(m) == 0 {
|
||||
return false, false
|
||||
}
|
||||
|
||||
n := len(col.Name)
|
||||
|
||||
for mk := range m {
|
||||
if len(mk) != n {
|
||||
continue
|
||||
}
|
||||
if strings.EqualFold(mk, col.Name) {
|
||||
return m[mk], true
|
||||
}
|
||||
}
|
||||
|
||||
return false, false
|
||||
}
|
||||
|
||||
func col2NewCols(columns ...string) []string {
|
||||
newColumns := make([]string, 0, len(columns))
|
||||
for _, col := range columns {
|
||||
col = strings.Replace(col, "`", "", -1)
|
||||
col = strings.Replace(col, `"`, "", -1)
|
||||
ccols := strings.Split(col, ",")
|
||||
for _, c := range ccols {
|
||||
newColumns = append(newColumns, strings.TrimSpace(c))
|
||||
}
|
||||
}
|
||||
return newColumns
|
||||
}
|
||||
|
||||
// Incr provides a query string like "count = count + 1"
|
||||
func (session *Session) Incr(column string, arg ...interface{}) *Session {
|
||||
session.statement.Incr(column, arg...)
|
||||
|
||||
6
vendor/github.com/go-xorm/xorm/session_delete.go
generated
vendored
@@ -27,7 +27,7 @@ func (session *Session) cacheDelete(table *core.Table, tableName, sqlStr string,
return ErrCacheFailed
}

cacher := session.engine.getCacher2(table)
cacher := session.engine.getCacher(tableName)
pkColumns := table.PKColumns()
ids, err := core.GetCacheSql(cacher, tableName, newsql, args)
if err != nil {
@@ -79,7 +79,7 @@ func (session *Session) Delete(bean interface{}) (int64, error) {
defer session.Close()
}

if err := session.statement.setRefValue(rValue(bean)); err != nil {
if err := session.statement.setRefBean(bean); err != nil {
return 0, err
}

@@ -199,7 +199,7 @@ func (session *Session) Delete(bean interface{}) (int64, error) {
})
}

if cacher := session.engine.getCacher2(table); cacher != nil && session.statement.UseCache {
if cacher := session.engine.getCacher(tableName); cacher != nil && session.statement.UseCache {
session.cacheDelete(table, tableNameNoQuote, deleteSQL, argsForCache...)
}

15
vendor/github.com/go-xorm/xorm/session_exist.go
generated
vendored
@@ -10,6 +10,7 @@ import (
"reflect"

"github.com/go-xorm/builder"
"github.com/go-xorm/core"
)

// Exist returns true if the record exist otherwise return false
@@ -35,10 +36,18 @@ func (session *Session) Exist(bean ...interface{}) (bool, error) {
return false, err
}

sqlStr = fmt.Sprintf("SELECT * FROM %s WHERE %s LIMIT 1", tableName, condSQL)
if session.engine.dialect.DBType() == core.MSSQL {
sqlStr = fmt.Sprintf("SELECT top 1 * FROM %s WHERE %s", tableName, condSQL)
} else {
sqlStr = fmt.Sprintf("SELECT * FROM %s WHERE %s LIMIT 1", tableName, condSQL)
}
args = condArgs
} else {
sqlStr = fmt.Sprintf("SELECT * FROM %s LIMIT 1", tableName)
if session.engine.dialect.DBType() == core.MSSQL {
sqlStr = fmt.Sprintf("SELECT top 1 * FROM %s", tableName)
} else {
sqlStr = fmt.Sprintf("SELECT * FROM %s LIMIT 1", tableName)
}
args = []interface{}{}
}
} else {
@@ -48,7 +57,7 @@ func (session *Session) Exist(bean ...interface{}) (bool, error) {
}

if beanValue.Elem().Kind() == reflect.Struct {
if err := session.statement.setRefValue(beanValue.Elem()); err != nil {
if err := session.statement.setRefBean(bean[0]); err != nil {
return false, err
}
}

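session_exist.go now branches on the dialect: MSSQL has no LIMIT clause, so the one-row existence probe is written as SELECT top 1 ..., while every other database keeps LIMIT 1. A standalone sketch of that query builder; the dialect names are plain strings here rather than xorm's core constants.

```go
package main

import "fmt"

// existsQuery builds a cheap one-row probe; MSSQL spells the row cap as TOP 1,
// everything else as LIMIT 1. An empty cond means "any row in the table".
func existsQuery(dialect, table, cond string) string {
	where := ""
	if cond != "" {
		where = " WHERE " + cond
	}
	if dialect == "mssql" {
		return fmt.Sprintf("SELECT TOP 1 * FROM %s%s", table, where)
	}
	return fmt.Sprintf("SELECT * FROM %s%s LIMIT 1", table, where)
}

func main() {
	fmt.Println(existsQuery("mssql", "dashboard", "org_id = ?"))
	fmt.Println(existsQuery("postgres", "dashboard", "org_id = ?"))
	fmt.Println(existsQuery("sqlite3", "dashboard", ""))
}
```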
49
vendor/github.com/go-xorm/xorm/session_find.go
generated
vendored
@@ -29,6 +29,39 @@ func (session *Session) Find(rowsSlicePtr interface{}, condiBean ...interface{})
 	return session.find(rowsSlicePtr, condiBean...)
 }
 
+// FindAndCount find the results and also return the counts
+func (session *Session) FindAndCount(rowsSlicePtr interface{}, condiBean ...interface{}) (int64, error) {
+	if session.isAutoClose {
+		defer session.Close()
+	}
+
+	session.autoResetStatement = false
+	err := session.find(rowsSlicePtr, condiBean...)
+	if err != nil {
+		return 0, err
+	}
+
+	sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr))
+	if sliceValue.Kind() != reflect.Slice && sliceValue.Kind() != reflect.Map {
+		return 0, errors.New("needs a pointer to a slice or a map")
+	}
+
+	sliceElementType := sliceValue.Type().Elem()
+	if sliceElementType.Kind() == reflect.Ptr {
+		sliceElementType = sliceElementType.Elem()
+	}
+	session.autoResetStatement = true
+
+	if session.statement.selectStr != "" {
+		session.statement.selectStr = ""
+	}
+	if session.statement.OrderStr != "" {
+		session.statement.OrderStr = ""
+	}
+
+	return session.Count(reflect.New(sliceElementType).Interface())
+}
+
 func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{}) error {
 	sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr))
 	if sliceValue.Kind() != reflect.Slice && sliceValue.Kind() != reflect.Map {
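FindAndCount, added in the hunk above, runs find and then a Count on the same statement (clearing select and order first), returning the count for the same conditions alongside the filled slice. A usage sketch for paging, assuming the usual session API (the `Org` type is illustrative):

```go
package example

import "github.com/go-xorm/xorm"

// Org is an illustrative mapped struct, not part of this diff.
type Org struct {
	Id   int64
	Name string
}

// pageOrgs fills one page of rows and also returns the count that
// FindAndCount computes by re-running Count on the same statement.
func pageOrgs(engine *xorm.Engine, page, size int) ([]Org, int64, error) {
	sess := engine.NewSession()
	defer sess.Close()

	var orgs []Org
	total, err := sess.Limit(size, (page-1)*size).FindAndCount(&orgs)
	return orgs, total, err
}
```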
@@ -42,7 +75,7 @@ func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{})
 	if sliceElementType.Kind() == reflect.Ptr {
 		if sliceElementType.Elem().Kind() == reflect.Struct {
 			pv := reflect.New(sliceElementType.Elem())
-			if err := session.statement.setRefValue(pv.Elem()); err != nil {
+			if err := session.statement.setRefValue(pv); err != nil {
 				return err
 			}
 		} else {
@@ -50,7 +83,7 @@ func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{})
 		}
 	} else if sliceElementType.Kind() == reflect.Struct {
 		pv := reflect.New(sliceElementType)
-		if err := session.statement.setRefValue(pv.Elem()); err != nil {
+		if err := session.statement.setRefValue(pv); err != nil {
 			return err
 		}
 	} else {
@@ -128,7 +161,7 @@ func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{})
 	}
 
 	args = append(session.statement.joinArgs, condArgs...)
-	sqlStr, err = session.statement.genSelectSQL(columnStr, condSQL)
+	sqlStr, err = session.statement.genSelectSQL(columnStr, condSQL, true, true)
 	if err != nil {
 		return err
 	}
@@ -143,7 +176,7 @@ func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{})
 	}
 
 	if session.canCache() {
-		if cacher := session.engine.getCacher2(table); cacher != nil &&
+		if cacher := session.engine.getCacher(table.Name); cacher != nil &&
 			!session.statement.IsDistinct &&
 			!session.statement.unscoped {
 			err = session.cacheFind(sliceElementType, sqlStr, rowsSlicePtr, args...)
@@ -288,6 +321,12 @@ func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr in
 		return ErrCacheFailed
 	}
 
+	tableName := session.statement.TableName()
+	cacher := session.engine.getCacher(tableName)
+	if cacher == nil {
+		return nil
+	}
+
 	for _, filter := range session.engine.dialect.Filters() {
 		sqlStr = filter.Do(sqlStr, session.engine.dialect, session.statement.RefTable)
 	}
@@ -297,9 +336,7 @@ func (session *Session) cacheFind(t reflect.Type, sqlStr string, rowsSlicePtr in
 		return ErrCacheFailed
 	}
 
-	tableName := session.statement.TableName()
 	table := session.statement.RefTable
-	cacher := session.engine.getCacher2(table)
 	ids, err := core.GetCacheSql(cacher, tableName, newsql, args)
 	if err != nil {
 		rows, err := session.queryRows(newsql, args...)
15 vendor/github.com/go-xorm/xorm/session_get.go generated vendored
@@ -5,6 +5,7 @@
 package xorm
 
 import (
+	"database/sql"
 	"errors"
 	"reflect"
 	"strconv"
@@ -30,7 +31,7 @@ func (session *Session) get(bean interface{}) (bool, error) {
 	}
 
 	if beanValue.Elem().Kind() == reflect.Struct {
-		if err := session.statement.setRefValue(beanValue.Elem()); err != nil {
+		if err := session.statement.setRefBean(bean); err != nil {
 			return false, err
 		}
 	}
@@ -56,7 +57,7 @@ func (session *Session) get(bean interface{}) (bool, error) {
 	table := session.statement.RefTable
 
 	if session.canCache() && beanValue.Elem().Kind() == reflect.Struct {
-		if cacher := session.engine.getCacher2(table); cacher != nil &&
+		if cacher := session.engine.getCacher(table.Name); cacher != nil &&
 			!session.statement.unscoped {
 			has, err := session.cacheGet(bean, sqlStr, args...)
 			if err != ErrCacheFailed {
@@ -79,6 +80,13 @@ func (session *Session) nocacheGet(beanKind reflect.Kind, table *core.Table, bea
 		return false, nil
 	}
 
+	switch bean.(type) {
+	case sql.NullInt64, sql.NullBool, sql.NullFloat64, sql.NullString:
+		return true, rows.Scan(&bean)
+	case *sql.NullInt64, *sql.NullBool, *sql.NullFloat64, *sql.NullString:
+		return true, rows.Scan(bean)
+	}
+
 	switch beanKind {
 	case reflect.Struct:
 		fields, err := rows.Columns()
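The switch added to nocacheGet lets Get scan a single value straight into a database/sql null type before the struct-mapping path runs. A hedged sketch of the kind of call this enables (table, column, and the raw-SQL chain are assumptions, not taken from this diff):

```go
package example

import (
	"database/sql"

	"github.com/go-xorm/xorm"
)

// lastLogin reads one nullable column into sql.NullString; per the hunk
// above, nocacheGet hands pointer-to-null types directly to rows.Scan.
func lastLogin(engine *xorm.Engine, userID int64) (sql.NullString, error) {
	var v sql.NullString
	_, err := engine.SQL("SELECT last_login FROM user WHERE id = ?", userID).Get(&v)
	return v, err
}
```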
@@ -126,8 +134,9 @@ func (session *Session) cacheGet(bean interface{}, sqlStr string, args ...interf
 		return false, ErrCacheFailed
 	}
 
-	cacher := session.engine.getCacher2(session.statement.RefTable)
+	tableName := session.statement.TableName()
+	cacher := session.engine.getCacher(tableName)
 
 	session.engine.logger.Debug("[cacheGet] find sql:", newsql, args)
 	table := session.statement.RefTable
 	ids, err := core.GetCacheSql(cacher, tableName, newsql, args)
158 vendor/github.com/go-xorm/xorm/session_insert.go generated vendored
@@ -66,11 +66,12 @@ func (session *Session) innerInsertMulti(rowsSlicePtr interface{}) (int64, error
 		return 0, errors.New("could not insert a empty slice")
 	}
 
-	if err := session.statement.setRefValue(reflect.ValueOf(sliceValue.Index(0).Interface())); err != nil {
+	if err := session.statement.setRefBean(sliceValue.Index(0).Interface()); err != nil {
 		return 0, err
 	}
 
-	if len(session.statement.TableName()) <= 0 {
+	tableName := session.statement.TableName()
+	if len(tableName) <= 0 {
 		return 0, ErrTableNotFound
 	}
 
@@ -115,15 +116,11 @@ func (session *Session) innerInsertMulti(rowsSlicePtr interface{}) (int64, error
 			if col.IsDeleted {
 				continue
 			}
-			if session.statement.ColumnStr != "" {
-				if _, ok := getFlagForColumn(session.statement.columnMap, col); !ok {
-					continue
-				}
+			if session.statement.omitColumnMap.contain(col.Name) {
+				continue
 			}
-			if session.statement.OmitStr != "" {
-				if _, ok := getFlagForColumn(session.statement.columnMap, col); ok {
-					continue
-				}
+			if len(session.statement.columnMap) > 0 && !session.statement.columnMap.contain(col.Name) {
+				continue
 			}
 			if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime {
 				val, t := session.engine.nowTime(col)
@@ -170,15 +167,11 @@ func (session *Session) innerInsertMulti(rowsSlicePtr interface{}) (int64, error
 			if col.IsDeleted {
 				continue
 			}
-			if session.statement.ColumnStr != "" {
-				if _, ok := getFlagForColumn(session.statement.columnMap, col); !ok {
-					continue
-				}
+			if session.statement.omitColumnMap.contain(col.Name) {
+				continue
 			}
-			if session.statement.OmitStr != "" {
-				if _, ok := getFlagForColumn(session.statement.columnMap, col); ok {
-					continue
-				}
+			if len(session.statement.columnMap) > 0 && !session.statement.columnMap.contain(col.Name) {
+				continue
 			}
 			if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime {
 				val, t := session.engine.nowTime(col)
@@ -213,7 +206,6 @@ func (session *Session) innerInsertMulti(rowsSlicePtr interface{}) (int64, error
 
 	var sql = "INSERT INTO %s (%v%v%v) VALUES (%v)"
 	var statement string
-	var tableName = session.statement.TableName()
 	if session.engine.dialect.DBType() == core.ORACLE {
 		sql = "INSERT ALL INTO %s (%v%v%v) VALUES (%v) SELECT 1 FROM DUAL"
 		temp := fmt.Sprintf(") INTO %s (%v%v%v) VALUES (",
@@ -240,9 +232,7 @@ func (session *Session) innerInsertMulti(rowsSlicePtr interface{}) (int64, error
 		return 0, err
 	}
 
-	if cacher := session.engine.getCacher2(table); cacher != nil && session.statement.UseCache {
-		session.cacheInsert(table, tableName)
-	}
+	session.cacheInsert(tableName)
 
 	lenAfterClosures := len(session.afterClosures)
 	for i := 0; i < size; i++ {
@@ -298,7 +288,7 @@ func (session *Session) InsertMulti(rowsSlicePtr interface{}) (int64, error) {
 }
 
 func (session *Session) innerInsert(bean interface{}) (int64, error) {
-	if err := session.statement.setRefValue(rValue(bean)); err != nil {
+	if err := session.statement.setRefBean(bean); err != nil {
 		return 0, err
 	}
 	if len(session.statement.TableName()) <= 0 {
@@ -316,8 +306,8 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) {
 	if processor, ok := interface{}(bean).(BeforeInsertProcessor); ok {
 		processor.BeforeInsert()
 	}
-	// --
-	colNames, args, err := genCols(session.statement.RefTable, session, bean, false, false)
+
+	colNames, args, err := session.genInsertColumns(bean)
 	if err != nil {
 		return 0, err
 	}
@@ -402,9 +392,7 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) {
 
 	defer handleAfterInsertProcessorFunc(bean)
 
-	if cacher := session.engine.getCacher2(table); cacher != nil && session.statement.UseCache {
-		session.cacheInsert(table, tableName)
-	}
+	session.cacheInsert(tableName)
 
 	if table.Version != "" && session.statement.checkVersion {
 		verValue, err := table.VersionColumn().ValueOf(bean)
@@ -447,9 +435,7 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) {
 	}
 	defer handleAfterInsertProcessorFunc(bean)
 
-	if cacher := session.engine.getCacher2(table); cacher != nil && session.statement.UseCache {
-		session.cacheInsert(table, tableName)
-	}
+	session.cacheInsert(tableName)
 
 	if table.Version != "" && session.statement.checkVersion {
 		verValue, err := table.VersionColumn().ValueOf(bean)
@@ -490,9 +476,7 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) {
 
 	defer handleAfterInsertProcessorFunc(bean)
 
-	if cacher := session.engine.getCacher2(table); cacher != nil && session.statement.UseCache {
-		session.cacheInsert(table, tableName)
-	}
+	session.cacheInsert(tableName)
 
 	if table.Version != "" && session.statement.checkVersion {
 		verValue, err := table.VersionColumn().ValueOf(bean)
@@ -539,16 +523,104 @@ func (session *Session) InsertOne(bean interface{}) (int64, error) {
 	return session.innerInsert(bean)
 }
 
-func (session *Session) cacheInsert(table *core.Table, tables ...string) error {
-	if table == nil {
-		return ErrCacheFailed
+func (session *Session) cacheInsert(table string) error {
+	if !session.statement.UseCache {
+		return nil
 	}
 
-	cacher := session.engine.getCacher2(table)
-	for _, t := range tables {
-		session.engine.logger.Debug("[cache] clear sql:", t)
-		cacher.ClearIds(t)
+	cacher := session.engine.getCacher(table)
+	if cacher == nil {
+		return nil
 	}
+
+	session.engine.logger.Debug("[cache] clear sql:", table)
+	cacher.ClearIds(table)
 	return nil
 }
+
+// genInsertColumns generates insert needed columns
+func (session *Session) genInsertColumns(bean interface{}) ([]string, []interface{}, error) {
+	table := session.statement.RefTable
+	colNames := make([]string, 0, len(table.ColumnsSeq()))
+	args := make([]interface{}, 0, len(table.ColumnsSeq()))
+
+	for _, col := range table.Columns() {
+		if col.MapType == core.ONLYFROMDB {
+			continue
+		}
+
+		if col.IsDeleted {
+			continue
+		}
+
+		if session.statement.omitColumnMap.contain(col.Name) {
+			continue
+		}
+
+		if len(session.statement.columnMap) > 0 && !session.statement.columnMap.contain(col.Name) {
+			continue
+		}
+
+		if _, ok := session.statement.incrColumns[col.Name]; ok {
+			continue
+		} else if _, ok := session.statement.decrColumns[col.Name]; ok {
+			continue
+		}
+
+		fieldValuePtr, err := col.ValueOf(bean)
+		if err != nil {
+			return nil, nil, err
+		}
+		fieldValue := *fieldValuePtr
+
+		if col.IsAutoIncrement {
+			switch fieldValue.Type().Kind() {
+			case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int, reflect.Int64:
+				if fieldValue.Int() == 0 {
+					continue
+				}
+			case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint, reflect.Uint64:
+				if fieldValue.Uint() == 0 {
+					continue
+				}
+			case reflect.String:
+				if len(fieldValue.String()) == 0 {
+					continue
+				}
+			case reflect.Ptr:
+				if fieldValue.Pointer() == 0 {
+					continue
+				}
+			}
+		}
+
+		// !evalphobia! set fieldValue as nil when column is nullable and zero-value
+		if _, ok := getFlagForColumn(session.statement.nullableMap, col); ok {
+			if col.Nullable && isZero(fieldValue.Interface()) {
+				var nilValue *int
+				fieldValue = reflect.ValueOf(nilValue)
+			}
+		}
+
+		if (col.IsCreated || col.IsUpdated) && session.statement.UseAutoTime /*&& isZero(fieldValue.Interface())*/ {
+			// if time is non-empty, then set to auto time
+			val, t := session.engine.nowTime(col)
+			args = append(args, val)
+
+			var colName = col.Name
+			session.afterClosures = append(session.afterClosures, func(bean interface{}) {
+				col := table.GetColumn(colName)
+				setColumnTime(bean, col, t)
+			})
+		} else if col.IsVersion && session.statement.checkVersion {
+			args = append(args, 1)
+		} else {
+			arg, err := session.value2Interface(col, fieldValue)
+			if err != nil {
+				return colNames, args, err
+			}
+			args = append(args, arg)
+		}
+
+		colNames = append(colNames, col.Name)
+	}
+	return colNames, args, nil
+}
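genInsertColumns above decides which columns make it into the INSERT: it skips ONLYFROMDB and soft-delete columns, anything in omitColumnMap, columns excluded by a non-empty columnMap, Incr/Decr expression columns, and zero-valued auto-increment keys. From the caller's side that corresponds to the public Cols/Omit chain; a hedged sketch (struct and column names are illustrative, not from this diff):

```go
package example

import "github.com/go-xorm/xorm"

// Article is illustrative; the `created` tag makes xorm fill Created
// automatically, which is the UseAutoTime branch above.
type Article struct {
	Id      int64
	Title   string
	Body    string
	Created int64 `xorm:"created"`
}

// insertTitleOnly leaves Body out of the generated column list via Omit,
// which is what the omitColumnMap check in genInsertColumns consults.
func insertTitleOnly(engine *xorm.Engine, a *Article) error {
	_, err := engine.Omit("body").Insert(a)
	return err
}
```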
18 vendor/github.com/go-xorm/xorm/session_query.go generated vendored
@@ -17,7 +17,17 @@ import (
 
 func (session *Session) genQuerySQL(sqlorArgs ...interface{}) (string, []interface{}, error) {
 	if len(sqlorArgs) > 0 {
-		return sqlorArgs[0].(string), sqlorArgs[1:], nil
+		switch sqlorArgs[0].(type) {
+		case string:
+			return sqlorArgs[0].(string), sqlorArgs[1:], nil
+		case *builder.Builder:
+			return sqlorArgs[0].(*builder.Builder).ToSQL()
+		case builder.Builder:
+			bd := sqlorArgs[0].(builder.Builder)
+			return bd.ToSQL()
+		default:
+			return "", nil, ErrUnSupportedType
+		}
 	}
 
 	if session.statement.RawSQL != "" {
@@ -54,13 +64,17 @@ func (session *Session) genQuerySQL(sqlorArgs ...interface{}) (string, []interfa
 		}
 	}
 
+	if err := session.statement.processIDParam(); err != nil {
+		return "", nil, err
+	}
+
 	condSQL, condArgs, err := builder.ToSQL(session.statement.cond)
 	if err != nil {
 		return "", nil, err
 	}
 
 	args := append(session.statement.joinArgs, condArgs...)
-	sqlStr, err := session.statement.genSelectSQL(columnStr, condSQL)
+	sqlStr, err := session.statement.genSelectSQL(columnStr, condSQL, true, true)
 	if err != nil {
 		return "", nil, err
 	}
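The switch added to genQuerySQL means Query now accepts a *builder.Builder (or a builder.Builder value) as well as a raw SQL string, converting it with ToSQL(). A hedged usage sketch (table and columns are illustrative):

```go
package example

import (
	"github.com/go-xorm/builder"
	"github.com/go-xorm/xorm"
)

// activeUsers passes a builder expression instead of a hand-written SQL
// string; genQuerySQL converts it via ToSQL() per the hunk above.
func activeUsers(engine *xorm.Engine) ([]map[string][]byte, error) {
	sess := engine.NewSession()
	defer sess.Close()

	b := builder.Select("id", "name").From("user").Where(builder.Eq{"active": true})
	return sess.Query(b)
}
```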
Some files were not shown because too many files have changed in this diff.