diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go
new file mode 100644
index 000000000..5232cb673
--- /dev/null
+++ b/vendor/cloud.google.com/go/iam/iam.go
@@ -0,0 +1,315 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package iam supports the resource-specific operations of Google Cloud
+// IAM (Identity and Access Management) for the Google Cloud Libraries.
+// See https://cloud.google.com/iam for more about IAM.
+//
+// Users of the Google Cloud Libraries will typically not use this package
+// directly. Instead they will begin with some resource that supports IAM, like
+// a pubsub topic, and call its IAM method to get a Handle for that resource.
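+//
+// A rough sketch of typical use, from a caller's perspective (topic is an
+// assumed *pubsub.Topic; the member and role are illustrative):
+//
+//	policy, err := topic.IAM().Policy(ctx)
+//	if err != nil {
+//		// handle err
+//	}
+//	policy.Add("user:alice@example.com", iam.Viewer)
+//	if err := topic.IAM().SetPolicy(ctx, policy); err != nil {
+//		// handle err
+//	}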
+package iam
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ gax "github.com/googleapis/gax-go/v2"
+ pb "google.golang.org/genproto/googleapis/iam/v1"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// client abstracts the IAMPolicy API to allow multiple implementations.
+type client interface {
+ Get(ctx context.Context, resource string) (*pb.Policy, error)
+ Set(ctx context.Context, resource string, p *pb.Policy) error
+ Test(ctx context.Context, resource string, perms []string) ([]string, error)
+}
+
+// grpcClient implements client for the standard gRPC-based IAMPolicy service.
+type grpcClient struct {
+ c pb.IAMPolicyClient
+}
+
+var withRetry = gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.DeadlineExceeded,
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60 * time.Second,
+ Multiplier: 1.3,
+ })
+})
+
+func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
+ var proto *pb.Policy
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
+ ctx = insertMetadata(ctx, md)
+
+ err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
+ var err error
+ proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
+ return err
+ }, withRetry)
+ if err != nil {
+ return nil, err
+ }
+ return proto, nil
+}
+
+func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
+ ctx = insertMetadata(ctx, md)
+
+ return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
+ _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
+ Resource: resource,
+ Policy: p,
+ })
+ return err
+ }, withRetry)
+}
+
+func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
+ var res *pb.TestIamPermissionsResponse
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
+ ctx = insertMetadata(ctx, md)
+
+ err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
+ var err error
+ res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
+ Resource: resource,
+ Permissions: perms,
+ })
+ return err
+ }, withRetry)
+ if err != nil {
+ return nil, err
+ }
+ return res.Permissions, nil
+}
+
+// A Handle provides IAM operations for a resource.
+type Handle struct {
+ c client
+ resource string
+}
+
+// InternalNewHandle is for use by the Google Cloud Libraries only.
+//
+// InternalNewHandle returns a Handle for resource.
+// The conn parameter refers to a server that must support the IAMPolicy service.
+func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
+ return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
+}
+
+// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
+//
+// InternalNewHandleGRPCClient returns a Handle for resource using the given
+// gRPC service that implements IAM as a mixin.
+func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
+ return InternalNewHandleClient(&grpcClient{c: c}, resource)
+}
+
+// InternalNewHandleClient is for use by the Google Cloud Libraries only.
+//
+// InternalNewHandleClient returns a Handle for resource using the given
+// client implementation.
+func InternalNewHandleClient(c client, resource string) *Handle {
+ return &Handle{
+ c: c,
+ resource: resource,
+ }
+}
+
+// Policy retrieves the IAM policy for the resource.
+func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
+ proto, err := h.c.Get(ctx, h.resource)
+ if err != nil {
+ return nil, err
+ }
+ return &Policy{InternalProto: proto}, nil
+}
+
+// SetPolicy replaces the resource's current policy with the supplied Policy.
+//
+// If policy was created from a prior call to Get, then the modification will
+// only succeed if the policy has not changed since the Get.
+func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
+ return h.c.Set(ctx, h.resource, policy.InternalProto)
+}
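+
+// A common pattern with SetPolicy, sketched here for illustration from a
+// caller's perspective (h is an assumed *iam.Handle; the member, role, and
+// loop bound are placeholders): re-read the policy and retry when a
+// concurrent modification causes the conditional write to fail.
+//
+//	for i := 0; i < 3; i++ {
+//		p, err := h.Policy(ctx)
+//		if err != nil {
+//			break
+//		}
+//		p.Add("group:devs@example.com", iam.Editor)
+//		if err = h.SetPolicy(ctx, p); err == nil {
+//			break
+//		}
+//	}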
+
+// TestPermissions returns the subset of permissions that the caller has on the resource.
+func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
+ return h.c.Test(ctx, h.resource, permissions)
+}
+
+// A RoleName is a name representing a collection of permissions.
+type RoleName string
+
+// Common role names.
+const (
+ Owner RoleName = "roles/owner"
+ Editor RoleName = "roles/editor"
+ Viewer RoleName = "roles/viewer"
+)
+
+const (
+ // AllUsers is a special member that denotes all users, even unauthenticated ones.
+ AllUsers = "allUsers"
+
+ // AllAuthenticatedUsers is a special member that denotes all authenticated users.
+ AllAuthenticatedUsers = "allAuthenticatedUsers"
+)
+
+// A Policy is a list of Bindings representing roles
+// granted to members.
+//
+// The zero Policy is a valid policy with no bindings.
+type Policy struct {
+ // TODO(jba): when type aliases are available, put Policy into an internal package
+ // and provide an exported alias here.
+
+ // This field is exported for use by the Google Cloud Libraries only.
+ // It may become unexported in a future release.
+ InternalProto *pb.Policy
+}
+
+// Members returns the list of members with the supplied role.
+// The return value should not be modified. Use Add and Remove
+// to modify the members of a role.
+func (p *Policy) Members(r RoleName) []string {
+ b := p.binding(r)
+ if b == nil {
+ return nil
+ }
+ return b.Members
+}
+
+// HasRole reports whether member has role r.
+func (p *Policy) HasRole(member string, r RoleName) bool {
+ return memberIndex(member, p.binding(r)) >= 0
+}
+
+// Add adds member member to role r if it is not already present.
+// A new binding is created if there is no binding for the role.
+func (p *Policy) Add(member string, r RoleName) {
+ b := p.binding(r)
+ if b == nil {
+ if p.InternalProto == nil {
+ p.InternalProto = &pb.Policy{}
+ }
+ p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
+ Role: string(r),
+ Members: []string{member},
+ })
+ return
+ }
+ if memberIndex(member, b) < 0 {
+ b.Members = append(b.Members, member)
+ return
+ }
+}
+
+// Remove removes member from role r if it is present.
+func (p *Policy) Remove(member string, r RoleName) {
+ bi := p.bindingIndex(r)
+ if bi < 0 {
+ return
+ }
+ bindings := p.InternalProto.Bindings
+ b := bindings[bi]
+ mi := memberIndex(member, b)
+ if mi < 0 {
+ return
+ }
+ // Order doesn't matter for bindings or members, so to remove, move the last item
+ // into the removed spot and shrink the slice.
+ if len(b.Members) == 1 {
+ // Remove binding.
+ last := len(bindings) - 1
+ bindings[bi] = bindings[last]
+ bindings[last] = nil
+ p.InternalProto.Bindings = bindings[:last]
+ return
+ }
+ // Remove member.
+ // TODO(jba): worry about multiple copies of m?
+ last := len(b.Members) - 1
+ b.Members[mi] = b.Members[last]
+ b.Members[last] = ""
+ b.Members = b.Members[:last]
+}
+
+// Roles returns the names of all the roles that appear in the Policy.
+func (p *Policy) Roles() []RoleName {
+ if p.InternalProto == nil {
+ return nil
+ }
+ var rns []RoleName
+ for _, b := range p.InternalProto.Bindings {
+ rns = append(rns, RoleName(b.Role))
+ }
+ return rns
+}
+
+// binding returns the Binding for the supplied role, or nil if there isn't one.
+func (p *Policy) binding(r RoleName) *pb.Binding {
+ i := p.bindingIndex(r)
+ if i < 0 {
+ return nil
+ }
+ return p.InternalProto.Bindings[i]
+}
+
+func (p *Policy) bindingIndex(r RoleName) int {
+ if p.InternalProto == nil {
+ return -1
+ }
+ for i, b := range p.InternalProto.Bindings {
+ if b.Role == string(r) {
+ return i
+ }
+ }
+ return -1
+}
+
+// memberIndex returns the index of m in b's Members, or -1 if not found.
+func memberIndex(m string, b *pb.Binding) int {
+ if b == nil {
+ return -1
+ }
+ for i, mm := range b.Members {
+ if mm == m {
+ return i
+ }
+ }
+ return -1
+}
+
+// insertMetadata inserts metadata into the given context
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+ out, _ := metadata.FromOutgoingContext(ctx)
+ out = out.Copy()
+ for _, md := range mds {
+ for k, v := range md {
+ out[k] = append(out[k], v...)
+ }
+ }
+ return metadata.NewOutgoingContext(ctx, out)
+}
diff --git a/vendor/cloud.google.com/go/internal/optional/optional.go b/vendor/cloud.google.com/go/internal/optional/optional.go
new file mode 100644
index 000000000..72780f764
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/optional/optional.go
@@ -0,0 +1,108 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package optional provides versions of primitive types that can
+// be nil. These are useful in methods that update some of an API object's
+// fields.
+package optional
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+type (
+ // Bool is either a bool or nil.
+ Bool interface{}
+
+ // String is either a string or nil.
+ String interface{}
+
+ // Int is either an int or nil.
+ Int interface{}
+
+ // Uint is either a uint or nil.
+ Uint interface{}
+
+ // Float64 is either a float64 or nil.
+ Float64 interface{}
+
+ // Duration is either a time.Duration or nil.
+ Duration interface{}
+)
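+
+// As an illustration (hypothetical code, not part of this package), an
+// updater can treat nil as "leave unchanged" while still allowing the zero
+// value to be set explicitly:
+//
+//	func applyDescription(dst *string, v String) {
+//		if v != nil {
+//			*dst = ToString(v) // panics if v is non-nil but not a string
+//		}
+//	}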
+
+// ToBool returns its argument as a bool.
+// It panics if its argument is nil or not a bool.
+func ToBool(v Bool) bool {
+ x, ok := v.(bool)
+ if !ok {
+ doPanic("Bool", v)
+ }
+ return x
+}
+
+// ToString returns its argument as a string.
+// It panics if its argument is nil or not a string.
+func ToString(v String) string {
+ x, ok := v.(string)
+ if !ok {
+ doPanic("String", v)
+ }
+ return x
+}
+
+// ToInt returns its argument as an int.
+// It panics if its argument is nil or not an int.
+func ToInt(v Int) int {
+ x, ok := v.(int)
+ if !ok {
+ doPanic("Int", v)
+ }
+ return x
+}
+
+// ToUint returns its argument as a uint.
+// It panics if its argument is nil or not a uint.
+func ToUint(v Uint) uint {
+ x, ok := v.(uint)
+ if !ok {
+ doPanic("Uint", v)
+ }
+ return x
+}
+
+// ToFloat64 returns its argument as a float64.
+// It panics if its argument is nil or not a float64.
+func ToFloat64(v Float64) float64 {
+ x, ok := v.(float64)
+ if !ok {
+ doPanic("Float64", v)
+ }
+ return x
+}
+
+// ToDuration returns its argument as a time.Duration.
+// It panics if its argument is nil or not a time.Duration.
+func ToDuration(v Duration) time.Duration {
+ x, ok := v.(time.Duration)
+ if !ok {
+ doPanic("Duration", v)
+ }
+ return x
+}
+
+func doPanic(capType string, v interface{}) {
+ panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
+}
diff --git a/vendor/cloud.google.com/go/internal/version/update_version.sh b/vendor/cloud.google.com/go/internal/version/update_version.sh
new file mode 100644
index 000000000..d7c5a3e21
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/version/update_version.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+today=$(date +%Y%m%d)
+
+sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE
+
diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go
new file mode 100644
index 000000000..d291921b1
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/version/version.go
@@ -0,0 +1,71 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate ./update_version.sh
+
+// Package version contains version information for Google Cloud Client
+// Libraries for Go, as reported in request headers.
+package version
+
+import (
+ "runtime"
+ "strings"
+ "unicode"
+)
+
+// Repo is the current version of the client libraries in this
+// repo. It should be a date in YYYYMMDD format.
+const Repo = "20190802"
+
+// Go returns the Go runtime version. The returned string
+// has no whitespace.
+func Go() string {
+ return goVersion
+}
+
+var goVersion = goVer(runtime.Version())
+
+const develPrefix = "devel +"
+
+func goVer(s string) string {
+ if strings.HasPrefix(s, develPrefix) {
+ s = s[len(develPrefix):]
+ if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+ s = s[:p]
+ }
+ return s
+ }
+
+ if strings.HasPrefix(s, "go1") {
+ s = s[2:]
+ var prerelease string
+ if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
+ s, prerelease = s[:p], s[p:]
+ }
+ if strings.HasSuffix(s, ".") {
+ s += "0"
+ } else if strings.Count(s, ".") < 2 {
+ s += ".0"
+ }
+ if prerelease != "" {
+ s += "-" + prerelease
+ }
+ return s
+ }
+ return ""
+}
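+
+// Illustrative inputs and outputs for goVer (editor's examples, following
+// the rules above):
+//
+//	goVer("go1.12.7")              // "1.12.7"
+//	goVer("go1.13")                // "1.13.0"
+//	goVer("go1.13beta1")           // "1.13.0-beta1"
+//	goVer("devel +a1b2c3 Thu Jan") // "a1b2c3"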
+
+func notSemverRune(r rune) bool {
+ return !strings.ContainsRune("0123456789.", r)
+}
diff --git a/vendor/cloud.google.com/go/pubsub/README.md b/vendor/cloud.google.com/go/pubsub/README.md
new file mode 100644
index 000000000..59f4cf66d
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/README.md
@@ -0,0 +1,46 @@
+## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub)
+
+- [About Cloud Pub/Sub](https://cloud.google.com/pubsub/)
+- [API documentation](https://cloud.google.com/pubsub/docs)
+- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub)
+- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub)
+
+### Example Usage
+
+First create a `pubsub.Client` to use throughout your application:
+
+[snip]:# (pubsub-1)
+```go
+client, err := pubsub.NewClient(ctx, "project-id")
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+Then use the client to publish and subscribe:
+
+[snip]:# (pubsub-2)
+```go
+// Publish "hello world" on topic1.
+topic := client.Topic("topic1")
+res := topic.Publish(ctx, &pubsub.Message{
+ Data: []byte("hello world"),
+})
+// The publish happens asynchronously.
+// Later, you can get the result from res:
+...
+msgID, err := res.Get(ctx)
+if err != nil {
+ log.Fatal(err)
+}
+
+// Use a callback to receive messages via subscription1.
+sub := client.Subscription("subscription1")
+err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
+ fmt.Println(m.Data)
+ m.Ack() // Acknowledge that we've consumed the message.
+})
+if err != nil {
+ log.Println(err)
+}
+```
\ No newline at end of file
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/README.md b/vendor/cloud.google.com/go/pubsub/apiv1/README.md
new file mode 100644
index 000000000..b5967ab9c
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/README.md
@@ -0,0 +1,9 @@
+Auto-generated pubsub v1 clients
+=================================
+
+This package includes auto-generated clients for the pubsub v1 API.
+
+Use the handwritten client (in the parent directory,
+cloud.google.com/go/pubsub) in preference to this.
+
+This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME.
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/doc.go b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go
new file mode 100644
index 000000000..e89779564
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go
@@ -0,0 +1,103 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapic-generator. DO NOT EDIT.
+
+// Package pubsub is an auto-generated package for the
+// Google Cloud Pub/Sub API.
+
+//
+// Provides reliable, many-to-many, asynchronous messaging between
+// applications.
+//
+// Use of Context
+//
+// The ctx passed to NewClient is used for authentication requests and
+// for creating the underlying connection, but is not used for subsequent calls.
+// Individual methods on the client use the ctx given to them.
+//
+// To close the open connection, use the Close() method.
+//
+// For information about setting deadlines, reusing contexts, and more
+// please visit godoc.org/cloud.google.com/go.
+//
+// Use the client at cloud.google.com/go/pubsub in preference to this.
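+//
+// A minimal use of this package, sketched for illustration (error handling
+// elided; pubsubpb is google.golang.org/genproto/googleapis/pubsub/v1 and
+// the topic name is a placeholder):
+//
+//	ctx := context.Background()
+//	c, err := pubsub.NewPublisherClient(ctx)
+//	if err != nil {
+//		// handle err
+//	}
+//	defer c.Close()
+//	topic, err := c.CreateTopic(ctx, &pubsubpb.Topic{Name: "projects/p/topics/t"})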
+package pubsub // import "cloud.google.com/go/pubsub/apiv1"
+
+import (
+ "context"
+ "runtime"
+ "strings"
+ "unicode"
+
+ "google.golang.org/grpc/metadata"
+)
+
+func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
+ out, _ := metadata.FromOutgoingContext(ctx)
+ out = out.Copy()
+ for _, md := range mds {
+ for k, v := range md {
+ out[k] = append(out[k], v...)
+ }
+ }
+ return metadata.NewOutgoingContext(ctx, out)
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/pubsub",
+ }
+}
+
+// versionGo returns the Go runtime version. The returned string
+// has no whitespace, suitable for reporting in header.
+func versionGo() string {
+ const develPrefix = "devel +"
+
+ s := runtime.Version()
+ if strings.HasPrefix(s, develPrefix) {
+ s = s[len(develPrefix):]
+ if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+ s = s[:p]
+ }
+ return s
+ }
+
+ notSemverRune := func(r rune) bool {
+ return strings.IndexRune("0123456789.", r) < 0
+ }
+
+ if strings.HasPrefix(s, "go1") {
+ s = s[2:]
+ var prerelease string
+ if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
+ s, prerelease = s[:p], s[p:]
+ }
+ if strings.HasSuffix(s, ".") {
+ s += "0"
+ } else if strings.Count(s, ".") < 2 {
+ s += ".0"
+ }
+ if prerelease != "" {
+ s += "-" + prerelease
+ }
+ return s
+ }
+ return "UNKNOWN"
+}
+
+const versionClient = "20190819"
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/iam.go b/vendor/cloud.google.com/go/pubsub/apiv1/iam.go
new file mode 100644
index 000000000..4a0c231d7
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/iam.go
@@ -0,0 +1,36 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+ "cloud.google.com/go/iam"
+ pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
+)
+
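+// SubscriptionIAM returns an iam.Handle for performing IAM operations on the given subscription.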
+func (c *PublisherClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle {
+ return iam.InternalNewHandle(c.Connection(), subscription.Name)
+}
+
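+// TopicIAM returns an iam.Handle for performing IAM operations on the given topic.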
+func (c *PublisherClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle {
+ return iam.InternalNewHandle(c.Connection(), topic.Name)
+}
+
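+// SubscriptionIAM returns an iam.Handle for performing IAM operations on the given subscription.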
+func (c *SubscriberClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle {
+ return iam.InternalNewHandle(c.Connection(), subscription.Name)
+}
+
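+// TopicIAM returns an iam.Handle for performing IAM operations on the given topic.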
+func (c *SubscriberClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle {
+ return iam.InternalNewHandle(c.Connection(), topic.Name)
+}
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go b/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go
new file mode 100644
index 000000000..b9ab4848d
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go
@@ -0,0 +1,95 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+// PublisherProjectPath returns the path for the project resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s", project)
+// instead.
+func PublisherProjectPath(project string) string {
+ return "" +
+ "projects/" +
+ project +
+ ""
+}
+
+// PublisherTopicPath returns the path for the topic resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s/topics/%s", project, topic)
+// instead.
+func PublisherTopicPath(project, topic string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/topics/" +
+ topic +
+ ""
+}
+
+// SubscriberProjectPath returns the path for the project resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s", project)
+// instead.
+func SubscriberProjectPath(project string) string {
+ return "" +
+ "projects/" +
+ project +
+ ""
+}
+
+// SubscriberSnapshotPath returns the path for the snapshot resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s/snapshots/%s", project, snapshot)
+// instead.
+func SubscriberSnapshotPath(project, snapshot string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/snapshots/" +
+ snapshot +
+ ""
+}
+
+// SubscriberSubscriptionPath returns the path for the subscription resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s/subscriptions/%s", project, subscription)
+// instead.
+func SubscriberSubscriptionPath(project, subscription string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/subscriptions/" +
+ subscription +
+ ""
+}
+
+// SubscriberTopicPath returns the path for the topic resource.
+//
+// Deprecated: Use
+// fmt.Sprintf("projects/%s/topics/%s", project, topic)
+// instead.
+func SubscriberTopicPath(project, topic string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/topics/" +
+ topic +
+ ""
+}
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go
new file mode 100644
index 000000000..b99214db0
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go
@@ -0,0 +1,417 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapic-generator. DO NOT EDIT.
+
+package pubsub
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "net/url"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// PublisherCallOptions contains the retry settings for each method of PublisherClient.
+type PublisherCallOptions struct {
+ CreateTopic []gax.CallOption
+ UpdateTopic []gax.CallOption
+ Publish []gax.CallOption
+ GetTopic []gax.CallOption
+ ListTopics []gax.CallOption
+ ListTopicSubscriptions []gax.CallOption
+ DeleteTopic []gax.CallOption
+}
+
+func defaultPublisherClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("pubsub.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultPublisherCallOptions() *PublisherCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Aborted,
+ codes.Unavailable,
+ codes.Unknown,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ {"default", "non_idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ {"messaging", "publish"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Aborted,
+ codes.Canceled,
+ codes.DeadlineExceeded,
+ codes.Internal,
+ codes.ResourceExhausted,
+ codes.Unavailable,
+ codes.Unknown,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ }
+ return &PublisherCallOptions{
+ CreateTopic: retry[[2]string{"default", "non_idempotent"}],
+ UpdateTopic: retry[[2]string{"default", "non_idempotent"}],
+ Publish: retry[[2]string{"messaging", "publish"}],
+ GetTopic: retry[[2]string{"default", "idempotent"}],
+ ListTopics: retry[[2]string{"default", "idempotent"}],
+ ListTopicSubscriptions: retry[[2]string{"default", "idempotent"}],
+ DeleteTopic: retry[[2]string{"default", "non_idempotent"}],
+ }
+}
+
+// PublisherClient is a client for interacting with Google Cloud Pub/Sub API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type PublisherClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ publisherClient pubsubpb.PublisherClient
+
+ // The call options for this service.
+ CallOptions *PublisherCallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewPublisherClient creates a new publisher client.
+//
+// The service that an application uses to manipulate topics, and to send
+// messages to a topic.
+func NewPublisherClient(ctx context.Context, opts ...option.ClientOption) (*PublisherClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultPublisherClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &PublisherClient{
+ conn: conn,
+ CallOptions: defaultPublisherCallOptions(),
+
+ publisherClient: pubsubpb.NewPublisherClient(conn),
+ }
+ c.SetGoogleClientInfo()
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *PublisherClient) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *PublisherClient) Close() error {
+ return c.conn.Close()
+}
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *PublisherClient) SetGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", versionGo()}, keyval...)
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// CreateTopic creates the given topic with the given name. See the
+// resource name rules
+// (at https://cloud.google.com/pubsub/docs/admin#resource_names).
+func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.CreateTopic[0:len(c.CallOptions.CreateTopic):len(c.CallOptions.CreateTopic)], opts...)
+ var resp *pubsubpb.Topic
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.publisherClient.CreateTopic(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateTopic updates an existing topic. Note that certain properties of a
+// topic are not modifiable.
+func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic.name", url.QueryEscape(req.GetTopic().GetName())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.UpdateTopic[0:len(c.CallOptions.UpdateTopic):len(c.CallOptions.UpdateTopic)], opts...)
+ var resp *pubsubpb.Topic
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.publisherClient.UpdateTopic(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// Publish adds one or more messages to the topic. Returns NOT_FOUND if the topic
+// does not exist.
+func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.Publish[0:len(c.CallOptions.Publish):len(c.CallOptions.Publish)], opts...)
+ var resp *pubsubpb.PublishResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.publisherClient.Publish(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// GetTopic gets the configuration of a topic.
+func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.GetTopic[0:len(c.CallOptions.GetTopic):len(c.CallOptions.GetTopic)], opts...)
+ var resp *pubsubpb.Topic
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.publisherClient.GetTopic(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// ListTopics lists matching topics.
+func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.ListTopics[0:len(c.CallOptions.ListTopics):len(c.CallOptions.ListTopics)], opts...)
+ it := &TopicIterator{}
+ req = proto.Clone(req).(*pubsubpb.ListTopicsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Topic, string, error) {
+ var resp *pubsubpb.ListTopicsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.publisherClient.ListTopics(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.Topics, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ it.pageInfo.Token = req.PageToken
+ return it
+}
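+
+// Draining the returned iterator, sketched for illustration (c and req are
+// placeholders; iterator.Done signals the end of the results):
+//
+//	it := c.ListTopics(ctx, req)
+//	for {
+//		topic, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// handle err
+//		}
+//		_ = topic // use topic
+//	}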
+
+// ListTopicSubscriptions lists the names of the subscriptions on this topic.
+func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.ListTopicSubscriptions[0:len(c.CallOptions.ListTopicSubscriptions):len(c.CallOptions.ListTopicSubscriptions)], opts...)
+ it := &StringIterator{}
+ req = proto.Clone(req).(*pubsubpb.ListTopicSubscriptionsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) {
+ var resp *pubsubpb.ListTopicSubscriptionsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.publisherClient.ListTopicSubscriptions(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.Subscriptions, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ it.pageInfo.Token = req.PageToken
+ return it
+}
+
+// DeleteTopic deletes the topic with the given name. Returns NOT_FOUND if the topic
+// does not exist. After a topic is deleted, a new topic may be created with
+// the same name; this is an entirely new topic with none of the old
+// configuration or subscriptions. Existing subscriptions to this topic are
+// not deleted, but their topic field is set to _deleted-topic_.
+func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.DeleteTopic[0:len(c.CallOptions.DeleteTopic):len(c.CallOptions.DeleteTopic)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.publisherClient.DeleteTopic(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// StringIterator manages a stream of string.
+type StringIterator struct {
+ items []string
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *StringIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *StringIterator) Next() (string, error) {
+ var item string
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *StringIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *StringIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// TopicIterator manages a stream of *pubsubpb.Topic.
+type TopicIterator struct {
+ items []*pubsubpb.Topic
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Topic, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *TopicIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *TopicIterator) Next() (*pubsubpb.Topic, error) {
+ var item *pubsubpb.Topic
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *TopicIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *TopicIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go
new file mode 100644
index 000000000..260615b07
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go
@@ -0,0 +1,635 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapic-generator. DO NOT EDIT.
+
+package pubsub
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "net/url"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/transport"
+ pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+// SubscriberCallOptions contains the retry settings for each method of SubscriberClient.
+type SubscriberCallOptions struct {
+ CreateSubscription []gax.CallOption
+ GetSubscription []gax.CallOption
+ UpdateSubscription []gax.CallOption
+ ListSubscriptions []gax.CallOption
+ DeleteSubscription []gax.CallOption
+ ModifyAckDeadline []gax.CallOption
+ Acknowledge []gax.CallOption
+ Pull []gax.CallOption
+ StreamingPull []gax.CallOption
+ ModifyPushConfig []gax.CallOption
+ ListSnapshots []gax.CallOption
+ CreateSnapshot []gax.CallOption
+ UpdateSnapshot []gax.CallOption
+ DeleteSnapshot []gax.CallOption
+ Seek []gax.CallOption
+}
+
+func defaultSubscriberClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithEndpoint("pubsub.googleapis.com:443"),
+ option.WithScopes(DefaultAuthScopes()...),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultSubscriberCallOptions() *SubscriberCallOptions {
+ retry := map[[2]string][]gax.CallOption{
+ {"default", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Aborted,
+ codes.Unavailable,
+ codes.Unknown,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ {"default", "non_idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ {"messaging", "idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Aborted,
+ codes.Unavailable,
+ codes.Unknown,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ {"messaging", "non_idempotent"}: {
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 60000 * time.Millisecond,
+ Multiplier: 1.3,
+ })
+ }),
+ },
+ }
+ return &SubscriberCallOptions{
+ CreateSubscription: retry[[2]string{"default", "idempotent"}],
+ GetSubscription: retry[[2]string{"default", "idempotent"}],
+ UpdateSubscription: retry[[2]string{"default", "non_idempotent"}],
+ ListSubscriptions: retry[[2]string{"default", "idempotent"}],
+ DeleteSubscription: retry[[2]string{"default", "non_idempotent"}],
+ ModifyAckDeadline: retry[[2]string{"default", "non_idempotent"}],
+ Acknowledge: retry[[2]string{"messaging", "non_idempotent"}],
+ Pull: retry[[2]string{"messaging", "idempotent"}],
+ StreamingPull: retry[[2]string{"streaming_messaging", "none"}],
+ ModifyPushConfig: retry[[2]string{"default", "non_idempotent"}],
+ ListSnapshots: retry[[2]string{"default", "idempotent"}],
+ CreateSnapshot: retry[[2]string{"default", "non_idempotent"}],
+ UpdateSnapshot: retry[[2]string{"default", "non_idempotent"}],
+ DeleteSnapshot: retry[[2]string{"default", "non_idempotent"}],
+ Seek: retry[[2]string{"default", "idempotent"}],
+ }
+}
+
+// SubscriberClient is a client for interacting with Google Cloud Pub/Sub API.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type SubscriberClient struct {
+ // The connection to the service.
+ conn *grpc.ClientConn
+
+ // The gRPC API client.
+ subscriberClient pubsubpb.SubscriberClient
+
+ // The call options for this service.
+ CallOptions *SubscriberCallOptions
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogMetadata metadata.MD
+}
+
+// NewSubscriberClient creates a new subscriber client.
+//
+// The service that an application uses to manipulate subscriptions and to
+// consume messages from a subscription via the Pull method or by
+// establishing a bi-directional stream using the StreamingPull method.
+func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*SubscriberClient, error) {
+ conn, err := transport.DialGRPC(ctx, append(defaultSubscriberClientOptions(), opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ c := &SubscriberClient{
+ conn: conn,
+ CallOptions: defaultSubscriberCallOptions(),
+
+ subscriberClient: pubsubpb.NewSubscriberClient(conn),
+ }
+ c.SetGoogleClientInfo()
+ return c, nil
+}
+
+// Connection returns the client's connection to the API service.
+func (c *SubscriberClient) Connection() *grpc.ClientConn {
+ return c.conn
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *SubscriberClient) Close() error {
+ return c.conn.Close()
+}
+
+// SetGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *SubscriberClient) SetGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", versionGo()}, keyval...)
+ kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
+}
+
+// CreateSubscription creates a subscription to a given topic. See the
+// resource name rules
+// (at https://cloud.google.com/pubsub/docs/admin#resource_names).
+// If the subscription already exists, returns ALREADY_EXISTS.
+// If the corresponding topic doesn't exist, returns NOT_FOUND.
+//
+// If the name is not provided in the request, the server will assign a random
+// name for this subscription on the same project as the topic, conforming
+// to the
+// resource name
+// format (at https://cloud.google.com/pubsub/docs/admin#resource_names). The
+// generated name is populated in the returned Subscription object. Note that
+// for REST API requests, you must specify a name in the request.
+func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.CreateSubscription[0:len(c.CallOptions.CreateSubscription):len(c.CallOptions.CreateSubscription)], opts...)
+ var resp *pubsubpb.Subscription
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.subscriberClient.CreateSubscription(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// GetSubscription gets the configuration details of a subscription.
+func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.GetSubscription[0:len(c.CallOptions.GetSubscription):len(c.CallOptions.GetSubscription)], opts...)
+ var resp *pubsubpb.Subscription
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.subscriberClient.GetSubscription(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateSubscription updates an existing subscription. Note that certain properties of a
+// subscription, such as its topic, are not modifiable.
+func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription.name", url.QueryEscape(req.GetSubscription().GetName())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.UpdateSubscription[0:len(c.CallOptions.UpdateSubscription):len(c.CallOptions.UpdateSubscription)], opts...)
+ var resp *pubsubpb.Subscription
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.subscriberClient.UpdateSubscription(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// ListSubscriptions lists matching subscriptions.
+func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.ListSubscriptions[0:len(c.CallOptions.ListSubscriptions):len(c.CallOptions.ListSubscriptions)], opts...)
+ it := &SubscriptionIterator{}
+ req = proto.Clone(req).(*pubsubpb.ListSubscriptionsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Subscription, string, error) {
+ var resp *pubsubpb.ListSubscriptionsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.subscriberClient.ListSubscriptions(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.Subscriptions, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ it.pageInfo.Token = req.PageToken
+ return it
+}
+
+// DeleteSubscription deletes an existing subscription. All messages retained in the subscription
+// are immediately dropped. Calls to Pull after deletion will return
+// NOT_FOUND. After a subscription is deleted, a new one may be created with
+// the same name, but the new one has no association with the old
+// subscription or its topic unless the same topic is specified.
+func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.DeleteSubscription[0:len(c.CallOptions.DeleteSubscription):len(c.CallOptions.DeleteSubscription)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.subscriberClient.DeleteSubscription(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// ModifyAckDeadline modifies the ack deadline for a specific message. This method is useful
+// to indicate that more time is needed to process a message by the
+// subscriber, or to make the message available for redelivery if the
+// processing was interrupted. Note that this does not modify the
+// subscription-level ackDeadlineSeconds used for subsequent messages.
+func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.ModifyAckDeadline[0:len(c.CallOptions.ModifyAckDeadline):len(c.CallOptions.ModifyAckDeadline)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.subscriberClient.ModifyAckDeadline(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// Acknowledge acknowledges the messages associated with the ack_ids in the
+// AcknowledgeRequest. The Pub/Sub system can remove the relevant messages
+// from the subscription.
+//
+// Acknowledging a message whose ack deadline has expired may succeed,
+// but such a message may be redelivered later. Acknowledging a message more
+// than once will not result in an error.
+func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.Acknowledge[0:len(c.CallOptions.Acknowledge):len(c.CallOptions.Acknowledge)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.subscriberClient.Acknowledge(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// Pull pulls messages from the server. The server may return UNAVAILABLE if
+// there are too many concurrent pull requests pending for the given
+// subscription.
+func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.Pull[0:len(c.CallOptions.Pull):len(c.CallOptions.Pull)], opts...)
+ var resp *pubsubpb.PullResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.subscriberClient.Pull(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// StreamingPull establishes a stream with the server, which sends messages down to the
+// client. The client streams acknowledgements and ack deadline modifications
+// back to the server. The server will close the stream and return the status
+// on any error. The server may close the stream with status UNAVAILABLE to
+// reassign server-side resources, in which case, the client should
+// re-establish the stream. Flow control can be achieved by configuring the
+// underlying RPC channel.
+func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) {
+ ctx = insertMetadata(ctx, c.xGoogMetadata)
+ opts = append(c.CallOptions.StreamingPull[0:len(c.CallOptions.StreamingPull):len(c.CallOptions.StreamingPull)], opts...)
+ var resp pubsubpb.Subscriber_StreamingPullClient
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.subscriberClient.StreamingPull(ctx, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// ModifyPushConfig modifies the PushConfig for a specified subscription.
+//
+// This may be used to change a push subscription to a pull one (signified by
+// an empty PushConfig) or vice versa, or change the endpoint URL and other
+// attributes of a push subscription. Messages will accumulate for delivery
+// continuously through the call regardless of changes to the PushConfig.
+func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.ModifyPushConfig[0:len(c.CallOptions.ModifyPushConfig):len(c.CallOptions.ModifyPushConfig)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.subscriberClient.ModifyPushConfig(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// ListSnapshots lists the existing snapshots. Snapshots are used in Seek
+// operations, which allow you to manage message acknowledgments in bulk.
+// That is, you can set the acknowledgment state of messages in an existing
+// subscription to the state captured by a snapshot.
+func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.ListSnapshots[0:len(c.CallOptions.ListSnapshots):len(c.CallOptions.ListSnapshots)], opts...)
+ it := &SnapshotIterator{}
+ req = proto.Clone(req).(*pubsubpb.ListSnapshotsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Snapshot, string, error) {
+ var resp *pubsubpb.ListSnapshotsResponse
+ req.PageToken = pageToken
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.subscriberClient.ListSnapshots(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return resp.Snapshots, resp.NextPageToken, nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.PageSize)
+ it.pageInfo.Token = req.PageToken
+ return it
+}
+
+// CreateSnapshot creates a snapshot from the requested subscription. Snapshots
+// are used in Seek operations, which allow you to manage message
+// acknowledgments in bulk. That is, you can set the acknowledgment state of
+// messages in an existing subscription to the state captured by a snapshot.
+//
+// If the snapshot already exists, returns ALREADY_EXISTS.
+// If the requested subscription doesn't exist, returns NOT_FOUND.
+// If the backlog in the subscription is too old -- and the resulting snapshot
+// would expire in less than 1 hour -- then FAILED_PRECONDITION is returned.
+// See also the Snapshot.expire_time field. If the name is not provided in
+// the request, the server will assign a random name for this snapshot on the
+// same project as the subscription, conforming to the resource name format
+// (at https://cloud.google.com/pubsub/docs/admin#resource_names). The
+// generated name is populated in the returned Snapshot object. Note that for
+// REST API requests, you must specify a name in the request.
+func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.CreateSnapshot[0:len(c.CallOptions.CreateSnapshot):len(c.CallOptions.CreateSnapshot)], opts...)
+ var resp *pubsubpb.Snapshot
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.subscriberClient.CreateSnapshot(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// UpdateSnapshot updates an existing snapshot. Snapshots are used in Seek
+// operations, which allow you to manage message acknowledgments in bulk.
+// That is, you can set the acknowledgment state of messages in an existing
+// subscription to the state captured by a snapshot.
+func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snapshot.name", url.QueryEscape(req.GetSnapshot().GetName())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.UpdateSnapshot[0:len(c.CallOptions.UpdateSnapshot):len(c.CallOptions.UpdateSnapshot)], opts...)
+ var resp *pubsubpb.Snapshot
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.subscriberClient.UpdateSnapshot(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// DeleteSnapshot removes an existing snapshot. Snapshots are used in Seek
+// operations, which allow you to manage message acknowledgments in bulk.
+// That is, you can set the acknowledgment state of messages in an existing
+// subscription to the state captured by a snapshot.
+// When the snapshot is deleted, all messages retained in the snapshot
+// are immediately dropped. After a snapshot is deleted, a new one may be
+// created with the same name, but the new one has no association with the old
+// snapshot or its subscription, unless the same subscription is specified.
+func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snapshot", url.QueryEscape(req.GetSnapshot())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.DeleteSnapshot[0:len(c.CallOptions.DeleteSnapshot):len(c.CallOptions.DeleteSnapshot)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.subscriberClient.DeleteSnapshot(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+// Seek seeks an existing subscription to a point in time or to a given
+// snapshot, whichever is provided in the request. Snapshots are used in Seek
+// operations, which allow you to manage message acknowledgments in bulk.
+// That is, you can set the acknowledgment state of messages in an existing
+// subscription to the state captured by a snapshot. Note that both the
+// subscription and the snapshot must be on the same topic.
+func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) {
+ md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription())))
+ ctx = insertMetadata(ctx, c.xGoogMetadata, md)
+ opts = append(c.CallOptions.Seek[0:len(c.CallOptions.Seek):len(c.CallOptions.Seek)], opts...)
+ var resp *pubsubpb.SeekResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.subscriberClient.Seek(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// SnapshotIterator manages a stream of *pubsubpb.Snapshot.
+type SnapshotIterator struct {
+ items []*pubsubpb.Snapshot
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Snapshot, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *SnapshotIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *SnapshotIterator) Next() (*pubsubpb.Snapshot, error) {
+ var item *pubsubpb.Snapshot
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *SnapshotIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *SnapshotIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// SubscriptionIterator manages a stream of *pubsubpb.Subscription.
+type SubscriptionIterator struct {
+ items []*pubsubpb.Subscription
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Subscription, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *SubscriptionIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *SubscriptionIterator) Next() (*pubsubpb.Subscription, error) {
+ var item *pubsubpb.Subscription
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *SubscriptionIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *SubscriptionIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/pubsub/debug.go b/vendor/cloud.google.com/go/pubsub/debug.go
new file mode 100644
index 000000000..977ae577f
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/debug.go
@@ -0,0 +1,72 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build psdebug
+
+package pubsub
+
+import (
+ "sync"
+ "time"
+)
+
+var (
+ dmu sync.Mutex
+ msgTraces = map[string][]Event{}
+ ackIDToMsgID = map[string]string{}
+)
+
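+// Event describes something that happened to a message, and the time at
+// which it happened.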
+type Event struct {
+ Desc string
+ At time.Time
+}
+
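+// MessageEvents returns the events recorded so far for the message with the
+// given ID.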
+func MessageEvents(msgID string) []Event {
+ dmu.Lock()
+ defer dmu.Unlock()
+ return msgTraces[msgID]
+}
+
+func addRecv(msgID, ackID string, t time.Time) {
+ dmu.Lock()
+ defer dmu.Unlock()
+ ackIDToMsgID[ackID] = msgID
+ addEvent(msgID, "recv", t)
+}
+
+func addAcks(ackIDs []string) {
+ dmu.Lock()
+ defer dmu.Unlock()
+ now := time.Now()
+ for _, id := range ackIDs {
+ addEvent(ackIDToMsgID[id], "ack", now)
+ }
+}
+
+func addModAcks(ackIDs []string, deadlineSecs int32) {
+ dmu.Lock()
+ defer dmu.Unlock()
+ desc := "modack"
+ if deadlineSecs == 0 {
+ desc = "nack"
+ }
+ now := time.Now()
+ for _, id := range ackIDs {
+ addEvent(ackIDToMsgID[id], desc, now)
+ }
+}
+
+func addEvent(msgID, desc string, t time.Time) {
+ msgTraces[msgID] = append(msgTraces[msgID], Event{desc, t})
+}
diff --git a/vendor/cloud.google.com/go/pubsub/doc.go b/vendor/cloud.google.com/go/pubsub/doc.go
new file mode 100644
index 000000000..a86fc3d4a
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/doc.go
@@ -0,0 +1,140 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package pubsub provides an easy way to publish and receive Google Cloud Pub/Sub
+messages, hiding the details of the underlying server RPCs. Google Cloud
+Pub/Sub is a many-to-many, asynchronous messaging system that decouples senders
+and receivers.
+
+More information about Google Cloud Pub/Sub is available at
+https://cloud.google.com/pubsub/docs
+
+See https://godoc.org/cloud.google.com/go for authentication, timeouts,
+connection pooling and similar aspects of this package.
+
+
+Publishing
+
+Google Cloud Pub/Sub messages are published to topics. Topics may be created
+using the pubsub package like so:
+
+ topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name")
+
+Messages may then be published to a topic:
+
+ res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")})
+
+Publish queues the message for publishing and returns immediately. When enough
+messages have accumulated, or enough time has elapsed, the batch of messages is
+sent to the Pub/Sub service.
+
+Publish returns a PublishResult, which behaves like a future: its Get method
+blocks until the message has been sent to the service.
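+
+For example, to block until the message has been sent and retrieve its
+server-assigned ID (a sketch; ctx and res are from the surrounding examples):
+
+ id, err := res.Get(ctx)
+ if err != nil {
+   // Handle error.
+ }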
+
+The first time you call Publish on a topic, goroutines are started in the
+background. To clean up these goroutines, call Stop:
+
+ topic.Stop()
+
+
+Receiving
+
+To receive messages published to a topic, clients create subscriptions
+to the topic. There may be more than one subscription per topic; each message
+that is published to the topic will be delivered to all of its subscriptions.
+
+Subscriptions may be created like so:
+
+ sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name",
+ pubsub.SubscriptionConfig{Topic: topic})
+
+Messages are then consumed from a subscription via callback.
+
+ err := sub.Receive(context.Background(), func(ctx context.Context, m *Message) {
+ log.Printf("Got message: %s", m.Data)
+ m.Ack()
+ })
+ if err != nil {
+ // Handle error.
+ }
+
+The callback is invoked concurrently by multiple goroutines, maximizing
+throughput. To terminate a call to Receive, cancel its context.
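+
+For example, a sketch that stops receiving after a fixed interval by giving
+Receive a context with a timeout (the duration is arbitrary):
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ err := sub.Receive(ctx, func(ctx context.Context, m *Message) {
+   m.Ack()
+ })
+ if err != nil {
+   // Handle error.
+ }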
+
+Once client code has processed the message, it must call Message.Ack or
+Message.Nack; otherwise the message will eventually be redelivered. If the
+client cannot or doesn't want to process the message, it can call Message.Nack
+to speed redelivery. For more information and configuration options, see
+"Deadlines" below.
+
+Note: It is possible for Messages to be redelivered, even if Message.Ack has
+been called. Client code must be robust to multiple deliveries of messages.
+
+Note: This uses pubsub's streaming pull feature. This feature has properties
+that may be surprising. Please take a look at https://cloud.google.com/pubsub/docs/pull#streamingpull
+for more details on how streaming pull behaves compared to the synchronous
+pull method.
+
+
+Deadlines
+
+The default pubsub deadlines are suitable for most use cases, but may be
+overridden. This section describes the tradeoffs that should be considered
+when overriding the defaults.
+
+Behind the scenes, each message returned by the Pub/Sub server has an
+associated lease, known as an "ACK deadline". Unless a message is
+acknowledged within the ACK deadline, or the client requests that
+the ACK deadline be extended, the message will become eligible for redelivery.
+
+As a convenience, the pubsub client will automatically extend deadlines until
+either:
+ * Message.Ack or Message.Nack is called, or
+ * The "MaxExtension" period elapses from the time the message is fetched from the server.
+
+ACK deadlines are extended periodically by the client. The initial ACK
+deadline given to messages is 10s. The period between extensions, as well as the
+length of the extension, automatically adjust depending on the time it takes to ack
+messages, up to 10m. This has the effect that subscribers that process messages
+quickly have their message ack deadlines extended for a short amount, whereas
+subscribers that process messages slowly have their message ack deadlines extended
+for a large amount. The net effect is fewer RPCs sent from the client library.
+
+For example, consider a subscriber that takes 3 minutes to process each message.
+Since the library has already recorded several 3 minute "time to ack"s in a
+percentile distribution, future message extensions are sent with a value of 3
+minutes, every 3 minutes. Suppose the application crashes 5 seconds after the
+library sends such an extension: the Pub/Sub server would wait the remaining
+2m55s before re-sending the messages out to other subscribers.
+
+Please note that the client library does not use the subscription's AckDeadline
+by default. To enforce the subscription AckDeadline, set MaxExtension to the
+subscription's AckDeadline:
+
+ cfg, err := sub.Config(ctx)
+ if err != nil {
+ // TODO: handle err
+ }
+
+ sub.ReceiveSettings.MaxExtension = cfg.AckDeadline
+
+
+Slow Message Processing
+
+For use cases where message processing exceeds 30 minutes, we recommend using
+the base client in a pull model, since long-lived streams are periodically killed
+by firewalls. See the example at https://godoc.org/cloud.google.com/go/pubsub/apiv1#example-SubscriberClient-Pull-LengthyClientProcessing
+*/
+package pubsub // import "cloud.google.com/go/pubsub"
diff --git a/vendor/cloud.google.com/go/pubsub/flow_controller.go b/vendor/cloud.google.com/go/pubsub/flow_controller.go
new file mode 100644
index 000000000..3f165a0ac
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/flow_controller.go
@@ -0,0 +1,122 @@
+// Copyright 2017 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+ "context"
+ "sync/atomic"
+
+ "golang.org/x/sync/semaphore"
+)
+
+// flowController implements flow control for Subscription.Receive.
+type flowController struct {
+ maxCount int
+ maxSize int // max total size of messages
+ semCount, semSize *semaphore.Weighted // enforces max number and size of messages
+ // Number of calls to acquire - number of calls to release. This can go
+ // negative if semCount == nil and a large acquire is followed by multiple
+ // small releases.
+ // Atomic.
+ countRemaining int64
+}
+
+// newFlowController creates a new flowController that ensures no more than
+// maxCount messages or maxSize bytes are outstanding at once. If maxCount or
+// maxSize is < 1, then an unlimited number of messages or bytes is permitted,
+// respectively.
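+//
+// For example, a sketch of a controller admitting at most 10 messages or
+// 1 MiB of message bytes at once (the limits are arbitrary; ctx and size are
+// assumed to be in scope):
+//
+//	fc := newFlowController(10, 1<<20)
+//	if err := fc.acquire(ctx, size); err != nil {
+//		return err // ctx was done before capacity freed up
+//	}
+//	defer fc.release(size)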
+func newFlowController(maxCount, maxSize int) *flowController {
+ fc := &flowController{
+ maxCount: maxCount,
+ maxSize: maxSize,
+ semCount: nil,
+ semSize: nil,
+ }
+ if maxCount > 0 {
+ fc.semCount = semaphore.NewWeighted(int64(maxCount))
+ }
+ if maxSize > 0 {
+ fc.semSize = semaphore.NewWeighted(int64(maxSize))
+ }
+ return fc
+}
+
+// acquire blocks until one message of size bytes can proceed or ctx is done.
+// It returns nil in the first case, or ctx.Err() in the second.
+//
+// acquire allows large messages to proceed by treating a size greater than maxSize
+// as if it were equal to maxSize.
+func (f *flowController) acquire(ctx context.Context, size int) error {
+ if f.semCount != nil {
+ if err := f.semCount.Acquire(ctx, 1); err != nil {
+ return err
+ }
+ }
+ if f.semSize != nil {
+ if err := f.semSize.Acquire(ctx, f.bound(size)); err != nil {
+ if f.semCount != nil {
+ f.semCount.Release(1)
+ }
+ return err
+ }
+ }
+ atomic.AddInt64(&f.countRemaining, 1)
+ return nil
+}
+
+// tryAcquire returns false if acquire would block. Otherwise, it behaves like
+// acquire and returns true.
+//
+// tryAcquire allows large messages to proceed by treating a size greater than
+// maxSize as if it were equal to maxSize.
+func (f *flowController) tryAcquire(size int) bool {
+ if f.semCount != nil {
+ if !f.semCount.TryAcquire(1) {
+ return false
+ }
+ }
+ if f.semSize != nil {
+ if !f.semSize.TryAcquire(f.bound(size)) {
+ if f.semCount != nil {
+ f.semCount.Release(1)
+ }
+ return false
+ }
+ }
+ atomic.AddInt64(&f.countRemaining, 1)
+ return true
+}
+
+// release notes that one message of size bytes is no longer outstanding.
+func (f *flowController) release(size int) {
+ atomic.AddInt64(&f.countRemaining, -1)
+ if f.semCount != nil {
+ f.semCount.Release(1)
+ }
+ if f.semSize != nil {
+ f.semSize.Release(f.bound(size))
+ }
+}
+
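+// bound returns size, treating any value greater than maxSize as if it were
+// maxSize, so that over-limit messages can still be admitted.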
+func (f *flowController) bound(size int) int64 {
+ if size > f.maxSize {
+ return int64(f.maxSize)
+ }
+ return int64(size)
+}
+
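+// count returns the number of outstanding messages (acquires minus releases).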
+func (f *flowController) count() int {
+ return int(atomic.LoadInt64(&f.countRemaining))
+}
diff --git a/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go b/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go
new file mode 100644
index 000000000..3c061fb1d
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go
@@ -0,0 +1,79 @@
+// Copyright 2017 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package distribution
+
+import (
+ "log"
+ "math"
+ "sort"
+ "sync"
+ "sync/atomic"
+)
+
+// D is a distribution. Methods of D can be called concurrently by multiple
+// goroutines.
+type D struct {
+ buckets []uint64
+ // sumsReuse is the scratch space that is reused
+ // to store sums during invocations of Percentile.
+ // After an invocation of New(n):
+ // len(buckets) == len(sumsReuse) == n
+ sumsReuse []uint64
+ mu sync.Mutex
+}
+
+// New creates a new distribution capable of holding values from 0 to n-1.
+func New(n int) *D {
+ return &D{
+ buckets: make([]uint64, n),
+ sumsReuse: make([]uint64, n),
+ }
+}
+
+// Record records value v to the distribution.
+// To help with distributions with long tails, if v is larger than the maximum value,
+// Record records the maximum value instead.
+// If v is negative, Record panics.
+func (d *D) Record(v int) {
+ if v < 0 {
+ log.Panicf("Record: value out of range: %d", v)
+ } else if v >= len(d.buckets) {
+ v = len(d.buckets) - 1
+ }
+ atomic.AddUint64(&d.buckets[v], 1)
+}
+
+// Percentile computes the p-th percentile of the distribution where
+// p is between 0 and 1. This method may be called by multiple goroutines.
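+//
+// For example, a sketch that records an ack latency in seconds and reads the
+// 99th percentile:
+//
+//	d := New(601) // holds values 0 through 600
+//	d.Record(3)
+//	p := d.Percentile(0.99) // p == 3, the only recorded value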
+func (d *D) Percentile(p float64) int {
+ // NOTE: This implementation uses the nearest-rank method.
+ // https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method
+
+ if p < 0 || p > 1 {
+ log.Panicf("Percentile: percentile out of range: %f", p)
+ }
+
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ var sum uint64
+ for i := range d.sumsReuse {
+ sum += atomic.LoadUint64(&d.buckets[i])
+ d.sumsReuse[i] = sum
+ }
+
+ target := uint64(math.Ceil(float64(sum) * p))
+ return sort.Search(len(d.sumsReuse), func(i int) bool { return d.sumsReuse[i] >= target })
+}
diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go
new file mode 100644
index 000000000..b2455777d
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/iterator.go
@@ -0,0 +1,527 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+ "context"
+ "io"
+ "sync"
+ "time"
+
+ vkit "cloud.google.com/go/pubsub/apiv1"
+ "cloud.google.com/go/pubsub/internal/distribution"
+ "github.com/golang/protobuf/proto"
+ gax "github.com/googleapis/gax-go/v2"
+ pb "google.golang.org/genproto/googleapis/pubsub/v1"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// Between message receipt and ack (that is, the time spent processing a message)
+// we want to extend the message deadline by way of modack. However, we don't
+// want to wait until the moment the deadline expires to extend it; instead, we
+// want to extend it a little ahead of time. gracePeriod is that amount of time
+// ahead of the actual deadline.
+const gracePeriod = 5 * time.Second
+
+type messageIterator struct {
+ ctx context.Context
+ cancel func() // the function that will cancel ctx; called in stop
+ po *pullOptions
+ ps *pullStream
+ subc *vkit.SubscriberClient
+ subName string
+ kaTick <-chan time.Time // keep-alive (deadline extensions)
+ ackTicker *time.Ticker // message acks
+ nackTicker *time.Ticker // message nacks (more frequent than acks)
+ pingTicker *time.Ticker // sends to the stream to keep it open
+ failed chan struct{} // closed on stream error
+ drained chan struct{} // closed when stopped && no more pending messages
+ wg sync.WaitGroup
+
+ mu sync.Mutex
+ ackTimeDist *distribution.D // dist uses seconds
+
+ // keepAliveDeadlines is a map of id to expiration time. This map is used in conjunction with
+ // subscription.ReceiveSettings.MaxExtension to record the maximum amount of time (the
+ // deadline, more specifically) we're willing to extend a message's ack deadline. As each
+ // message arrives, we'll record now+MaxExtension in this table; whenever we have a chance
+ // to update ack deadlines (via modack), we'll consult this table and only include IDs
+ // that are not beyond their deadline.
+ keepAliveDeadlines map[string]time.Time
+ pendingAcks map[string]bool
+ pendingNacks map[string]bool
+ pendingModAcks map[string]bool // ack IDs whose ack deadline is to be modified
+ err error // error from stream failure
+}
+
+// newMessageIterator starts and returns a new messageIterator.
+// subName is the full name of the subscription to pull messages from.
+// Stop must be called on the messageIterator when it is no longer needed.
+// The iterator always uses the background context for acking messages and extending message deadlines.
+func newMessageIterator(subc *vkit.SubscriberClient, subName string, po *pullOptions) *messageIterator {
+ var ps *pullStream
+ if !po.synchronous {
+ ps = newPullStream(context.Background(), subc.StreamingPull, subName)
+ }
+ // The period will update each tick based on the distribution of acks. We'll start by arbitrarily sending
+ // the first keepAlive halfway towards the minimum ack deadline.
+ keepAlivePeriod := minAckDeadline / 2
+
+ // Ack promptly so users don't lose work if client crashes.
+ ackTicker := time.NewTicker(100 * time.Millisecond)
+ nackTicker := time.NewTicker(100 * time.Millisecond)
+ pingTicker := time.NewTicker(30 * time.Second)
+ cctx, cancel := context.WithCancel(context.Background())
+ it := &messageIterator{
+ ctx: cctx,
+ cancel: cancel,
+ ps: ps,
+ po: po,
+ subc: subc,
+ subName: subName,
+ kaTick: time.After(keepAlivePeriod),
+ ackTicker: ackTicker,
+ nackTicker: nackTicker,
+ pingTicker: pingTicker,
+ failed: make(chan struct{}),
+ drained: make(chan struct{}),
+ ackTimeDist: distribution.New(int(maxAckDeadline/time.Second) + 1),
+ keepAliveDeadlines: map[string]time.Time{},
+ pendingAcks: map[string]bool{},
+ pendingNacks: map[string]bool{},
+ pendingModAcks: map[string]bool{},
+ }
+ it.wg.Add(1)
+ go it.sender()
+ return it
+}
+
+// Subscription.receive will call stop on its messageIterator when finished with it.
+// Stop will block until Done has been called on all Messages that have been
+// returned by Next, or until the context with which the messageIterator was created
+// is cancelled or exceeds its deadline.
+func (it *messageIterator) stop() {
+ it.cancel()
+ it.mu.Lock()
+ it.checkDrained()
+ it.mu.Unlock()
+ it.wg.Wait()
+}
+
+// checkDrained closes the drained channel if the iterator has been stopped and all
+// pending messages have either been n/acked or expired.
+//
+// Called with the lock held.
+func (it *messageIterator) checkDrained() {
+ select {
+ case <-it.drained:
+ return
+ default:
+ }
+ select {
+ case <-it.ctx.Done():
+ if len(it.keepAliveDeadlines) == 0 {
+ close(it.drained)
+ }
+ default:
+ }
+}
+
+// Called when a message is acked/nacked.
+func (it *messageIterator) done(ackID string, ack bool, receiveTime time.Time) {
+ it.ackTimeDist.Record(int(time.Since(receiveTime) / time.Second))
+ it.mu.Lock()
+ defer it.mu.Unlock()
+ delete(it.keepAliveDeadlines, ackID)
+ if ack {
+ it.pendingAcks[ackID] = true
+ } else {
+ it.pendingNacks[ackID] = true
+ }
+ it.checkDrained()
+}
+
+// fail is called when a stream method returns a permanent error.
+// fail returns it.err. This may be err, or it may be the error
+// set by an earlier call to fail.
+func (it *messageIterator) fail(err error) error {
+ it.mu.Lock()
+ defer it.mu.Unlock()
+ if it.err == nil {
+ it.err = err
+ close(it.failed)
+ }
+ return it.err
+}
+
+// receive makes a call to the stream's Recv method, or the Pull RPC, and returns
+// its messages.
+// maxToPull is the maximum number of messages for the Pull RPC.
+func (it *messageIterator) receive(maxToPull int32) ([]*Message, error) {
+ it.mu.Lock()
+ ierr := it.err
+ it.mu.Unlock()
+ if ierr != nil {
+ return nil, ierr
+ }
+
+ // Stop retrieving messages if the iterator's Stop method was called.
+ select {
+ case <-it.ctx.Done():
+ it.wg.Wait()
+ return nil, io.EOF
+ default:
+ }
+
+ var rmsgs []*pb.ReceivedMessage
+ var err error
+ if it.po.synchronous {
+ rmsgs, err = it.pullMessages(maxToPull)
+ } else {
+ rmsgs, err = it.recvMessages()
+ }
+ // Any error here is fatal.
+ if err != nil {
+ return nil, it.fail(err)
+ }
+ msgs, err := convertMessages(rmsgs)
+ if err != nil {
+ return nil, it.fail(err)
+ }
+ // We received some messages. Remember them so we can keep them alive. Also,
+ // do a receipt mod-ack when streaming.
+ maxExt := time.Now().Add(it.po.maxExtension)
+ ackIDs := map[string]bool{}
+ it.mu.Lock()
+ now := time.Now()
+ for _, m := range msgs {
+ m.receiveTime = now
+ addRecv(m.ID, m.ackID, now)
+ m.doneFunc = it.done
+ it.keepAliveDeadlines[m.ackID] = maxExt
+ // Don't change the mod-ack if the message is going to be nacked. This is
+ // possible if there are retries.
+ if !it.pendingNacks[m.ackID] {
+ ackIDs[m.ackID] = true
+ }
+ }
+ deadline := it.ackDeadline()
+ it.mu.Unlock()
+ if len(ackIDs) > 0 {
+ if !it.sendModAck(ackIDs, deadline) {
+ return nil, it.err
+ }
+ }
+ return msgs, nil
+}
+
+// Get messages using the Pull RPC.
+// This may block indefinitely. It may also return zero messages after waiting
+// for some time.
+func (it *messageIterator) pullMessages(maxToPull int32) ([]*pb.ReceivedMessage, error) {
+ // Use it.ctx as the RPC context, so that if the iterator is stopped, the call
+ // will return immediately.
+ res, err := it.subc.Pull(it.ctx, &pb.PullRequest{
+ Subscription: it.subName,
+ MaxMessages: maxToPull,
+ }, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes)))
+ switch {
+ case err == context.Canceled:
+ return nil, nil
+ case err != nil:
+ return nil, err
+ default:
+ return res.ReceivedMessages, nil
+ }
+}
+
+func (it *messageIterator) recvMessages() ([]*pb.ReceivedMessage, error) {
+ res, err := it.ps.Recv()
+ if err != nil {
+ return nil, err
+ }
+ return res.ReceivedMessages, nil
+}
+
+// sender runs in a goroutine and handles all sends to the stream.
+func (it *messageIterator) sender() {
+ defer it.wg.Done()
+ defer it.ackTicker.Stop()
+ defer it.nackTicker.Stop()
+ defer it.pingTicker.Stop()
+ defer func() {
+ if it.ps != nil {
+ it.ps.CloseSend()
+ }
+ }()
+
+ done := false
+ for !done {
+ sendAcks := false
+ sendNacks := false
+ sendModAcks := false
+ sendPing := false
+
+ dl := it.ackDeadline()
+
+ select {
+ case <-it.failed:
+ // Stream failed: nothing to do, so stop immediately.
+ return
+
+ case <-it.drained:
+ // All outstanding messages have been marked done:
+ // nothing left to do except make the final calls.
+ it.mu.Lock()
+ sendAcks = (len(it.pendingAcks) > 0)
+ sendNacks = (len(it.pendingNacks) > 0)
+ // No point in sending modacks.
+ done = true
+
+ case <-it.kaTick:
+ it.mu.Lock()
+ it.handleKeepAlives()
+ sendModAcks = (len(it.pendingModAcks) > 0)
+
+ nextTick := dl - gracePeriod
+ if nextTick <= 0 {
+ // If the deadline is <= gracePeriod, let's tick again halfway to
+ // the deadline.
+ nextTick = dl / 2
+ }
+ it.kaTick = time.After(nextTick)
+
+ case <-it.nackTicker.C:
+ it.mu.Lock()
+ sendNacks = (len(it.pendingNacks) > 0)
+
+ case <-it.ackTicker.C:
+ it.mu.Lock()
+ sendAcks = (len(it.pendingAcks) > 0)
+
+ case <-it.pingTicker.C:
+ it.mu.Lock()
+ // Ping only if we are processing messages via streaming.
+ sendPing = !it.po.synchronous && (len(it.keepAliveDeadlines) > 0)
+ }
+ // Lock is held here.
+ var acks, nacks, modAcks map[string]bool
+ if sendAcks {
+ acks = it.pendingAcks
+ it.pendingAcks = map[string]bool{}
+ }
+ if sendNacks {
+ nacks = it.pendingNacks
+ it.pendingNacks = map[string]bool{}
+ }
+ if sendModAcks {
+ modAcks = it.pendingModAcks
+ it.pendingModAcks = map[string]bool{}
+ }
+ it.mu.Unlock()
+ // Make Ack and ModAck RPCs.
+ if sendAcks {
+ if !it.sendAck(acks) {
+ return
+ }
+ }
+ if sendNacks {
+ // Nack indicated by modifying the deadline to zero.
+ if !it.sendModAck(nacks, 0) {
+ return
+ }
+ }
+ if sendModAcks {
+ if !it.sendModAck(modAcks, dl) {
+ return
+ }
+ }
+ if sendPing {
+ it.pingStream()
+ }
+ }
+}
+
+// handleKeepAlives modifies the pending request to include deadline extensions
+// for live messages. It also purges expired messages.
+//
+// Called with the lock held.
+func (it *messageIterator) handleKeepAlives() {
+ now := time.Now()
+ for id, expiry := range it.keepAliveDeadlines {
+ if expiry.Before(now) {
+ // This delete will not result in skipping any map items, as implied by
+ // the spec at https://golang.org/ref/spec#For_statements, "For
+ // statements with range clause", note 3, and stated explicitly at
+ // https://groups.google.com/forum/#!msg/golang-nuts/UciASUb03Js/pzSq5iVFAQAJ.
+ delete(it.keepAliveDeadlines, id)
+ } else {
+ // This will not conflict with a nack, because nacking removes the ID from keepAliveDeadlines.
+ it.pendingModAcks[id] = true
+ }
+ }
+ it.checkDrained()
+}
+
+func (it *messageIterator) sendAck(m map[string]bool) bool {
+ // Account for the Subscription field.
+ overhead := calcFieldSizeString(it.subName)
+ return it.sendAckIDRPC(m, maxPayload-overhead, func(ids []string) error {
+ recordStat(it.ctx, AckCount, int64(len(ids)))
+ addAcks(ids)
+ // Use context.Background() as the call's context, not it.ctx. We don't
+ // want to cancel this RPC when the iterator is stopped.
+ return it.subc.Acknowledge(context.Background(), &pb.AcknowledgeRequest{
+ Subscription: it.subName,
+ AckIds: ids,
+ })
+ })
+}
+
+// The receipt mod-ack amount is derived from a percentile distribution based
+// on the time it takes to process messages. The percentile chosen is the 99th
+// percentile in order to capture the highest amount of time necessary without
+// considering the 1% of outliers.
+func (it *messageIterator) sendModAck(m map[string]bool, deadline time.Duration) bool {
+ deadlineSec := int32(deadline / time.Second)
+ // Account for the Subscription and AckDeadlineSeconds fields.
+ overhead := calcFieldSizeString(it.subName) + calcFieldSizeInt(int(deadlineSec))
+ return it.sendAckIDRPC(m, maxPayload-overhead, func(ids []string) error {
+ if deadline == 0 {
+ recordStat(it.ctx, NackCount, int64(len(ids)))
+ } else {
+ recordStat(it.ctx, ModAckCount, int64(len(ids)))
+ }
+ addModAcks(ids, deadlineSec)
+ // Retry this RPC on Unavailable for a short amount of time, then give up
+ // without returning a fatal error. The utility of this RPC is by nature
+ // transient (since the deadline is relative to the current time) and it
+ // isn't crucial for correctness (since expired messages will just be
+ // resent).
+ cctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+ bo := gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: time.Second,
+ Multiplier: 2,
+ }
+ for {
+ err := it.subc.ModifyAckDeadline(cctx, &pb.ModifyAckDeadlineRequest{
+ Subscription: it.subName,
+ AckDeadlineSeconds: deadlineSec,
+ AckIds: ids,
+ })
+ switch status.Code(err) {
+ case codes.Unavailable:
+ if err := gax.Sleep(cctx, bo.Pause()); err == nil {
+ continue
+ }
+ // Treat sleep timeout like RPC timeout.
+ fallthrough
+ case codes.DeadlineExceeded:
+ // Timeout. Not a fatal error, but note that it happened.
+ recordStat(it.ctx, ModAckTimeoutCount, 1)
+ return nil
+ default:
+ // Any other error is fatal.
+ return err
+ }
+ }
+ })
+}
+
+func (it *messageIterator) sendAckIDRPC(ackIDSet map[string]bool, maxSize int, call func([]string) error) bool {
+ ackIDs := make([]string, 0, len(ackIDSet))
+ for k := range ackIDSet {
+ ackIDs = append(ackIDs, k)
+ }
+ var toSend []string
+ for len(ackIDs) > 0 {
+ toSend, ackIDs = splitRequestIDs(ackIDs, maxSize)
+ if err := call(toSend); err != nil {
+ // The underlying client handles retries, so any error is fatal to the
+ // iterator.
+ it.fail(err)
+ return false
+ }
+ }
+ return true
+}
+
+// Send a message to the stream to keep it open. The stream will close if there's no
+// traffic on it for a while. By keeping it open, we delay the start of the
+// expiration timer on messages that are buffered by gRPC or elsewhere in the
+// network. This matters if it takes a long time to process messages relative to the
+// default ack deadline, and if the messages are small enough so that many can fit
+// into the buffer.
+func (it *messageIterator) pingStream() {
+ // Ignore error; if the stream is broken, this doesn't matter anyway.
+ _ = it.ps.Send(&pb.StreamingPullRequest{})
+}
+
+// calcFieldSizeString returns the number of bytes string fields
+// will take up in an encoded proto message.
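+// For example, a 5-byte string encodes as 1 (tag) + 1 (length varint) + 5 = 7 bytes.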
+func calcFieldSizeString(fields ...string) int {
+ overhead := 0
+ for _, field := range fields {
+ overhead += 1 + len(field) + proto.SizeVarint(uint64(len(field)))
+ }
+ return overhead
+}
+
+// calcFieldSizeInt returns the number of bytes int fields
+// will take up in an encoded proto message.
+func calcFieldSizeInt(fields ...int) int {
+ overhead := 0
+ for _, field := range fields {
+ overhead += 1 + proto.SizeVarint(uint64(field))
+ }
+ return overhead
+}
+
+// splitRequestIDs takes a slice of ackIDs and returns two slices such that the first
+// ackID slice can be used in a request where the payload does not exceed maxSize.
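+// For example (a sketch): if maxSize accommodates exactly the first two IDs,
+// splitRequestIDs([]string{"a", "b", "c"}, maxSize) returns
+// ([]string{"a", "b"}, []string{"c"}).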
+func splitRequestIDs(ids []string, maxSize int) (prefix, remainder []string) {
+ size := 0
+ i := 0
+ // TODO(hongalex): Use binary search to find split index, since ackIDs are
+ // fairly constant.
+ for size < maxSize && i < len(ids) {
+ size += calcFieldSizeString(ids[i])
+ i++
+ }
+ if size > maxSize {
+ i--
+ }
+ return ids[:i], ids[i:]
+}
+
+// The deadline to ack is derived from a percentile distribution based
+// on the time it takes to process messages. The percentile chosen is the 99th
+// percentile - that is, processing times up to the 99th percentile of the
+// longest processing times should be safe. The highest 1% may expire. This
+// number was chosen as a way to cover most users' use cases without losing
+// the value of expiration.
+func (it *messageIterator) ackDeadline() time.Duration {
+ pt := time.Duration(it.ackTimeDist.Percentile(.99)) * time.Second
+
+ if pt > maxAckDeadline {
+ return maxAckDeadline
+ }
+ if pt < minAckDeadline {
+ return minAckDeadline
+ }
+ return pt
+}
diff --git a/vendor/cloud.google.com/go/pubsub/message.go b/vendor/cloud.google.com/go/pubsub/message.go
new file mode 100644
index 000000000..c4b16e95f
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/message.go
@@ -0,0 +1,100 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+ "time"
+
+ "github.com/golang/protobuf/ptypes"
+ pb "google.golang.org/genproto/googleapis/pubsub/v1"
+)
+
+// Message represents a Pub/Sub message.
+type Message struct {
+ // ID identifies this message.
+ // This ID is assigned by the server and is populated for Messages obtained from a subscription.
+ // This field is read-only.
+ ID string
+
+ // Data is the actual data in the message.
+ Data []byte
+
+ // Attributes represents the key-value pairs the current message
+ // is labelled with.
+ Attributes map[string]string
+
+ // ackID is the identifier to acknowledge this message.
+ ackID string
+
+ // The time at which the message was published.
+ // This is populated by the server for Messages obtained from a subscription.
+ // This field is read-only.
+ PublishTime time.Time
+
+ // receiveTime is the time the message was received by the client.
+ receiveTime time.Time
+
+ // size is the approximate size of the message's data and attributes.
+ size int
+
+ calledDone bool
+
+ // The done method of the iterator that created this Message.
+ doneFunc func(string, bool, time.Time)
+}
+
+func toMessage(resp *pb.ReceivedMessage) (*Message, error) {
+ if resp.Message == nil {
+ return &Message{ackID: resp.AckId}, nil
+ }
+
+ pubTime, err := ptypes.Timestamp(resp.Message.PublishTime)
+ if err != nil {
+ return nil, err
+ }
+ return &Message{
+ ackID: resp.AckId,
+ Data: resp.Message.Data,
+ Attributes: resp.Message.Attributes,
+ ID: resp.Message.MessageId,
+ PublishTime: pubTime,
+ }, nil
+}
+
+// Ack indicates successful processing of a Message passed to the Subscriber.Receive callback.
+// It should not be called on any other Message value.
+// If message acknowledgement fails, the Message will be redelivered.
+// Client code must call Ack or Nack when finished for each received Message.
+// Calls to Ack or Nack have no effect after the first call.
+func (m *Message) Ack() {
+ m.done(true)
+}
+
+// Nack indicates that the client will not or cannot process a Message passed to the Subscriber.Receive callback.
+// It should not be called on any other Message value.
+// Nack will result in the Message being redelivered more quickly than if it were allowed to expire.
+// Client code must call Ack or Nack when finished for each received Message.
+// Calls to Ack or Nack have no effect after the first call.
+func (m *Message) Nack() {
+ m.done(false)
+}
+
+func (m *Message) done(ack bool) {
+ if m.calledDone {
+ return
+ }
+ m.calledDone = true
+ m.doneFunc(m.ackID, ack, m.receiveTime)
+}
diff --git a/vendor/cloud.google.com/go/pubsub/nodebug.go b/vendor/cloud.google.com/go/pubsub/nodebug.go
new file mode 100644
index 000000000..774a74a58
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/nodebug.go
@@ -0,0 +1,25 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !psdebug
+
+package pubsub
+
+import "time"
+
+func addRecv(string, string, time.Time) {}
+
+func addAcks([]string) {}
+
+func addModAcks([]string, int32) {}
diff --git a/vendor/cloud.google.com/go/pubsub/pubsub.go b/vendor/cloud.google.com/go/pubsub/pubsub.go
new file mode 100644
index 000000000..9dead694b
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/pubsub.go
@@ -0,0 +1,108 @@
+// Copyright 2014 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub // import "cloud.google.com/go/pubsub"
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "runtime"
+ "time"
+
+ "cloud.google.com/go/internal/version"
+ vkit "cloud.google.com/go/pubsub/apiv1"
+ "google.golang.org/api/option"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/keepalive"
+)
+
+const (
+ // ScopePubSub grants permissions to view and manage Pub/Sub
+ // topics and subscriptions.
+ ScopePubSub = "https://www.googleapis.com/auth/pubsub"
+
+ // ScopeCloudPlatform grants permissions to view and manage your data
+ // across Google Cloud Platform services.
+ ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform"
+
+ maxAckDeadline = 10 * time.Minute
+)
+
+// Client is a Google Pub/Sub client scoped to a single project.
+//
+// Clients should be reused rather than being created as needed.
+// A Client may be shared by multiple goroutines.
+type Client struct {
+ projectID string
+ pubc *vkit.PublisherClient
+ subc *vkit.SubscriberClient
+}
+
+// NewClient creates a new PubSub client.
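+//
+// If the PUBSUB_EMULATOR_HOST environment variable is set (for example, by
+// the gcloud Pub/Sub emulator), NewClient connects to that address over an
+// insecure gRPC connection instead of to the production service.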
+func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (c *Client, err error) {
+ var o []option.ClientOption
+ // Environment variables for gcloud emulator:
+ // https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/
+ if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" {
+ conn, err := grpc.Dial(addr, grpc.WithInsecure())
+ if err != nil {
+ return nil, fmt.Errorf("grpc.Dial: %v", err)
+ }
+ o = []option.ClientOption{option.WithGRPCConn(conn)}
+ } else {
+ o = []option.ClientOption{
+ // Create multiple connections to increase throughput.
+ option.WithGRPCConnectionPool(runtime.GOMAXPROCS(0)),
+ option.WithGRPCDialOption(grpc.WithKeepaliveParams(keepalive.ClientParameters{
+ Time: 5 * time.Minute,
+ })),
+ }
+ o = append(o, openCensusOptions()...)
+ }
+ o = append(o, opts...)
+ pubc, err := vkit.NewPublisherClient(ctx, o...)
+ if err != nil {
+ return nil, fmt.Errorf("pubsub: %v", err)
+ }
+ subc, err := vkit.NewSubscriberClient(ctx, option.WithGRPCConn(pubc.Connection()))
+ if err != nil {
+ // Should never happen, since we are passing in the connection.
+ // If it does, we cannot close, because the user may have passed in their
+ // own connection originally.
+ return nil, fmt.Errorf("pubsub: %v", err)
+ }
+ pubc.SetGoogleClientInfo("gccl", version.Repo)
+ return &Client{
+ projectID: projectID,
+ pubc: pubc,
+ subc: subc,
+ }, nil
+}
+
+// Close releases any resources held by the client,
+// such as memory and goroutines.
+//
+// If the client is available for the lifetime of the program, then Close need not be
+// called at exit.
+func (c *Client) Close() error {
+ // Return the first error, because the first call closes the connection.
+ err := c.pubc.Close()
+ _ = c.subc.Close()
+ return err
+}
+
+func (c *Client) fullyQualifiedProjectName() string {
+ return fmt.Sprintf("projects/%s", c.projectID)
+}
diff --git a/vendor/cloud.google.com/go/pubsub/pullstream.go b/vendor/cloud.google.com/go/pubsub/pullstream.go
new file mode 100644
index 000000000..1c332ef20
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/pullstream.go
@@ -0,0 +1,192 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+ "context"
+ "io"
+ "sync"
+ "time"
+
+ gax "github.com/googleapis/gax-go/v2"
+ pb "google.golang.org/genproto/googleapis/pubsub/v1"
+ "google.golang.org/grpc"
+)
+
+// A pullStream supports the methods of a StreamingPullClient, but re-opens
+// the stream on a retryable error.
+type pullStream struct {
+ ctx context.Context
+ open func() (pb.Subscriber_StreamingPullClient, error)
+
+ mu sync.Mutex
+ spc *pb.Subscriber_StreamingPullClient
+ err error // permanent error
+}
+
+// for testing
+type streamingPullFunc func(context.Context, ...gax.CallOption) (pb.Subscriber_StreamingPullClient, error)
+
+func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName string) *pullStream {
+ ctx = withSubscriptionKey(ctx, subName)
+ return &pullStream{
+ ctx: ctx,
+ open: func() (pb.Subscriber_StreamingPullClient, error) {
+ spc, err := streamingPull(ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes)))
+ if err == nil {
+ recordStat(ctx, StreamRequestCount, 1)
+ err = spc.Send(&pb.StreamingPullRequest{
+ Subscription: subName,
+ // We modack messages when we receive them, so this value doesn't matter too much.
+ StreamAckDeadlineSeconds: 60,
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+ return spc, nil
+ },
+ }
+}
+
+// get returns either a valid *StreamingPullClient (SPC), or a permanent error.
+// If the argument is nil, this is the first call for an RPC, and the current
+// SPC will be returned (or a new one will be opened). Otherwise, this call is a
+// request to re-open the stream because of a retryable error, and the argument
+// is a pointer to the SPC that returned the error.
+func (s *pullStream) get(spc *pb.Subscriber_StreamingPullClient) (*pb.Subscriber_StreamingPullClient, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ // A stored error is permanent.
+ if s.err != nil {
+ return nil, s.err
+ }
+ // If the context is done, so are we.
+ s.err = s.ctx.Err()
+ if s.err != nil {
+ return nil, s.err
+ }
+
+ // If the current and argument SPCs differ, return the current one. This subsumes two cases:
+ // 1. We have an SPC and the caller is getting the stream for the first time.
+ // 2. The caller wants to retry, but they have an older SPC; we've already retried.
+ if spc != s.spc {
+ return s.spc, nil
+ }
+ // Either this is the very first call on this stream (s.spc == nil), or we have a valid
+ // retry request. Either way, open a new stream.
+ // The lock is held here for a long time, but it doesn't matter because no callers could get
+ // anything done anyway.
+ s.spc = new(pb.Subscriber_StreamingPullClient)
+ *s.spc, s.err = s.openWithRetry() // Any error from openWithRetry is permanent.
+ return s.spc, s.err
+}
+
+func (s *pullStream) openWithRetry() (pb.Subscriber_StreamingPullClient, error) {
+ r := defaultRetryer{}
+ for {
+ recordStat(s.ctx, StreamOpenCount, 1)
+ spc, err := s.open()
+ bo, shouldRetry := r.Retry(err)
+ if err != nil && shouldRetry {
+ recordStat(s.ctx, StreamRetryCount, 1)
+ if err := gax.Sleep(s.ctx, bo); err != nil {
+ return nil, err
+ }
+ continue
+ }
+ return spc, err
+ }
+}
+
+func (s *pullStream) call(f func(pb.Subscriber_StreamingPullClient) error, opts ...gax.CallOption) error {
+ var settings gax.CallSettings
+ for _, opt := range opts {
+ opt.Resolve(&settings)
+ }
+ var r gax.Retryer = &defaultRetryer{}
+ if settings.Retry != nil {
+ r = settings.Retry()
+ }
+
+ var (
+ spc *pb.Subscriber_StreamingPullClient
+ err error
+ )
+ for {
+ spc, err = s.get(spc)
+ if err != nil {
+ return err
+ }
+ start := time.Now()
+ err = f(*spc)
+ if err != nil {
+ bo, shouldRetry := r.Retry(err)
+ if shouldRetry {
+ recordStat(s.ctx, StreamRetryCount, 1)
+ if time.Since(start) < 30*time.Second { // don't sleep if we've been blocked for a while
+ if err := gax.Sleep(s.ctx, bo); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ s.mu.Lock()
+ s.err = err
+ s.mu.Unlock()
+ }
+ return err
+ }
+}
+
+func (s *pullStream) Send(req *pb.StreamingPullRequest) error {
+ return s.call(func(spc pb.Subscriber_StreamingPullClient) error {
+ recordStat(s.ctx, AckCount, int64(len(req.AckIds)))
+ zeroes := 0
+ for _, mds := range req.ModifyDeadlineSeconds {
+ if mds == 0 {
+ zeroes++
+ }
+ }
+ recordStat(s.ctx, NackCount, int64(zeroes))
+ recordStat(s.ctx, ModAckCount, int64(len(req.ModifyDeadlineSeconds)-zeroes))
+ recordStat(s.ctx, StreamRequestCount, 1)
+ return spc.Send(req)
+ })
+}
+
+func (s *pullStream) Recv() (*pb.StreamingPullResponse, error) {
+ var res *pb.StreamingPullResponse
+ err := s.call(func(spc pb.Subscriber_StreamingPullClient) error {
+ var err error
+ recordStat(s.ctx, StreamResponseCount, 1)
+ res, err = spc.Recv()
+ if err == nil {
+ recordStat(s.ctx, PullCount, int64(len(res.ReceivedMessages)))
+ }
+ return err
+ }, gax.WithRetry(func() gax.Retryer { return &streamingPullRetryer{defaultRetryer: &defaultRetryer{}} }))
+ return res, err
+}
+
+func (s *pullStream) CloseSend() error {
+ err := s.call(func(spc pb.Subscriber_StreamingPullClient) error {
+ return spc.CloseSend()
+ })
+ s.mu.Lock()
+ s.err = io.EOF // should not be retried
+ s.mu.Unlock()
+ return err
+}
diff --git a/vendor/cloud.google.com/go/pubsub/service.go b/vendor/cloud.google.com/go/pubsub/service.go
new file mode 100644
index 000000000..a22b9147f
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/service.go
@@ -0,0 +1,100 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+ "fmt"
+ "math"
+ "strings"
+ "time"
+
+ gax "github.com/googleapis/gax-go/v2"
+ pb "google.golang.org/genproto/googleapis/pubsub/v1"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// maxPayload is the maximum number of bytes to devote to the
+// encoded AcknowledgementRequest / ModifyAckDeadline proto message.
+//
+// With gRPC there is no way for the client to know the server's max message size (it is
+// configurable on the server). We know from experience that it
+// is 512K.
+const (
+ maxPayload = 512 * 1024
+ maxSendRecvBytes = 20 * 1024 * 1024 // 20M
+)
+
+func convertMessages(rms []*pb.ReceivedMessage) ([]*Message, error) {
+ msgs := make([]*Message, 0, len(rms))
+ for i, m := range rms {
+ msg, err := toMessage(m)
+ if err != nil {
+ return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m)
+ }
+ msgs = append(msgs, msg)
+ }
+ return msgs, nil
+}
+
+func trunc32(i int64) int32 {
+ if i > math.MaxInt32 {
+ i = math.MaxInt32
+ }
+ return int32(i)
+}
+
+type defaultRetryer struct {
+ bo gax.Backoff
+}
+
+// Logic originally from
+// https://github.com/GoogleCloudPlatform/google-cloud-java/blob/master/google-cloud-clients/google-cloud-pubsub/src/main/java/com/google/cloud/pubsub/v1/StatusUtil.java
+func (r *defaultRetryer) Retry(err error) (pause time.Duration, shouldRetry bool) {
+ s, ok := status.FromError(err)
+ if !ok { // includes io.EOF, normal stream close, which causes us to reopen
+ return r.bo.Pause(), true
+ }
+ switch s.Code() {
+ case codes.DeadlineExceeded, codes.Internal, codes.ResourceExhausted, codes.Aborted:
+ return r.bo.Pause(), true
+ case codes.Unavailable:
+ c := strings.Contains(s.Message(), "Server shutdownNow invoked")
+ if !c {
+ return r.bo.Pause(), true
+ }
+ return 0, false
+ default:
+ return 0, false
+ }
+}
+
+type streamingPullRetryer struct {
+ defaultRetryer gax.Retryer
+}
+
+// Does not retry ResourceExhausted. See: https://github.com/GoogleCloudPlatform/google-cloud-go/issues/1166#issuecomment-443744705
+func (r *streamingPullRetryer) Retry(err error) (pause time.Duration, shouldRetry bool) {
+ s, ok := status.FromError(err)
+ if !ok { // call defaultRetryer so that its backoff can be used
+ return r.defaultRetryer.Retry(err)
+ }
+ switch s.Code() {
+ case codes.ResourceExhausted:
+ return 0, false
+ default:
+ return r.defaultRetryer.Retry(err)
+ }
+}
diff --git a/vendor/cloud.google.com/go/pubsub/snapshot.go b/vendor/cloud.google.com/go/pubsub/snapshot.go
new file mode 100644
index 000000000..c2a28d781
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/snapshot.go
@@ -0,0 +1,160 @@
+// Copyright 2017 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/ptypes"
+ pb "google.golang.org/genproto/googleapis/pubsub/v1"
+)
+
+// Snapshot is a reference to a PubSub snapshot.
+type Snapshot struct {
+ c *Client
+
+ // The fully qualified identifier for the snapshot, in the format "projects/<projid>/snapshots/<snap>"
+ name string
+}
+
+// ID returns the unique identifier of the snapshot within its project.
+func (s *Snapshot) ID() string {
+ slash := strings.LastIndex(s.name, "/")
+ if slash == -1 {
+ // name is not a fully-qualified name.
+ panic("bad snapshot name")
+ }
+ return s.name[slash+1:]
+}
+
+// SnapshotConfig contains the details of a Snapshot.
+type SnapshotConfig struct {
+ *Snapshot
+ Topic *Topic
+ Expiration time.Time
+}
+
+// Snapshot creates a reference to a snapshot.
+func (c *Client) Snapshot(id string) *Snapshot {
+ return &Snapshot{
+ c: c,
+ name: fmt.Sprintf("projects/%s/snapshots/%s", c.projectID, id),
+ }
+}
+
+// Snapshots returns an iterator which returns snapshots for this project.
+func (c *Client) Snapshots(ctx context.Context) *SnapshotConfigIterator {
+ it := c.subc.ListSnapshots(ctx, &pb.ListSnapshotsRequest{
+ Project: c.fullyQualifiedProjectName(),
+ })
+ next := func() (*SnapshotConfig, error) {
+ snap, err := it.Next()
+ if err != nil {
+ return nil, err
+ }
+ return toSnapshotConfig(snap, c)
+ }
+ return &SnapshotConfigIterator{next: next}
+}
+
+// SnapshotConfigIterator is an iterator that returns a series of snapshots.
+type SnapshotConfigIterator struct {
+ next func() (*SnapshotConfig, error)
+}
+
+// Next returns the next SnapshotConfig. Its second return value is iterator.Done if there are no more results.
+// Once Next returns iterator.Done, all subsequent calls will return iterator.Done.
+func (snaps *SnapshotConfigIterator) Next() (*SnapshotConfig, error) {
+ return snaps.next()
+}
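+
+// Example (illustrative sketch; assumes an existing *Client c and uses
+// iterator.Done from google.golang.org/api/iterator):
+//
+//  it := c.Snapshots(ctx)
+//  for {
+//      sc, err := it.Next()
+//      if err == iterator.Done {
+//          break
+//      }
+//      if err != nil {
+//          // TODO: handle error.
+//      }
+//      _ = sc // use the SnapshotConfig
+//  }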
+
+// Delete deletes a snapshot.
+func (s *Snapshot) Delete(ctx context.Context) error {
+ return s.c.subc.DeleteSnapshot(ctx, &pb.DeleteSnapshotRequest{Snapshot: s.name})
+}
+
+// SeekToTime seeks the subscription to a point in time.
+//
+// Messages retained in the subscription that were published before this
+// time are marked as acknowledged, and messages retained in the
+// subscription that were published after this time are marked as
+// unacknowledged. Note that this operation affects only those messages
+// retained in the subscription (configured by the subscription's
+// RetentionDuration and RetainAckedMessages settings). For example,
+// if `time` corresponds to a point before the message retention
+// window (or to a point before the system's notion of the subscription
+// creation time), only retained messages will be marked as unacknowledged,
+// and already-expunged messages will not be restored.
+func (s *Subscription) SeekToTime(ctx context.Context, t time.Time) error {
+ ts, err := ptypes.TimestampProto(t)
+ if err != nil {
+ return err
+ }
+ _, err = s.c.subc.Seek(ctx, &pb.SeekRequest{
+ Subscription: s.name,
+ Target: &pb.SeekRequest_Time{Time: ts},
+ })
+ return err
+}
+
+// CreateSnapshot creates a new snapshot from this subscription.
+// The snapshot will be for the topic this subscription is subscribed to.
+// If name is the empty string, a unique name is assigned.
+//
+// The created snapshot is guaranteed to retain:
+// (a) The existing backlog on the subscription. More precisely, this is
+// defined as the messages in the subscription's backlog that are
+// unacknowledged when Snapshot returns without error.
+// (b) Any messages published to the subscription's topic following
+// Snapshot returning without error.
+func (s *Subscription) CreateSnapshot(ctx context.Context, name string) (*SnapshotConfig, error) {
+ if name != "" {
+ name = fmt.Sprintf("projects/%s/snapshots/%s", strings.Split(s.name, "/")[1], name)
+ }
+ snap, err := s.c.subc.CreateSnapshot(ctx, &pb.CreateSnapshotRequest{
+ Name: name,
+ Subscription: s.name,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return toSnapshotConfig(snap, s.c)
+}
+
+// SeekToSnapshot seeks the subscription to a snapshot.
+//
+// The snapshot need not be created from this subscription,
+// but it must be for the topic this subscription is subscribed to.
+func (s *Subscription) SeekToSnapshot(ctx context.Context, snap *Snapshot) error {
+ _, err := s.c.subc.Seek(ctx, &pb.SeekRequest{
+ Subscription: s.name,
+ Target: &pb.SeekRequest_Snapshot{Snapshot: snap.name},
+ })
+ return err
+}
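+
+// Example (illustrative sketch; assumes an existing *Subscription sub, and
+// the snapshot name is an example only):
+//
+//  snapCfg, err := sub.CreateSnapshot(ctx, "my-snapshot")
+//  if err != nil {
+//      // TODO: handle error.
+//  }
+//  // Later, rewind the subscription to the snapshot:
+//  if err := sub.SeekToSnapshot(ctx, snapCfg.Snapshot); err != nil {
+//      // TODO: handle error.
+//  }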
+
+func toSnapshotConfig(snap *pb.Snapshot, c *Client) (*SnapshotConfig, error) {
+ exp, err := ptypes.Timestamp(snap.ExpireTime)
+ if err != nil {
+ return nil, err
+ }
+ return &SnapshotConfig{
+ Snapshot: &Snapshot{c: c, name: snap.Name},
+ Topic: newTopic(c, snap.Topic),
+ Expiration: exp,
+ }, nil
+}
diff --git a/vendor/cloud.google.com/go/pubsub/subscription.go b/vendor/cloud.google.com/go/pubsub/subscription.go
new file mode 100644
index 000000000..bfd9dfb92
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/subscription.go
@@ -0,0 +1,741 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "time"
+
+ "cloud.google.com/go/iam"
+ "cloud.google.com/go/internal/optional"
+ "github.com/golang/protobuf/ptypes"
+ durpb "github.com/golang/protobuf/ptypes/duration"
+ gax "github.com/googleapis/gax-go/v2"
+ "golang.org/x/sync/errgroup"
+ pb "google.golang.org/genproto/googleapis/pubsub/v1"
+ fmpb "google.golang.org/genproto/protobuf/field_mask"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// Subscription is a reference to a PubSub subscription.
+type Subscription struct {
+ c *Client
+
+ // The fully qualified identifier for the subscription, in the format "projects/<projid>/subscriptions/<name>"
+ name string
+
+ // Settings for pulling messages. Configure these before calling Receive.
+ ReceiveSettings ReceiveSettings
+
+ mu sync.Mutex
+ receiveActive bool
+}
+
+// Subscription creates a reference to a subscription.
+func (c *Client) Subscription(id string) *Subscription {
+ return c.SubscriptionInProject(id, c.projectID)
+}
+
+// SubscriptionInProject creates a reference to a subscription in a given project.
+func (c *Client) SubscriptionInProject(id, projectID string) *Subscription {
+ return &Subscription{
+ c: c,
+ name: fmt.Sprintf("projects/%s/subscriptions/%s", projectID, id),
+ }
+}
+
+// String returns the globally unique printable name of the subscription.
+func (s *Subscription) String() string {
+ return s.name
+}
+
+// ID returns the unique identifier of the subscription within its project.
+func (s *Subscription) ID() string {
+ slash := strings.LastIndex(s.name, "/")
+ if slash == -1 {
+ // name is not a fully-qualified name.
+ panic("bad subscription name")
+ }
+ return s.name[slash+1:]
+}
+
+// Subscriptions returns an iterator which returns all of the subscriptions for the client's project.
+func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator {
+ it := c.subc.ListSubscriptions(ctx, &pb.ListSubscriptionsRequest{
+ Project: c.fullyQualifiedProjectName(),
+ })
+ return &SubscriptionIterator{
+ c: c,
+ next: func() (string, error) {
+ sub, err := it.Next()
+ if err != nil {
+ return "", err
+ }
+ return sub.Name, nil
+ },
+ }
+}
+
+// SubscriptionIterator is an iterator that returns a series of subscriptions.
+type SubscriptionIterator struct {
+ c *Client
+ next func() (string, error)
+}
+
+// Next returns the next subscription. If there are no more subscriptions, iterator.Done will be returned.
+func (subs *SubscriptionIterator) Next() (*Subscription, error) {
+ subName, err := subs.next()
+ if err != nil {
+ return nil, err
+ }
+ return &Subscription{c: subs.c, name: subName}, nil
+}
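+
+// Example (illustrative sketch; assumes an existing *Client c and uses
+// iterator.Done from google.golang.org/api/iterator):
+//
+//  it := c.Subscriptions(ctx)
+//  for {
+//      sub, err := it.Next()
+//      if err == iterator.Done {
+//          break
+//      }
+//      if err != nil {
+//          // TODO: handle error.
+//      }
+//      fmt.Println(sub.ID())
+//  }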
+
+// PushConfig contains configuration for subscriptions that operate in push mode.
+type PushConfig struct {
+ // A URL locating the endpoint to which messages should be pushed.
+ Endpoint string
+
+ // Endpoint configuration attributes. See https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions#pushconfig for more details.
+ Attributes map[string]string
+
+ // AuthenticationMethod is used by push endpoints to verify the source
+ // of push requests.
+ // It can be used with push endpoints that are private by default to
+ // allow requests only from the Cloud Pub/Sub system, for example.
+ // This field is optional and should be set only by users interested in
+ // authenticated push.
+ //
+ // It is EXPERIMENTAL and a part of a closed alpha that may not be
+ // accessible to all users. This field is subject to change or removal
+ // without notice.
+ AuthenticationMethod AuthenticationMethod
+}
+
+func (pc *PushConfig) toProto() *pb.PushConfig {
+ if pc == nil {
+ return nil
+ }
+ pbCfg := &pb.PushConfig{
+ Attributes: pc.Attributes,
+ PushEndpoint: pc.Endpoint,
+ }
+ if authMethod := pc.AuthenticationMethod; authMethod != nil {
+ switch am := authMethod.(type) {
+ case *OIDCToken:
+ pbCfg.AuthenticationMethod = am.toProto()
+ default: // TODO: add others here when GAIC adds more definitions.
+ }
+ }
+ return pbCfg
+}
+
+// AuthenticationMethod is used by push endpoints to verify the source of push requests.
+// This interface defines fields that are part of a closed alpha that may not be accessible
+// to all users.
+type AuthenticationMethod interface {
+ isAuthMethod() bool
+}
+
+// OIDCToken allows PushConfigs to be authenticated using
+// the OpenID Connect protocol https://openid.net/connect/
+type OIDCToken struct {
+ // Audience to be used when generating OIDC token. The audience claim
+ // identifies the recipients that the JWT is intended for. The audience
+ // value is a single case-sensitive string. Having multiple values (array)
+ // for the audience field is not supported. More info about the OIDC JWT
+ // token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3
+ // Note: if not specified, the Push endpoint URL will be used.
+ Audience string
+
+ // The service account email to be used for generating the OpenID Connect token.
+ // Callers of:
+ // * CreateSubscription
+ // * UpdateSubscription
+ // * ModifyPushConfig
+ // must have the iam.serviceAccounts.actAs permission for the service account.
+ // See https://cloud.google.com/iam/docs/understanding-roles#service-accounts-roles.
+ ServiceAccountEmail string
+}
+
+var _ AuthenticationMethod = (*OIDCToken)(nil)
+
+func (oidcToken *OIDCToken) isAuthMethod() bool { return true }
+
+func (oidcToken *OIDCToken) toProto() *pb.PushConfig_OidcToken_ {
+ if oidcToken == nil {
+ return nil
+ }
+ return &pb.PushConfig_OidcToken_{
+ OidcToken: &pb.PushConfig_OidcToken{
+ Audience: oidcToken.Audience,
+ ServiceAccountEmail: oidcToken.ServiceAccountEmail,
+ },
+ }
+}
+
+// SubscriptionConfig describes the configuration of a subscription.
+type SubscriptionConfig struct {
+ Topic *Topic
+ PushConfig PushConfig
+
+ // The default maximum time after a subscriber receives a message before
+ // the subscriber should acknowledge the message. Note: messages which are
+ // obtained via Subscription.Receive need not be acknowledged within this
+ // deadline, as the deadline will be automatically extended.
+ AckDeadline time.Duration
+
+ // Whether to retain acknowledged messages. If true, acknowledged messages
+ // will not be expunged until they fall out of the RetentionDuration window.
+ RetainAckedMessages bool
+
+ // How long to retain messages in backlog, from the time of publish. If
+ // RetainAckedMessages is true, this duration affects the retention of
+ // acknowledged messages, otherwise only unacknowledged messages are retained.
+ // Defaults to 7 days. Cannot be longer than 7 days or shorter than 10 minutes.
+ RetentionDuration time.Duration
+
+ // Expiration policy specifies the conditions for a subscription's expiration.
+ // A subscription is considered active as long as any connected subscriber is
+ // successfully consuming messages from the subscription or is issuing
+ // operations on the subscription. If `expiration_policy` is not set, a
+ // *default policy* with `ttl` of 31 days will be used. The minimum allowed
+ // value for `expiration_policy.ttl` is 1 day.
+ //
+ // Use time.Duration(0) to indicate that the subscription should never expire.
+ //
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ ExpirationPolicy optional.Duration
+
+ // The set of labels for the subscription.
+ Labels map[string]string
+}
+
+func (cfg *SubscriptionConfig) toProto(name string) *pb.Subscription {
+ var pbPushConfig *pb.PushConfig
+ if cfg.PushConfig.Endpoint != "" || len(cfg.PushConfig.Attributes) != 0 || cfg.PushConfig.AuthenticationMethod != nil {
+ pbPushConfig = cfg.PushConfig.toProto()
+ }
+ var retentionDuration *durpb.Duration
+ if cfg.RetentionDuration != 0 {
+ retentionDuration = ptypes.DurationProto(cfg.RetentionDuration)
+ }
+ return &pb.Subscription{
+ Name: name,
+ Topic: cfg.Topic.name,
+ PushConfig: pbPushConfig,
+ AckDeadlineSeconds: trunc32(int64(cfg.AckDeadline.Seconds())),
+ RetainAckedMessages: cfg.RetainAckedMessages,
+ MessageRetentionDuration: retentionDuration,
+ Labels: cfg.Labels,
+ ExpirationPolicy: expirationPolicyToProto(cfg.ExpirationPolicy),
+ }
+}
+
+func protoToSubscriptionConfig(pbSub *pb.Subscription, c *Client) (SubscriptionConfig, error) {
+ rd := time.Hour * 24 * 7
+ var err error
+ if pbSub.MessageRetentionDuration != nil {
+ rd, err = ptypes.Duration(pbSub.MessageRetentionDuration)
+ if err != nil {
+ return SubscriptionConfig{}, err
+ }
+ }
+ var expirationPolicy time.Duration
+ if ttl := pbSub.ExpirationPolicy.GetTtl(); ttl != nil {
+ expirationPolicy, err = ptypes.Duration(ttl)
+ if err != nil {
+ return SubscriptionConfig{}, err
+ }
+ }
+ subC := SubscriptionConfig{
+ Topic: newTopic(c, pbSub.Topic),
+ AckDeadline: time.Second * time.Duration(pbSub.AckDeadlineSeconds),
+ RetainAckedMessages: pbSub.RetainAckedMessages,
+ RetentionDuration: rd,
+ Labels: pbSub.Labels,
+ ExpirationPolicy: expirationPolicy,
+ }
+ pc := protoToPushConfig(pbSub.PushConfig)
+ if pc != nil {
+ subC.PushConfig = *pc
+ }
+ return subC, nil
+}
+
+func protoToPushConfig(pbPc *pb.PushConfig) *PushConfig {
+ if pbPc == nil {
+ return nil
+ }
+ pc := &PushConfig{
+ Endpoint: pbPc.PushEndpoint,
+ Attributes: pbPc.Attributes,
+ }
+ if am := pbPc.AuthenticationMethod; am != nil {
+ if oidcToken, ok := am.(*pb.PushConfig_OidcToken_); ok && oidcToken != nil && oidcToken.OidcToken != nil {
+ pc.AuthenticationMethod = &OIDCToken{
+ Audience: oidcToken.OidcToken.GetAudience(),
+ ServiceAccountEmail: oidcToken.OidcToken.GetServiceAccountEmail(),
+ }
+ }
+ }
+ return pc
+}
+
+// ReceiveSettings configure the Receive method.
+// A zero ReceiveSettings will result in values equivalent to DefaultReceiveSettings.
+type ReceiveSettings struct {
+ // MaxExtension is the maximum period for which the Subscription should
+ // automatically extend the ack deadline for each message.
+ //
+ // The Subscription will automatically extend the ack deadline of all
+ // fetched Messages up to the duration specified. Automatic deadline
+ // extension beyond the initial receipt may be disabled by specifying a
+ // duration less than 0.
+ MaxExtension time.Duration
+
+ // MaxOutstandingMessages is the maximum number of unprocessed messages
+ // (unacknowledged but not yet expired). If MaxOutstandingMessages is 0, it
+ // will be treated as if it were DefaultReceiveSettings.MaxOutstandingMessages.
+ // If the value is negative, then there will be no limit on the number of
+ // unprocessed messages.
+ MaxOutstandingMessages int
+
+ // MaxOutstandingBytes is the maximum size of unprocessed messages
+ // (unacknowledged but not yet expired). If MaxOutstandingBytes is 0, it will
+ // be treated as if it were DefaultReceiveSettings.MaxOutstandingBytes. If
+ // the value is negative, then there will be no limit on the number of bytes
+ // for unprocessed messages.
+ MaxOutstandingBytes int
+
+ // NumGoroutines is the number of goroutines Receive will spawn to pull
+ // messages concurrently. If NumGoroutines is less than 1, it will be treated
+ // as if it were DefaultReceiveSettings.NumGoroutines.
+ //
+ // NumGoroutines does not limit the number of messages that can be processed
+ // concurrently. Even with one goroutine, many messages might be processed at
+ // once, because that goroutine may continually receive messages and invoke the
+ // function passed to Receive on them. To limit the number of messages being
+ // processed concurrently, set MaxOutstandingMessages.
+ NumGoroutines int
+
+ // If Synchronous is true, then no more than MaxOutstandingMessages will be in
+ // memory at one time. (In contrast, when Synchronous is false, more than
+ // MaxOutstandingMessages may have been received from the service and in memory
+ // before being processed.) MaxOutstandingBytes still refers to the total bytes
+ // processed, rather than in memory. NumGoroutines is ignored.
+ // The default is false.
+ Synchronous bool
+}
+
+// For synchronous receive, the time to wait if we are already processing
+// MaxOutstandingMessages. There is no point calling Pull and asking for zero
+// messages, so we pause to allow some message-processing callbacks to finish.
+//
+// The wait time is large enough to avoid consuming significant CPU, but
+// small enough to provide decent throughput. Users who want better
+// throughput should not be using synchronous mode.
+//
+// Waiting might seem like polling, so it's natural to think we could do better by
+// noticing when a callback is finished and immediately calling Pull. But if
+// callbacks finish in quick succession, this will result in frequent Pull RPCs that
+// request a single message, which wastes network bandwidth. Better to wait for a few
+// callbacks to finish, so we make fewer RPCs fetching more messages.
+//
+// This value is unexported so the user doesn't have another knob to think about. Note that
+// it is the same value as the one used for nackTicker, so it matches this client's
+// idea of a duration that is short, but not so short that we perform excessive RPCs.
+const synchronousWaitTime = 100 * time.Millisecond
+
+// This is a var so that tests can change it.
+var minAckDeadline = 10 * time.Second
+
+// DefaultReceiveSettings holds the default values for ReceiveSettings.
+var DefaultReceiveSettings = ReceiveSettings{
+ MaxExtension: 10 * time.Minute,
+ MaxOutstandingMessages: 1000,
+ MaxOutstandingBytes: 1e9, // 1G
+ NumGoroutines: 1,
+}
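+
+// Example (illustrative sketch of tuning ReceiveSettings before calling
+// Receive; assumes an existing *Subscription sub, and the values shown are
+// examples only):
+//
+//  sub.ReceiveSettings.MaxOutstandingMessages = 100 // bound concurrent callbacks
+//  sub.ReceiveSettings.MaxOutstandingBytes = 1e8    // bound memory for unprocessed messages
+//  sub.ReceiveSettings.NumGoroutines = 4            // pull on four streams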
+
+// Delete deletes the subscription.
+func (s *Subscription) Delete(ctx context.Context) error {
+ return s.c.subc.DeleteSubscription(ctx, &pb.DeleteSubscriptionRequest{Subscription: s.name})
+}
+
+// Exists reports whether the subscription exists on the server.
+func (s *Subscription) Exists(ctx context.Context) (bool, error) {
+ _, err := s.c.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: s.name})
+ if err == nil {
+ return true, nil
+ }
+ if status.Code(err) == codes.NotFound {
+ return false, nil
+ }
+ return false, err
+}
+
+// Config fetches the current configuration for the subscription.
+func (s *Subscription) Config(ctx context.Context) (SubscriptionConfig, error) {
+ pbSub, err := s.c.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: s.name})
+ if err != nil {
+ return SubscriptionConfig{}, err
+ }
+ cfg, err := protoToSubscriptionConfig(pbSub, s.c)
+ if err != nil {
+ return SubscriptionConfig{}, err
+ }
+ return cfg, nil
+}
+
+// SubscriptionConfigToUpdate describes how to update a subscription.
+type SubscriptionConfigToUpdate struct {
+ // If non-nil, the push config is changed.
+ PushConfig *PushConfig
+
+ // If non-zero, the ack deadline is changed.
+ AckDeadline time.Duration
+
+ // If set, RetainAckedMessages is changed.
+ RetainAckedMessages optional.Bool
+
+ // If non-zero, RetentionDuration is changed.
+ RetentionDuration time.Duration
+
+ // If non-nil, ExpirationPolicy is changed.
+ ExpirationPolicy optional.Duration
+
+ // If non-nil, the current set of labels is completely
+ // replaced by the new set.
+ // This field has beta status. It is not subject to the stability guarantee
+ // and may change.
+ Labels map[string]string
+}
+
+// Update changes an existing subscription according to the fields set in cfg.
+// It returns the new SubscriptionConfig.
+//
+// Update returns an error if no fields were modified.
+func (s *Subscription) Update(ctx context.Context, cfg SubscriptionConfigToUpdate) (SubscriptionConfig, error) {
+ req := s.updateRequest(&cfg)
+ if err := cfg.validate(); err != nil {
+ return SubscriptionConfig{}, fmt.Errorf("pubsub: UpdateSubscription %v", err)
+ }
+ if len(req.UpdateMask.Paths) == 0 {
+ return SubscriptionConfig{}, errors.New("pubsub: UpdateSubscription call with nothing to update")
+ }
+ rpsub, err := s.c.subc.UpdateSubscription(ctx, req)
+ if err != nil {
+ return SubscriptionConfig{}, err
+ }
+ return protoToSubscriptionConfig(rpsub, s.c)
+}
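+
+// Example (illustrative sketch; assumes an existing *Subscription sub, and
+// the values shown are examples only):
+//
+//  cfg, err := sub.Update(ctx, SubscriptionConfigToUpdate{
+//      AckDeadline:       30 * time.Second,
+//      RetentionDuration: 24 * time.Hour,
+//  })
+//  if err != nil {
+//      // TODO: handle error.
+//  }
+//  _ = cfg // the updated SubscriptionConfig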
+
+func (s *Subscription) updateRequest(cfg *SubscriptionConfigToUpdate) *pb.UpdateSubscriptionRequest {
+ psub := &pb.Subscription{Name: s.name}
+ var paths []string
+ if cfg.PushConfig != nil {
+ psub.PushConfig = cfg.PushConfig.toProto()
+ paths = append(paths, "push_config")
+ }
+ if cfg.AckDeadline != 0 {
+ psub.AckDeadlineSeconds = trunc32(int64(cfg.AckDeadline.Seconds()))
+ paths = append(paths, "ack_deadline_seconds")
+ }
+ if cfg.RetainAckedMessages != nil {
+ psub.RetainAckedMessages = optional.ToBool(cfg.RetainAckedMessages)
+ paths = append(paths, "retain_acked_messages")
+ }
+ if cfg.RetentionDuration != 0 {
+ psub.MessageRetentionDuration = ptypes.DurationProto(cfg.RetentionDuration)
+ paths = append(paths, "message_retention_duration")
+ }
+ if cfg.ExpirationPolicy != nil {
+ psub.ExpirationPolicy = expirationPolicyToProto(cfg.ExpirationPolicy)
+ paths = append(paths, "expiration_policy")
+ }
+ if cfg.Labels != nil {
+ psub.Labels = cfg.Labels
+ paths = append(paths, "labels")
+ }
+ return &pb.UpdateSubscriptionRequest{
+ Subscription: psub,
+ UpdateMask: &fmpb.FieldMask{Paths: paths},
+ }
+}
+
+const (
+ // The minimum expiration policy duration is 1 day as per:
+ // https://github.com/googleapis/googleapis/blob/51145ff7812d2bb44c1219d0b76dac92a8bd94b2/google/pubsub/v1/pubsub.proto#L606-L607
+ minExpirationPolicy = 24 * time.Hour
+
+ // If an expiration policy is not specified, the default of 31 days is used as per:
+ // https://github.com/googleapis/googleapis/blob/51145ff7812d2bb44c1219d0b76dac92a8bd94b2/google/pubsub/v1/pubsub.proto#L605-L606
+ defaultExpirationPolicy = 31 * 24 * time.Hour
+)
+
+func (cfg *SubscriptionConfigToUpdate) validate() error {
+ if cfg == nil || cfg.ExpirationPolicy == nil {
+ return nil
+ }
+ policy, min := optional.ToDuration(cfg.ExpirationPolicy), minExpirationPolicy
+ if policy == 0 || policy >= min {
+ return nil
+ }
+ return fmt.Errorf("invalid expiration policy(%q) < minimum(%q)", policy, min)
+}
+
+func expirationPolicyToProto(expirationPolicy optional.Duration) *pb.ExpirationPolicy {
+ if expirationPolicy == nil {
+ return nil
+ }
+
+ dur := optional.ToDuration(expirationPolicy)
+ var ttl *durpb.Duration
+ // As per:
+ // https://godoc.org/google.golang.org/genproto/googleapis/pubsub/v1#ExpirationPolicy.Ttl
+ // if ExpirationPolicy.Ttl is set to nil, the expirationPolicy is toggled to NEVER expire.
+ if dur != 0 {
+ ttl = ptypes.DurationProto(dur)
+ }
+ return &pb.ExpirationPolicy{
+ Ttl: ttl,
+ }
+}
+
+// IAM returns the subscription's IAM handle.
+func (s *Subscription) IAM() *iam.Handle {
+ return iam.InternalNewHandle(s.c.subc.Connection(), s.name)
+}
+
+// CreateSubscription creates a new subscription on a topic.
+//
+// id is the name of the subscription to create. It must start with a letter,
+// and contain only letters ([A-Za-z]), numbers ([0-9]), dashes (-),
+// underscores (_), periods (.), tildes (~), plus (+) or percent signs (%). It
+// must be between 3 and 255 characters in length, and must not start with
+// "goog".
+//
+// cfg.Topic is the topic from which the subscription should receive messages. It
+// need not belong to the same project as the subscription. This field is required.
+//
+// cfg.AckDeadline is the maximum time after a subscriber receives a message before
+// the subscriber should acknowledge the message. It must be between 10 and 600
+// seconds (inclusive), and is rounded down to the nearest second. If the
+// provided ackDeadline is 0, then the default value of 10 seconds is used.
+// Note: messages which are obtained via Subscription.Receive need not be
+// acknowledged within this deadline, as the deadline will be automatically
+// extended.
+//
+// cfg.PushConfig may be set to configure this subscription for push delivery.
+//
+// If the subscription already exists an error will be returned.
+func (c *Client) CreateSubscription(ctx context.Context, id string, cfg SubscriptionConfig) (*Subscription, error) {
+ if cfg.Topic == nil {
+ return nil, errors.New("pubsub: require non-nil Topic")
+ }
+ if cfg.AckDeadline == 0 {
+ cfg.AckDeadline = 10 * time.Second
+ }
+ if d := cfg.AckDeadline; d < 10*time.Second || d > 600*time.Second {
+ return nil, fmt.Errorf("ack deadline must be between 10 and 600 seconds; got: %v", d)
+ }
+
+ sub := c.Subscription(id)
+ _, err := c.subc.CreateSubscription(ctx, cfg.toProto(sub.name))
+ if err != nil {
+ return nil, err
+ }
+ return sub, nil
+}
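+
+// Example (illustrative sketch; assumes an existing *Client c and *Topic
+// topic, and the subscription ID is an example only):
+//
+//  sub, err := c.CreateSubscription(ctx, "my-sub", SubscriptionConfig{
+//      Topic:       topic,
+//      AckDeadline: 20 * time.Second,
+//  })
+//  if err != nil {
+//      // TODO: handle error.
+//  }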
+
+var errReceiveInProgress = errors.New("pubsub: Receive already in progress for this subscription")
+
+// Receive calls f with the outstanding messages from the subscription.
+// It blocks until ctx is done, or the service returns a non-retryable error.
+//
+// The standard way to terminate a Receive is to cancel its context:
+//
+// cctx, cancel := context.WithCancel(ctx)
+// err := sub.Receive(cctx, callback)
+// // Call cancel from callback, or another goroutine.
+//
+// If the service returns a non-retryable error, Receive returns that error after
+// all of the outstanding calls to f have returned. If ctx is done, Receive
+// returns nil after all of the outstanding calls to f have returned and
+// all messages have been acknowledged or have expired.
+//
+// Receive calls f concurrently from multiple goroutines. It is encouraged to
+// process messages synchronously in f, even if that processing is relatively
+// time-consuming; Receive will spawn new goroutines for incoming messages,
+// limited by MaxOutstandingMessages and MaxOutstandingBytes in ReceiveSettings.
+//
+// The context passed to f will be canceled when ctx is Done or there is a
+// fatal service error.
+//
+// Receive will send an ack deadline extension on message receipt, then
+// automatically extend the ack deadline of all fetched Messages up to the
+// period specified by s.ReceiveSettings.MaxExtension.
+//
+// Each Subscription may have only one invocation of Receive active at a time.
+func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Message)) error {
+ s.mu.Lock()
+ if s.receiveActive {
+ s.mu.Unlock()
+ return errReceiveInProgress
+ }
+ s.receiveActive = true
+ s.mu.Unlock()
+ defer func() { s.mu.Lock(); s.receiveActive = false; s.mu.Unlock() }()
+
+ maxCount := s.ReceiveSettings.MaxOutstandingMessages
+ if maxCount == 0 {
+ maxCount = DefaultReceiveSettings.MaxOutstandingMessages
+ }
+ maxBytes := s.ReceiveSettings.MaxOutstandingBytes
+ if maxBytes == 0 {
+ maxBytes = DefaultReceiveSettings.MaxOutstandingBytes
+ }
+ maxExt := s.ReceiveSettings.MaxExtension
+ if maxExt == 0 {
+ maxExt = DefaultReceiveSettings.MaxExtension
+ } else if maxExt < 0 {
+ // If MaxExtension is negative, disable automatic extension.
+ maxExt = 0
+ }
+ var numGoroutines int
+ switch {
+ case s.ReceiveSettings.Synchronous:
+ numGoroutines = 1
+ case s.ReceiveSettings.NumGoroutines >= 1:
+ numGoroutines = s.ReceiveSettings.NumGoroutines
+ default:
+ numGoroutines = DefaultReceiveSettings.NumGoroutines
+ }
+ // TODO(jba): add tests that verify that ReceiveSettings are correctly processed.
+ po := &pullOptions{
+ maxExtension: maxExt,
+ maxPrefetch: trunc32(int64(maxCount)),
+ synchronous: s.ReceiveSettings.Synchronous,
+ }
+ fc := newFlowController(maxCount, maxBytes)
+
+ // Wait for all goroutines started by Receive to return, so instead of an
+ // obscure goroutine leak we have an obvious blocked call to Receive.
+ group, gctx := errgroup.WithContext(ctx)
+ for i := 0; i < numGoroutines; i++ {
+ group.Go(func() error {
+ return s.receive(gctx, po, fc, f)
+ })
+ }
+ return group.Wait()
+}
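+
+// Example (illustrative sketch of a full Receive loop; assumes an existing
+// *Subscription sub, where process is a hypothetical handler):
+//
+//  cctx, cancel := context.WithCancel(ctx)
+//  defer cancel()
+//  err := sub.Receive(cctx, func(ctx context.Context, m *Message) {
+//      if err := process(m); err != nil {
+//          m.Nack() // ask for redelivery
+//          return
+//      }
+//      m.Ack()
+//  })
+//  if err != nil {
+//      // TODO: handle error.
+//  }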
+
+func (s *Subscription) receive(ctx context.Context, po *pullOptions, fc *flowController, f func(context.Context, *Message)) error {
+ // Cancel a sub-context when we return, to kick the context-aware callbacks
+ // and the goroutine below.
+ ctx2, cancel := context.WithCancel(ctx)
+ // The iterator does not use the context passed to Receive. If it did, canceling
+ // that context would immediately stop the iterator without waiting for unacked
+ // messages.
+ iter := newMessageIterator(s.c.subc, s.name, po)
+
+ // We cannot use errgroup from Receive here. Receive might already be calling group.Wait,
+ // and group.Wait cannot be called concurrently with group.Go. We give each receive() its
+ // own WaitGroup instead.
+ // Since wg.Add is only called from the main goroutine, wg.Wait is guaranteed
+ // to be called after all Adds.
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ <-ctx2.Done()
+ // Call stop when Receive's context is done.
+ // Stop will block until all outstanding messages have been acknowledged
+ // or there was a fatal service error.
+ iter.stop()
+ wg.Done()
+ }()
+ defer wg.Wait()
+
+ defer cancel()
+ for {
+ var maxToPull int32 // maximum number of messages to pull
+ if po.synchronous {
+ if po.maxPrefetch < 0 {
+ // If there is no limit on the number of messages to pull, use a reasonable default.
+ maxToPull = 1000
+ } else {
+ // Limit the number of messages in memory to MaxOutstandingMessages
+ // (here, po.maxPrefetch). For each message currently in memory, we have
+ // called fc.acquire but not fc.release: this is fc.count(). The next
+ // call to Pull should fetch no more than the difference between these
+ // values.
+ maxToPull = po.maxPrefetch - int32(fc.count())
+ if maxToPull <= 0 {
+ // Wait for some callbacks to finish.
+ if err := gax.Sleep(ctx, synchronousWaitTime); err != nil {
+ // Return nil if the context is done, not err.
+ return nil
+ }
+ continue
+ }
+ }
+ }
+ msgs, err := iter.receive(maxToPull)
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ for i, msg := range msgs {
+ msg := msg
+ // TODO(jba): call acquire closer to when the message is allocated.
+ if err := fc.acquire(ctx, len(msg.Data)); err != nil {
+ // TODO(jba): test that these "orphaned" messages are nacked immediately when ctx is done.
+ for _, m := range msgs[i:] {
+ m.Nack()
+ }
+ // Return nil if the context is done, not err.
+ return nil
+ }
+ old := msg.doneFunc
+ msgLen := len(msg.Data)
+ msg.doneFunc = func(ackID string, ack bool, receiveTime time.Time) {
+ defer fc.release(msgLen)
+ old(ackID, ack, receiveTime)
+ }
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ f(ctx2, msg)
+ }()
+ }
+ }
+}
+
+type pullOptions struct {
+ maxExtension time.Duration
+ maxPrefetch int32
+ // If true, use unary Pull instead of StreamingPull, and never pull more
+ // than maxPrefetch messages.
+ synchronous bool
+}
diff --git a/vendor/cloud.google.com/go/pubsub/topic.go b/vendor/cloud.google.com/go/pubsub/topic.go
new file mode 100644
index 000000000..2b31431ed
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/topic.go
@@ -0,0 +1,550 @@
+// Copyright 2016 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "cloud.google.com/go/iam"
+ "github.com/golang/protobuf/proto"
+ gax "github.com/googleapis/gax-go/v2"
+ "go.opencensus.io/stats"
+ "go.opencensus.io/tag"
+ "google.golang.org/api/support/bundler"
+ pb "google.golang.org/genproto/googleapis/pubsub/v1"
+ fmpb "google.golang.org/genproto/protobuf/field_mask"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+const (
+ // MaxPublishRequestCount is the maximum number of messages that can be in
+ // a single publish request, as defined by the PubSub service.
+ MaxPublishRequestCount = 1000
+
+ // MaxPublishRequestBytes is the maximum size of a single publish request
+ // in bytes, as defined by the PubSub service.
+ MaxPublishRequestBytes = 1e7
+)
+
+// ErrOversizedMessage indicates that a message's size exceeds MaxPublishRequestBytes.
+var ErrOversizedMessage = bundler.ErrOversizedItem
+
+// Topic is a reference to a PubSub topic.
+//
+// The methods of Topic are safe for use by multiple goroutines.
+type Topic struct {
+ c *Client
+ // The fully qualified identifier for the topic, in the format "projects/<projid>/topics/<name>"
+ name string
+
+ // Settings for publishing messages. All changes must be made before the
+ // first call to Publish. The default is DefaultPublishSettings.
+ PublishSettings PublishSettings
+
+ mu sync.RWMutex
+ stopped bool
+ bundler *bundler.Bundler
+}
+
+// PublishSettings control the bundling of published messages.
+type PublishSettings struct {
+
+ // Publish a non-empty batch after this delay has passed.
+ DelayThreshold time.Duration
+
+ // Publish a batch when it has this many messages. The maximum is
+ // MaxPublishRequestCount.
+ CountThreshold int
+
+ // Publish a batch when its size in bytes reaches this value.
+ ByteThreshold int
+
+ // The number of goroutines that invoke the Publish RPC concurrently.
+ //
+ // Defaults to a multiple of GOMAXPROCS.
+ NumGoroutines int
+
+ // The maximum time that the client will attempt to publish a bundle of messages.
+ Timeout time.Duration
+
+ // The maximum number of bytes that the Bundler will keep in memory before
+ // returning ErrOverflow.
+ //
+ // Defaults to DefaultPublishSettings.BufferedByteLimit.
+ BufferedByteLimit int
+}
+
+// DefaultPublishSettings holds the default values for topics' PublishSettings.
+var DefaultPublishSettings = PublishSettings{
+ DelayThreshold: 1 * time.Millisecond,
+ CountThreshold: 100,
+ ByteThreshold: 1e6,
+ Timeout: 60 * time.Second,
+ // By default, limit the bundler to 10 times the max message size. The number 10 is
+ // chosen as a reasonable number of messages in the worst case while still
+ // capping memory at a low enough value to avoid OOMing users.
+ BufferedByteLimit: 10 * MaxPublishRequestBytes,
+}
+
+// CreateTopic creates a new topic.
+//
+// The specified topic ID must start with a letter, and contain only letters
+// ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.),
+// tildes (~), plus (+) or percent signs (%). It must be between 3 and 255
+// characters in length, and must not start with "goog". For more information,
+// see: https://cloud.google.com/pubsub/docs/admin#resource_names
+//
+// If the topic already exists an error will be returned.
+func (c *Client) CreateTopic(ctx context.Context, topicID string) (*Topic, error) {
+ t := c.Topic(topicID)
+ _, err := c.pubc.CreateTopic(ctx, &pb.Topic{Name: t.name})
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+}
+
+// CreateTopicWithConfig creates a topic from TopicConfig.
+//
+// The specified topic ID must start with a letter, and contain only letters
+// ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.),
+// tildes (~), plus (+) or percent signs (%). It must be between 3 and 255
+// characters in length, and must not start with "goog". For more information,
+// see: https://cloud.google.com/pubsub/docs/admin#resource_names.
+//
+// If the topic already exists, an error will be returned.
+func (c *Client) CreateTopicWithConfig(ctx context.Context, topicID string, tc *TopicConfig) (*Topic, error) {
+ t := c.Topic(topicID)
+ _, err := c.pubc.CreateTopic(ctx, &pb.Topic{
+ Name: t.name,
+ Labels: tc.Labels,
+ MessageStoragePolicy: messageStoragePolicyToProto(&tc.MessageStoragePolicy),
+ KmsKeyName: tc.KMSKeyName,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return t, nil
+}
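+
+// Example (illustrative sketch; assumes an existing *Client c, and the
+// topic ID, label, and region are examples only):
+//
+//  topic, err := c.CreateTopicWithConfig(ctx, "my-topic", &TopicConfig{
+//      Labels: map[string]string{"env": "dev"},
+//      MessageStoragePolicy: MessageStoragePolicy{
+//          AllowedPersistenceRegions: []string{"us-east1"},
+//      },
+//  })
+//  if err != nil {
+//      // TODO: handle error.
+//  }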
+
+// Topic creates a reference to a topic in the client's project.
+//
+// If a Topic's Publish method is called, it has background goroutines
+// associated with it. Clean them up by calling Topic.Stop.
+//
+// Avoid creating many Topic instances if you use them to publish.
+func (c *Client) Topic(id string) *Topic {
+ return c.TopicInProject(id, c.projectID)
+}
+
+// TopicInProject creates a reference to a topic in the given project.
+//
+// If a Topic's Publish method is called, it has background goroutines
+// associated with it. Clean them up by calling Topic.Stop.
+//
+// Avoid creating many Topic instances if you use them to publish.
+func (c *Client) TopicInProject(id, projectID string) *Topic {
+ return newTopic(c, fmt.Sprintf("projects/%s/topics/%s", projectID, id))
+}
+
+func newTopic(c *Client, name string) *Topic {
+ return &Topic{
+ c: c,
+ name: name,
+ PublishSettings: DefaultPublishSettings,
+ }
+}
+
+// TopicConfig describes the configuration of a topic.
+type TopicConfig struct {
+ // The set of labels for the topic.
+ Labels map[string]string
+
+ // The topic's message storage policy.
+ MessageStoragePolicy MessageStoragePolicy
+
+ // The name of the Cloud KMS key to be used to protect access to messages
+ // published to this topic, in the format
+ // "projects/P/locations/L/keyRings/R/cryptoKeys/K".
+ KMSKeyName string
+}
+
+// TopicConfigToUpdate describes how to update a topic.
+type TopicConfigToUpdate struct {
+ // If non-nil, the current set of labels is completely
+ // replaced by the new set.
+ Labels map[string]string
+
+ // If non-nil, the existing policy (containing the list of regions)
+ // is completely replaced by the new policy.
+ //
+ // Use the zero value &MessageStoragePolicy{} to reset the topic back to
+ // using the organization's Resource Location Restriction policy.
+ //
+ // If nil, the policy remains unchanged.
+ //
+ // This field has beta status. It is not subject to the stability guarantee
+ // and may change.
+ MessageStoragePolicy *MessageStoragePolicy
+}
+
+func protoToTopicConfig(pbt *pb.Topic) TopicConfig {
+ return TopicConfig{
+ Labels: pbt.Labels,
+ MessageStoragePolicy: protoToMessageStoragePolicy(pbt.MessageStoragePolicy),
+ KMSKeyName: pbt.KmsKeyName,
+ }
+}
+
+// MessageStoragePolicy constrains how messages published to the topic may be stored. It
+// is determined when the topic is created based on the policy configured at
+// the project level.
+type MessageStoragePolicy struct {
+ // AllowedPersistenceRegions is the list of GCP regions where messages that are published
+ // to the topic may be persisted in storage. Messages published by publishers running in
+ // non-allowed GCP regions (or running outside of GCP altogether) will be
+ // routed for storage in one of the allowed regions.
+ //
+ // If empty, it indicates a misconfiguration at the project or organization level, which
+ // will result in all Publish operations failing. This field cannot be empty in updates.
+ //
+ // If nil, then the policy is not defined on a topic level. When used in updates, it resets
+ // the regions back to the organization level Resource Location Restriction policy.
+ //
+ // For more information, see
+ // https://cloud.google.com/pubsub/docs/resource-location-restriction#pubsub-storage-locations.
+ AllowedPersistenceRegions []string
+}
+
+func protoToMessageStoragePolicy(msp *pb.MessageStoragePolicy) MessageStoragePolicy {
+ if msp == nil {
+ return MessageStoragePolicy{}
+ }
+ return MessageStoragePolicy{AllowedPersistenceRegions: msp.AllowedPersistenceRegions}
+}
+
+func messageStoragePolicyToProto(msp *MessageStoragePolicy) *pb.MessageStoragePolicy {
+ if msp == nil || msp.AllowedPersistenceRegions == nil {
+ return nil
+ }
+ return &pb.MessageStoragePolicy{AllowedPersistenceRegions: msp.AllowedPersistenceRegions}
+}
+
+// Config returns the TopicConfig for the topic.
+func (t *Topic) Config(ctx context.Context) (TopicConfig, error) {
+ pbt, err := t.c.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: t.name})
+ if err != nil {
+ return TopicConfig{}, err
+ }
+ return protoToTopicConfig(pbt), nil
+}
+
+// Update changes an existing topic according to the fields set in cfg. It returns
+// the new TopicConfig.
+func (t *Topic) Update(ctx context.Context, cfg TopicConfigToUpdate) (TopicConfig, error) {
+ req := t.updateRequest(cfg)
+ if len(req.UpdateMask.Paths) == 0 {
+ return TopicConfig{}, errors.New("pubsub: UpdateTopic call with nothing to update")
+ }
+ rpt, err := t.c.pubc.UpdateTopic(ctx, req)
+ if err != nil {
+ return TopicConfig{}, err
+ }
+ return protoToTopicConfig(rpt), nil
+}
+
+func (t *Topic) updateRequest(cfg TopicConfigToUpdate) *pb.UpdateTopicRequest {
+ pt := &pb.Topic{Name: t.name}
+ var paths []string
+ if cfg.Labels != nil {
+ pt.Labels = cfg.Labels
+ paths = append(paths, "labels")
+ }
+ if cfg.MessageStoragePolicy != nil {
+ pt.MessageStoragePolicy = messageStoragePolicyToProto(cfg.MessageStoragePolicy)
+ paths = append(paths, "message_storage_policy")
+ }
+ return &pb.UpdateTopicRequest{
+ Topic: pt,
+ UpdateMask: &fmpb.FieldMask{Paths: paths},
+ }
+}
+
+// Topics returns an iterator which returns all of the topics for the client's project.
+func (c *Client) Topics(ctx context.Context) *TopicIterator {
+ it := c.pubc.ListTopics(ctx, &pb.ListTopicsRequest{Project: c.fullyQualifiedProjectName()})
+ return &TopicIterator{
+ c: c,
+ next: func() (string, error) {
+ topic, err := it.Next()
+ if err != nil {
+ return "", err
+ }
+ return topic.Name, nil
+ },
+ }
+}
+
+// TopicIterator is an iterator that returns a series of topics.
+type TopicIterator struct {
+ c *Client
+ next func() (string, error)
+}
+
+// Next returns the next topic. If there are no more topics, iterator.Done will be returned.
+func (tps *TopicIterator) Next() (*Topic, error) {
+ topicName, err := tps.next()
+ if err != nil {
+ return nil, err
+ }
+ return newTopic(tps.c, topicName), nil
+}
+
+// ID returns the unique identifier of the topic within its project.
+func (t *Topic) ID() string {
+ slash := strings.LastIndex(t.name, "/")
+ if slash == -1 {
+ // name is not a fully-qualified name.
+ panic("bad topic name")
+ }
+ return t.name[slash+1:]
+}
+
+// String returns the printable globally unique name for the topic.
+func (t *Topic) String() string {
+ return t.name
+}
+
+// Delete deletes the topic.
+func (t *Topic) Delete(ctx context.Context) error {
+ return t.c.pubc.DeleteTopic(ctx, &pb.DeleteTopicRequest{Topic: t.name})
+}
+
+// Exists reports whether the topic exists on the server.
+func (t *Topic) Exists(ctx context.Context) (bool, error) {
+ if t.name == "_deleted-topic_" {
+ return false, nil
+ }
+ _, err := t.c.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: t.name})
+ if err == nil {
+ return true, nil
+ }
+ if status.Code(err) == codes.NotFound {
+ return false, nil
+ }
+ return false, err
+}
+
+// IAM returns the topic's IAM handle.
+func (t *Topic) IAM() *iam.Handle {
+ return iam.InternalNewHandle(t.c.pubc.Connection(), t.name)
+}
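+
+// Example (illustrative sketch of reading a topic's IAM policy via the
+// cloud.google.com/go/iam Handle; assumes an existing *Topic t):
+//
+//  policy, err := t.IAM().Policy(ctx)
+//  if err != nil {
+//      // TODO: handle error.
+//  }
+//  for _, role := range policy.Roles() {
+//      fmt.Println(role)
+//  }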
+
+// Subscriptions returns an iterator which returns the subscriptions for this topic.
+//
+// Some of the returned subscriptions may belong to a project other than t.
+func (t *Topic) Subscriptions(ctx context.Context) *SubscriptionIterator {
+ it := t.c.pubc.ListTopicSubscriptions(ctx, &pb.ListTopicSubscriptionsRequest{
+ Topic: t.name,
+ })
+ return &SubscriptionIterator{
+ c: t.c,
+ next: it.Next,
+ }
+}
+
+var errTopicStopped = errors.New("pubsub: Stop has been called for this topic")
+
+// Publish publishes msg to the topic asynchronously. Messages are batched and
+// sent according to the topic's PublishSettings. Publish never blocks.
+//
+// Publish returns a non-nil PublishResult which will be ready when the
+// message has been sent (or has failed to be sent) to the server.
+//
+// Publish creates goroutines for batching and sending messages. These goroutines
+// need to be stopped by calling t.Stop(). Once stopped, future calls to Publish
+// will immediately return a PublishResult with an error.
+func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult {
+ // TODO(jba): if this turns out to take significant time, try to approximate it.
+ // Or, convert the messages to protos in Publish, instead of in the service.
+ msg.size = proto.Size(&pb.PubsubMessage{
+ Data: msg.Data,
+ Attributes: msg.Attributes,
+ })
+ r := &PublishResult{ready: make(chan struct{})}
+ t.initBundler()
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ // TODO(aboulhosn) [from bcmills] consider changing the semantics of bundler to perform this logic so we don't have to do it here
+ if t.stopped {
+ r.set("", errTopicStopped)
+ return r
+ }
+
+ // TODO(jba) [from bcmills] consider using a shared channel per bundle
+ // (requires Bundler API changes; would reduce allocations)
+ err := t.bundler.Add(&bundledMessage{msg, r}, msg.size)
+ if err != nil {
+ r.set("", err)
+ }
+ return r
+}
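+
+// Example (illustrative sketch; assumes an existing *Topic t):
+//
+//  defer t.Stop()
+//  res := t.Publish(ctx, &Message{Data: []byte("hello")})
+//  id, err := res.Get(ctx) // blocks until the server responds
+//  if err != nil {
+//      // TODO: handle error.
+//  }
+//  fmt.Println("published message with ID:", id)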
+
+// Stop sends all remaining published messages and stops goroutines created for handling
+// publishing. Returns once all outstanding messages have been sent or have
+// failed to be sent.
+func (t *Topic) Stop() {
+ t.mu.Lock()
+ noop := t.stopped || t.bundler == nil
+ t.stopped = true
+ t.mu.Unlock()
+ if noop {
+ return
+ }
+ t.bundler.Flush()
+}
+
+// A PublishResult holds the result from a call to Publish.
+type PublishResult struct {
+ ready chan struct{}
+ serverID string
+ err error
+}
+
+// Ready returns a channel that is closed when the result is ready.
+// When the Ready channel is closed, Get is guaranteed not to block.
+func (r *PublishResult) Ready() <-chan struct{} { return r.ready }
+
+// Get returns the server-generated message ID and/or error result of a Publish call.
+// Get blocks until the Publish call completes or the context is done.
+func (r *PublishResult) Get(ctx context.Context) (serverID string, err error) {
+ // If the result is already ready, return it even if the context is done.
+ select {
+ case <-r.Ready():
+ return r.serverID, r.err
+ default:
+ }
+ select {
+ case <-ctx.Done():
+ return "", ctx.Err()
+ case <-r.Ready():
+ return r.serverID, r.err
+ }
+}
+
+func (r *PublishResult) set(sid string, err error) {
+ r.serverID = sid
+ r.err = err
+ close(r.ready)
+}
+
+type bundledMessage struct {
+ msg *Message
+ res *PublishResult
+}
+
+func (t *Topic) initBundler() {
+ t.mu.RLock()
+ noop := t.stopped || t.bundler != nil
+ t.mu.RUnlock()
+ if noop {
+ return
+ }
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ // Must re-check, since we released the lock.
+ if t.stopped || t.bundler != nil {
+ return
+ }
+
+ timeout := t.PublishSettings.Timeout
+ t.bundler = bundler.NewBundler(&bundledMessage{}, func(items interface{}) {
+ // TODO(jba): use a context detached from the one passed to NewClient.
+ ctx := context.TODO()
+ if timeout != 0 {
+ var cancel func()
+ ctx, cancel = context.WithTimeout(ctx, timeout)
+ defer cancel()
+ }
+ t.publishMessageBundle(ctx, items.([]*bundledMessage))
+ })
+ t.bundler.DelayThreshold = t.PublishSettings.DelayThreshold
+ t.bundler.BundleCountThreshold = t.PublishSettings.CountThreshold
+ if t.bundler.BundleCountThreshold > MaxPublishRequestCount {
+ t.bundler.BundleCountThreshold = MaxPublishRequestCount
+ }
+ t.bundler.BundleByteThreshold = t.PublishSettings.ByteThreshold
+
+ bufferedByteLimit := DefaultPublishSettings.BufferedByteLimit
+ if t.PublishSettings.BufferedByteLimit > 0 {
+ bufferedByteLimit = t.PublishSettings.BufferedByteLimit
+ }
+ t.bundler.BufferedByteLimit = bufferedByteLimit
+
+ t.bundler.BundleByteLimit = MaxPublishRequestBytes
+ // Unless overridden, allow many goroutines per CPU to call the Publish RPC concurrently.
+ // The default value was determined via extensive load testing (see the loadtest subdirectory).
+ if t.PublishSettings.NumGoroutines > 0 {
+ t.bundler.HandlerLimit = t.PublishSettings.NumGoroutines
+ } else {
+ t.bundler.HandlerLimit = 25 * runtime.GOMAXPROCS(0)
+ }
+}
+
+func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) {
+ ctx, err := tag.New(ctx, tag.Insert(keyStatus, "OK"), tag.Upsert(keyTopic, t.name))
+ if err != nil {
+ log.Printf("pubsub: cannot create context with tag in publishMessageBundle: %v", err)
+ }
+ pbMsgs := make([]*pb.PubsubMessage, len(bms))
+ for i, bm := range bms {
+ pbMsgs[i] = &pb.PubsubMessage{
+ Data: bm.msg.Data,
+ Attributes: bm.msg.Attributes,
+ }
+ bm.msg = nil // release bm.msg for GC
+ }
+ start := time.Now()
+ res, err := t.c.pubc.Publish(ctx, &pb.PublishRequest{
+ Topic: t.name,
+ Messages: pbMsgs,
+ }, gax.WithGRPCOptions(grpc.MaxCallSendMsgSize(maxSendRecvBytes)))
+ end := time.Now()
+ if err != nil {
+ // Update context with error tag for OpenCensus,
+ // using same stats.Record() call as success case.
+ ctx, _ = tag.New(ctx, tag.Upsert(keyStatus, "ERROR"),
+ tag.Upsert(keyError, err.Error()))
+ }
+ stats.Record(ctx,
+ PublishLatency.M(float64(end.Sub(start)/time.Millisecond)),
+ PublishedMessages.M(int64(len(bms))))
+ for i, bm := range bms {
+ if err != nil {
+ bm.res.set("", err)
+ } else {
+ bm.res.set(res.MessageIds[i], nil)
+ }
+ }
+}
diff --git a/vendor/cloud.google.com/go/pubsub/trace.go b/vendor/cloud.google.com/go/pubsub/trace.go
new file mode 100644
index 000000000..d21d37c84
--- /dev/null
+++ b/vendor/cloud.google.com/go/pubsub/trace.go
@@ -0,0 +1,217 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+ "context"
+ "log"
+ "sync"
+
+ "go.opencensus.io/plugin/ocgrpc"
+ "go.opencensus.io/stats"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
+ "google.golang.org/api/option"
+ "google.golang.org/grpc"
+)
+
+func openCensusOptions() []option.ClientOption {
+ return []option.ClientOption{
+ option.WithGRPCDialOption(grpc.WithStatsHandler(&ocgrpc.ClientHandler{})),
+ }
+}
+
+// The following keys are used to tag requests with a specific topic/subscription ID.
+var (
+ keyTopic = tag.MustNewKey("topic")
+ keySubscription = tag.MustNewKey("subscription")
+)
+
+// In the following, errors are used if status is not "OK".
+var (
+ keyStatus = tag.MustNewKey("status")
+ keyError = tag.MustNewKey("error")
+)
+
+const statsPrefix = "cloud.google.com/go/pubsub/"
+
+// The following are measures recorded in publish/subscribe flows.
+var (
+ // PublishedMessages is a measure of the number of messages published, which may include errors.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ PublishedMessages = stats.Int64(statsPrefix+"published_messages", "Number of PubSub messages published", stats.UnitDimensionless)
+
+ // PublishLatency is a measure of the number of milliseconds it took to publish a bundle,
+ // which may consist of one or more messages.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ PublishLatency = stats.Float64(statsPrefix+"publish_roundtrip_latency", "The latency in milliseconds per publish batch", stats.UnitMilliseconds)
+
+ // PullCount is a measure of the number of messages pulled.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ PullCount = stats.Int64(statsPrefix+"pull_count", "Number of PubSub messages pulled", stats.UnitDimensionless)
+
+ // AckCount is a measure of the number of messages acked.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ AckCount = stats.Int64(statsPrefix+"ack_count", "Number of PubSub messages acked", stats.UnitDimensionless)
+
+ // NackCount is a measure of the number of messages nacked.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ NackCount = stats.Int64(statsPrefix+"nack_count", "Number of PubSub messages nacked", stats.UnitDimensionless)
+
+ // ModAckCount is a measure of the number of messages whose ack-deadline was modified.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ ModAckCount = stats.Int64(statsPrefix+"mod_ack_count", "Number of ack-deadlines modified", stats.UnitDimensionless)
+
+	// ModAckTimeoutCount is a measure of the number of ModifyAckDeadline RPCs that timed out.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ ModAckTimeoutCount = stats.Int64(statsPrefix+"mod_ack_timeout_count", "Number of ModifyAckDeadline RPCs that timed out", stats.UnitDimensionless)
+
+ // StreamOpenCount is a measure of the number of times a streaming-pull stream was opened.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ StreamOpenCount = stats.Int64(statsPrefix+"stream_open_count", "Number of calls opening a new streaming pull", stats.UnitDimensionless)
+
+ // StreamRetryCount is a measure of the number of times a streaming-pull operation was retried.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ StreamRetryCount = stats.Int64(statsPrefix+"stream_retry_count", "Number of retries of a stream send or receive", stats.UnitDimensionless)
+
+ // StreamRequestCount is a measure of the number of requests sent on a streaming-pull stream.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+	StreamRequestCount = stats.Int64(statsPrefix+"stream_request_count", "Number of gRPC StreamingPull request messages sent", stats.UnitDimensionless)
+
+ // StreamResponseCount is a measure of the number of responses received on a streaming-pull stream.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ StreamResponseCount = stats.Int64(statsPrefix+"stream_response_count", "Number of gRPC StreamingPull response messages received", stats.UnitDimensionless)
+)
+
+var (
+ // PublishedMessagesView is a cumulative sum of PublishedMessages.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ PublishedMessagesView *view.View
+
+ // PublishLatencyView is a distribution of PublishLatency.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ PublishLatencyView *view.View
+
+ // PullCountView is a cumulative sum of PullCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ PullCountView *view.View
+
+ // AckCountView is a cumulative sum of AckCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ AckCountView *view.View
+
+ // NackCountView is a cumulative sum of NackCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ NackCountView *view.View
+
+ // ModAckCountView is a cumulative sum of ModAckCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ ModAckCountView *view.View
+
+ // ModAckTimeoutCountView is a cumulative sum of ModAckTimeoutCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ ModAckTimeoutCountView *view.View
+
+ // StreamOpenCountView is a cumulative sum of StreamOpenCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ StreamOpenCountView *view.View
+
+ // StreamRetryCountView is a cumulative sum of StreamRetryCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ StreamRetryCountView *view.View
+
+ // StreamRequestCountView is a cumulative sum of StreamRequestCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ StreamRequestCountView *view.View
+
+ // StreamResponseCountView is a cumulative sum of StreamResponseCount.
+ // It is EXPERIMENTAL and subject to change or removal without notice.
+ StreamResponseCountView *view.View
+)
+
+func init() {
+ PublishedMessagesView = createCountView(stats.Measure(PublishedMessages), keyTopic, keyStatus, keyError)
+ PublishLatencyView = createDistView(PublishLatency, keyTopic, keyStatus, keyError)
+ PullCountView = createCountView(PullCount, keySubscription)
+ AckCountView = createCountView(AckCount, keySubscription)
+ NackCountView = createCountView(NackCount, keySubscription)
+ ModAckCountView = createCountView(ModAckCount, keySubscription)
+ ModAckTimeoutCountView = createCountView(ModAckTimeoutCount, keySubscription)
+ StreamOpenCountView = createCountView(StreamOpenCount, keySubscription)
+ StreamRetryCountView = createCountView(StreamRetryCount, keySubscription)
+ StreamRequestCountView = createCountView(StreamRequestCount, keySubscription)
+ StreamResponseCountView = createCountView(StreamResponseCount, keySubscription)
+
+ DefaultPublishViews = []*view.View{
+ PublishedMessagesView,
+ PublishLatencyView,
+ }
+
+ DefaultSubscribeViews = []*view.View{
+ PullCountView,
+ AckCountView,
+ NackCountView,
+ ModAckCountView,
+ ModAckTimeoutCountView,
+ StreamOpenCountView,
+ StreamRetryCountView,
+ StreamRequestCountView,
+ StreamResponseCountView,
+ }
+}
+
+// The following slices hold the default views related to publish/subscribe operations provided by this package.
+// They are EXPERIMENTAL and subject to change or removal without notice.
+var (
+ DefaultPublishViews []*view.View
+ DefaultSubscribeViews []*view.View
+)
+
+func createCountView(m stats.Measure, keys ...tag.Key) *view.View {
+ return &view.View{
+ Name: m.Name(),
+ Description: m.Description(),
+ TagKeys: keys,
+ Measure: m,
+ Aggregation: view.Sum(),
+ }
+}
+
+func createDistView(m stats.Measure, keys ...tag.Key) *view.View {
+ return &view.View{
+ Name: m.Name(),
+ Description: m.Description(),
+ TagKeys: keys,
+ Measure: m,
+ Aggregation: view.Distribution(0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000),
+ }
+}
+
+var logOnce sync.Once
+
+// withSubscriptionKey returns a new context tagged with the given subscription name.
+func withSubscriptionKey(ctx context.Context, subName string) context.Context {
+ ctx, err := tag.New(ctx, tag.Upsert(keySubscription, subName))
+ if err != nil {
+ logOnce.Do(func() {
+ log.Printf("pubsub: error creating tag map for 'subscribe' key: %v", err)
+ })
+ }
+ return ctx
+}
+
+func recordStat(ctx context.Context, m *stats.Int64Measure, n int64) {
+ stats.Record(ctx, m.M(n))
+}
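
To actually collect any of these measures, a consumer of this package must register the views with the OpenCensus view package and hook up an exporter (exporter setup is elided here); a minimal sketch:

```go
package main

import (
	"log"

	"cloud.google.com/go/pubsub"
	"go.opencensus.io/stats/view"
)

func main() {
	// Register both default view sets; recorded data then flows to
	// whatever view exporter is registered elsewhere in the process.
	if err := view.Register(pubsub.DefaultPublishViews...); err != nil {
		log.Fatalf("failed to register publish views: %v", err)
	}
	if err := view.Register(pubsub.DefaultSubscribeViews...); err != nil {
		log.Fatalf("failed to register subscribe views: %v", err)
	}
}
```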
diff --git a/vendor/github.com/RichardKnop/logging/.gitignore b/vendor/github.com/RichardKnop/logging/.gitignore
new file mode 100644
index 000000000..3ea99d6c9
--- /dev/null
+++ b/vendor/github.com/RichardKnop/logging/.gitignore
@@ -0,0 +1 @@
+coverage*
diff --git a/vendor/github.com/RichardKnop/logging/.travis.yml b/vendor/github.com/RichardKnop/logging/.travis.yml
new file mode 100644
index 000000000..91d6112e2
--- /dev/null
+++ b/vendor/github.com/RichardKnop/logging/.travis.yml
@@ -0,0 +1,17 @@
+---
+language: go
+
+go:
+ - 1.11.x
+
+env:
+ - GO111MODULE=on
+
+services:
+ - docker
+
+script:
+ - make test-with-coverage
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/RichardKnop/logging/LICENSE b/vendor/github.com/RichardKnop/logging/LICENSE
new file mode 100644
index 000000000..c33dcc7c9
--- /dev/null
+++ b/vendor/github.com/RichardKnop/logging/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/RichardKnop/logging/Makefile b/vendor/github.com/RichardKnop/logging/Makefile
new file mode 100644
index 000000000..ddf04ca60
--- /dev/null
+++ b/vendor/github.com/RichardKnop/logging/Makefile
@@ -0,0 +1,34 @@
+.PHONY: update-deps install-deps fmt lint golint test test-with-coverage
+# TODO: When Go 1.9 is released, the vendor folder should be ignored automatically
+PACKAGES=`go list ./... | grep -v vendor | grep -v mocks`
+
+fmt:
+ for pkg in ${PACKAGES}; do \
+ go fmt $$pkg; \
+ done;
+
+lint:
+ gometalinter --exclude=vendor/ --tests --config=gometalinter.json --disable-all -E vet -E gofmt -E misspell -E ineffassign -E goimports -E deadcode ./...
+
+golint:
+ for pkg in ${PACKAGES}; do \
+ golint $$pkg; \
+ done;
+
+test:
+ TEST_FAILED= ; \
+ for pkg in ${PACKAGES}; do \
+ go test $$pkg || TEST_FAILED=1; \
+ done; \
+ [ -z "$$TEST_FAILED" ]
+
+test-with-coverage:
+ echo "" > coverage.out
+ echo "mode: set" > coverage-all.out
+ TEST_FAILED= ; \
+ for pkg in ${PACKAGES}; do \
+ go test -coverprofile=coverage.out -covermode=set $$pkg || TEST_FAILED=1; \
+ tail -n +2 coverage.out >> coverage-all.out; \
+ done; \
+ [ -z "$$TEST_FAILED" ]
+ #go tool cover -html=coverage-all.out
diff --git a/vendor/github.com/RichardKnop/logging/README.md b/vendor/github.com/RichardKnop/logging/README.md
new file mode 100644
index 000000000..40a28cf5a
--- /dev/null
+++ b/vendor/github.com/RichardKnop/logging/README.md
@@ -0,0 +1,58 @@
+## Logging
+
+A simple leveled logging library with coloured output.
+
+[](https://travis-ci.org/RichardKnop/logging)
+[](http://godoc.org/github.com/RichardKnop/logging)
+[](https://codecov.io/gh/RichardKnop/logging)
+
+---
+
+Log levels:
+
+- `INFO` (blue)
+- `WARNING` (pink)
+- `ERROR` (red)
+- `FATAL` (red)
+
+Formatters:
+
+- `DefaultFormatter`
+- `ColouredFormatter`
+
+Example usage: create a new package `log` in your app such that:
+
+```go
+package log
+
+import (
+ "github.com/RichardKnop/logging"
+)
+
+var (
+ logger = logging.New(nil, nil, new(logging.ColouredFormatter))
+
+ // INFO ...
+ INFO = logger[logging.INFO]
+ // WARNING ...
+ WARNING = logger[logging.WARNING]
+ // ERROR ...
+ ERROR = logger[logging.ERROR]
+ // FATAL ...
+ FATAL = logger[logging.FATAL]
+)
+```
+
+Then from your app you could do:
+
+```go
+package main
+
+import (
+ "github.com/yourusername/yourapp/log"
+)
+
+func main() {
+ log.INFO.Print("log message")
+}
+```
diff --git a/vendor/github.com/RichardKnop/logging/coloured_formatter.go b/vendor/github.com/RichardKnop/logging/coloured_formatter.go
new file mode 100644
index 000000000..c110500e4
--- /dev/null
+++ b/vendor/github.com/RichardKnop/logging/coloured_formatter.go
@@ -0,0 +1,40 @@
+package logging
+
+import (
+ "fmt"
+)
+
+const (
+ // For colouring
+ resetSeq = "\033[0m"
+ colourSeq = "\033[0;%dm"
+)
+
+// Colour map
+var colour = map[level]string{
+ INFO: fmt.Sprintf(colourSeq, 94), // blue
+ WARNING: fmt.Sprintf(colourSeq, 95), // pink
+ ERROR: fmt.Sprintf(colourSeq, 91), // red
+ FATAL: fmt.Sprintf(colourSeq, 91), // red
+}
+
+// ColouredFormatter colours log messages with ANSI escape codes
+// and adds filename and line number before the log message
+// See https://en.wikipedia.org/wiki/ANSI_escape_code
+type ColouredFormatter struct {
+}
+
+// GetPrefix returns colour escape code
+func (f *ColouredFormatter) GetPrefix(lvl level) string {
+ return colour[lvl]
+}
+
+// GetSuffix returns reset sequence code
+func (f *ColouredFormatter) GetSuffix(lvl level) string {
+ return resetSeq
+}
+
+// Format adds filename and line number before the log message
+func (f *ColouredFormatter) Format(lvl level, v ...interface{}) []interface{} {
+ return append([]interface{}{header()}, v...)
+}
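
For reference, the colour mechanics above amount to bracketing each record between an ANSI colour sequence and the reset sequence; a self-contained sketch using the same escape codes (the message text is illustrative):

```go
package main

import "fmt"

func main() {
	const (
		colourSeq = "\033[0;%dm" // same sequences as coloured_formatter.go
		resetSeq  = "\033[0m"
	)
	// 91 is the bright-red code the formatter uses for ERROR and FATAL.
	fmt.Printf(colourSeq+"%s"+resetSeq+"\n", 91, "ERROR: something went wrong")
}
```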
diff --git a/vendor/github.com/RichardKnop/logging/default_formatter.go b/vendor/github.com/RichardKnop/logging/default_formatter.go
new file mode 100644
index 000000000..e33d91e31
--- /dev/null
+++ b/vendor/github.com/RichardKnop/logging/default_formatter.go
@@ -0,0 +1,20 @@
+package logging
+
+// DefaultFormatter adds filename and line number before the log message
+type DefaultFormatter struct {
+}
+
+// GetPrefix returns ""
+func (f *DefaultFormatter) GetPrefix(lvl level) string {
+ return ""
+}
+
+// GetSuffix returns ""
+func (f *DefaultFormatter) GetSuffix(lvl level) string {
+ return ""
+}
+
+// Format adds filename and line number before the log message
+func (f *DefaultFormatter) Format(lvl level, v ...interface{}) []interface{} {
+ return append([]interface{}{header()}, v...)
+}
diff --git a/vendor/github.com/RichardKnop/logging/formatter_interface.go b/vendor/github.com/RichardKnop/logging/formatter_interface.go
new file mode 100644
index 000000000..54656999e
--- /dev/null
+++ b/vendor/github.com/RichardKnop/logging/formatter_interface.go
@@ -0,0 +1,30 @@
+package logging
+
+import (
+ "fmt"
+ "path/filepath"
+ "runtime"
+)
+
+const (
+ // Runtime caller depth
+ depth = 3
+)
+
+// Formatter interface
+type Formatter interface {
+ GetPrefix(lvl level) string
+ Format(lvl level, v ...interface{}) []interface{}
+ GetSuffix(lvl level) string
+}
+
+// Returns header including filename and line number
+func header() string {
+ _, fn, line, ok := runtime.Caller(depth)
+ if !ok {
+ fn = "???"
+ line = 1
+ }
+
+ return fmt.Sprintf("%s:%d ", filepath.Base(fn), line)
+}
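
Note that because the `level` type is unexported, a Formatter implementation has to live inside the `logging` package itself. A hypothetical pass-through formatter, for illustration:

```go
package logging

// PlainFormatter is a hypothetical formatter that applies no colouring
// and no caller header; it passes the log arguments through unchanged.
type PlainFormatter struct{}

// GetPrefix returns ""
func (f *PlainFormatter) GetPrefix(lvl level) string { return "" }

// GetSuffix returns ""
func (f *PlainFormatter) GetSuffix(lvl level) string { return "" }

// Format returns the arguments unchanged
func (f *PlainFormatter) Format(lvl level, v ...interface{}) []interface{} {
	return v
}
```

External packages are consequently limited to the DefaultFormatter and ColouredFormatter shipped with the library.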
diff --git a/vendor/github.com/RichardKnop/logging/go.mod b/vendor/github.com/RichardKnop/logging/go.mod
new file mode 100644
index 000000000..26e2e6a1a
--- /dev/null
+++ b/vendor/github.com/RichardKnop/logging/go.mod
@@ -0,0 +1,7 @@
+module github.com/RichardKnop/logging
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/stretchr/testify v1.2.2
+)
diff --git a/vendor/github.com/RichardKnop/logging/go.sum b/vendor/github.com/RichardKnop/logging/go.sum
new file mode 100644
index 000000000..e03ee77d9
--- /dev/null
+++ b/vendor/github.com/RichardKnop/logging/go.sum
@@ -0,0 +1,6 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
diff --git a/vendor/github.com/RichardKnop/logging/gometalinter.json b/vendor/github.com/RichardKnop/logging/gometalinter.json
new file mode 100644
index 000000000..22f71572d
--- /dev/null
+++ b/vendor/github.com/RichardKnop/logging/gometalinter.json
@@ -0,0 +1,9 @@
+{
+ "Linters":
+ {
+ "vet":
+ {
+ "Command": "go tool vet"
+ }
+ }
+}
\ No newline at end of file
diff --git a/vendor/github.com/RichardKnop/logging/interface.go b/vendor/github.com/RichardKnop/logging/interface.go
new file mode 100644
index 000000000..2e655f706
--- /dev/null
+++ b/vendor/github.com/RichardKnop/logging/interface.go
@@ -0,0 +1,17 @@
+package logging
+
+// LoggerInterface is satisfied by both the stdlib logger and custom loggers.
+// There is no standard logging interface in Go; this is the closest we get, unfortunately.
+type LoggerInterface interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
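
The method set matches the stdlib `*log.Logger` exactly, so a standard logger satisfies the interface without any adapter; a compile-time check sketch:

```go
package main

import (
	"log"
	"os"

	"github.com/RichardKnop/logging"
)

// *log.Logger provides the full Print/Fatal/Panic family of methods,
// so it satisfies LoggerInterface directly.
var _ logging.LoggerInterface = log.New(os.Stderr, "", log.LstdFlags)

func main() {}
```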
diff --git a/vendor/github.com/RichardKnop/logging/logger.go b/vendor/github.com/RichardKnop/logging/logger.go
new file mode 100644
index 000000000..ff356c736
--- /dev/null
+++ b/vendor/github.com/RichardKnop/logging/logger.go
@@ -0,0 +1,134 @@
+package logging
+
+import (
+ "io"
+ "log"
+ "os"
+)
+
+// Level type
+type level int
+
+const (
+ // DEBUG level
+ DEBUG level = iota
+ // INFO level
+ INFO
+ // WARNING level
+ WARNING
+ // ERROR level
+ ERROR
+ // FATAL level
+ FATAL
+
+ flag = log.Ldate | log.Ltime
+)
+
+// Log level prefix map
+var prefix = map[level]string{
+ DEBUG: "DEBUG: ",
+ INFO: "INFO: ",
+ WARNING: "WARNING: ",
+ ERROR: "ERROR: ",
+ FATAL: "FATAL: ",
+}
+
+// Logger ...
+type Logger map[level]LoggerInterface
+
+// New returns instance of Logger
+func New(out, errOut io.Writer, f Formatter) Logger {
+ // Fall back to stdout if out not set
+ if out == nil {
+ out = os.Stdout
+ }
+
+ // Fall back to stderr if errOut not set
+ if errOut == nil {
+ errOut = os.Stderr
+ }
+
+ // Fall back to DefaultFormatter if f not set
+ if f == nil {
+ f = new(DefaultFormatter)
+ }
+
+ l := make(map[level]LoggerInterface, 5)
+ l[DEBUG] = &Wrapper{lvl: DEBUG, formatter: f, logger: log.New(out, f.GetPrefix(DEBUG)+prefix[DEBUG], flag)}
+ l[INFO] = &Wrapper{lvl: INFO, formatter: f, logger: log.New(out, f.GetPrefix(INFO)+prefix[INFO], flag)}
+	l[WARNING] = &Wrapper{lvl: WARNING, formatter: f, logger: log.New(out, f.GetPrefix(WARNING)+prefix[WARNING], flag)}
+	l[ERROR] = &Wrapper{lvl: ERROR, formatter: f, logger: log.New(errOut, f.GetPrefix(ERROR)+prefix[ERROR], flag)}
+	l[FATAL] = &Wrapper{lvl: FATAL, formatter: f, logger: log.New(errOut, f.GetPrefix(FATAL)+prefix[FATAL], flag)}
+
+ return Logger(l)
+}
+
+// Wrapper ...
+type Wrapper struct {
+ lvl level
+ formatter Formatter
+ logger LoggerInterface
+}
+
+// Print ...
+func (w *Wrapper) Print(v ...interface{}) {
+ v = w.formatter.Format(w.lvl, v...)
+ v = append(v, w.formatter.GetSuffix(w.lvl))
+ w.logger.Print(v...)
+}
+
+// Printf ...
+func (w *Wrapper) Printf(format string, v ...interface{}) {
+ suffix := w.formatter.GetSuffix(w.lvl)
+ v = w.formatter.Format(w.lvl, v...)
+ w.logger.Printf("%s"+format+suffix, v...)
+}
+
+// Println ...
+func (w *Wrapper) Println(v ...interface{}) {
+ v = w.formatter.Format(w.lvl, v...)
+ v = append(v, w.formatter.GetSuffix(w.lvl))
+ w.logger.Println(v...)
+}
+
+// Fatal ...
+func (w *Wrapper) Fatal(v ...interface{}) {
+ v = w.formatter.Format(w.lvl, v...)
+ v = append(v, w.formatter.GetSuffix(w.lvl))
+ w.logger.Fatal(v...)
+}
+
+// Fatalf ...
+func (w *Wrapper) Fatalf(format string, v ...interface{}) {
+ suffix := w.formatter.GetSuffix(w.lvl)
+ v = w.formatter.Format(w.lvl, v...)
+ w.logger.Fatalf("%s"+format+suffix, v...)
+}
+
+// Fatalln ...
+func (w *Wrapper) Fatalln(v ...interface{}) {
+ v = w.formatter.Format(w.lvl, v...)
+ v = append(v, w.formatter.GetSuffix(w.lvl))
+ w.logger.Fatalln(v...)
+}
+
+// Panic ...
+func (w *Wrapper) Panic(v ...interface{}) {
+ v = w.formatter.Format(w.lvl, v...)
+ v = append(v, w.formatter.GetSuffix(w.lvl))
+	w.logger.Panic(v...)
+}
+
+// Panicf ...
+func (w *Wrapper) Panicf(format string, v ...interface{}) {
+ suffix := w.formatter.GetSuffix(w.lvl)
+ v = w.formatter.Format(w.lvl, v...)
+ w.logger.Panicf("%s"+format+suffix, v...)
+}
+
+// Panicln ...
+func (w *Wrapper) Panicln(v ...interface{}) {
+ v = w.formatter.Format(w.lvl, v...)
+ v = append(v, w.formatter.GetSuffix(w.lvl))
+ w.logger.Panicln(v...)
+}
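
Putting logger.go together, `New` accepts arbitrary writers and falls back to stdout/stderr and the DefaultFormatter when given nil; a sketch that routes output into buffers for inspection (the buffers and messages are illustrative):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/RichardKnop/logging"
)

func main() {
	var out, errOut bytes.Buffer

	// A nil formatter falls back to DefaultFormatter.
	l := logging.New(&out, &errOut, nil)

	l[logging.INFO].Print("informational message") // written to out
	l[logging.ERROR].Print("something went wrong") // written to errOut

	fmt.Print(out.String(), errOut.String())
}
```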
diff --git a/vendor/github.com/RichardKnop/machinery/LICENSE b/vendor/github.com/RichardKnop/machinery/LICENSE
new file mode 100644
index 000000000..c33dcc7c9
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/RichardKnop/machinery/v1/backends/amqp/amqp.go b/vendor/github.com/RichardKnop/machinery/v1/backends/amqp/amqp.go
new file mode 100644
index 000000000..0ca650ba7
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/backends/amqp/amqp.go
@@ -0,0 +1,393 @@
+package amqp
+
+// NOTE: Using AMQP as a result backend is quite tricky since every time we
+// read a message from the queue keeping task states, the message is removed
+// from the queue. This leads to problems with keeping a reliable state of a
+// group of tasks since concurrent processes updating the group state cause
+// race conditions and inconsistent state.
+//
+// This is avoided by a "clever" hack. A special queue identified by a group
+// UUID is created and we store serialised TaskState objects of successfully
+// completed tasks. By inspecting the queue we can then say:
+// 1) If all group tasks finished (number of unacked messages = group task count)
+// 2) If all group tasks finished AND succeeded (by consuming the queue)
+//
+// It is important to consume the queue exclusively to avoid race conditions.
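+//
+// For example, GroupCompleted below just opens the group's queue and compares
+// queueState.Messages against groupTaskCount; equality means every successful
+// task in the group has reported in.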
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/RichardKnop/machinery/v1/backends/iface"
+ "github.com/RichardKnop/machinery/v1/common"
+ "github.com/RichardKnop/machinery/v1/config"
+ "github.com/RichardKnop/machinery/v1/log"
+ "github.com/RichardKnop/machinery/v1/tasks"
+ "github.com/streadway/amqp"
+)
+
+// Backend represents an AMQP result backend
+type Backend struct {
+ common.Backend
+ common.AMQPConnector
+}
+
+// New creates Backend instance
+func New(cnf *config.Config) iface.Backend {
+ return &Backend{Backend: common.NewBackend(cnf), AMQPConnector: common.AMQPConnector{}}
+}
+
+// InitGroup creates and saves a group meta data object
+func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error {
+ return nil
+}
+
+// GroupCompleted returns true if all tasks in a group finished
+// NOTE: Given the AMQP limitation described above, this will only return true
+// if all finished tasks were successful, as we do not keep track of completed failed tasks
+func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
+ conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig)
+ if err != nil {
+ return false, err
+ }
+ defer b.Close(channel, conn)
+
+ queueState, err := b.InspectQueue(channel, groupUUID)
+ if err != nil {
+ return false, nil
+ }
+
+ return queueState.Messages == groupTaskCount, nil
+}
+
+// GroupTaskStates returns states of all tasks in the group
+func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
+ conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig)
+ if err != nil {
+ return nil, err
+ }
+ defer b.Close(channel, conn)
+
+ queueState, err := b.InspectQueue(channel, groupUUID)
+ if err != nil {
+ return nil, err
+ }
+
+ if queueState.Messages != groupTaskCount {
+ return nil, fmt.Errorf("Already consumed: %v", err)
+ }
+
+ deliveries, err := channel.Consume(
+ groupUUID, // queue name
+ "", // consumer tag
+ false, // auto-ack
+ true, // exclusive
+ false, // no-local
+ false, // no-wait
+ nil, // arguments
+ )
+ if err != nil {
+ return nil, fmt.Errorf("Queue consume error: %s", err)
+ }
+
+ states := make([]*tasks.TaskState, groupTaskCount)
+ for i := 0; i < groupTaskCount; i++ {
+ d := <-deliveries
+
+ state := new(tasks.TaskState)
+ decoder := json.NewDecoder(bytes.NewReader([]byte(d.Body)))
+ decoder.UseNumber()
+ if err := decoder.Decode(state); err != nil {
+ d.Nack(false, false) // multiple, requeue
+ return nil, err
+ }
+
+ d.Ack(false) // multiple
+
+ states[i] = state
+ }
+
+ return states, nil
+}
+
+// TriggerChord flags the chord as triggered in the backend storage to make sure
+// a chord is never triggered multiple times. It returns a boolean flag indicating
+// whether the worker should trigger the chord (true) or not, if it has already
+// been triggered (false)
+func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
+ conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig)
+ if err != nil {
+ return false, err
+ }
+ defer b.Close(channel, conn)
+
+ _, err = b.InspectQueue(channel, amqmChordTriggeredQueue(groupUUID))
+ if err != nil {
+ return true, nil
+ }
+
+ return false, nil
+}
+
+// SetStatePending updates task state to PENDING
+func (b *Backend) SetStatePending(signature *tasks.Signature) error {
+ taskState := tasks.NewPendingTaskState(signature)
+ return b.updateState(taskState)
+}
+
+// SetStateReceived updates task state to RECEIVED
+func (b *Backend) SetStateReceived(signature *tasks.Signature) error {
+ taskState := tasks.NewReceivedTaskState(signature)
+ return b.updateState(taskState)
+}
+
+// SetStateStarted updates task state to STARTED
+func (b *Backend) SetStateStarted(signature *tasks.Signature) error {
+ taskState := tasks.NewStartedTaskState(signature)
+ return b.updateState(taskState)
+}
+
+// SetStateRetry updates task state to RETRY
+func (b *Backend) SetStateRetry(signature *tasks.Signature) error {
+ state := tasks.NewRetryTaskState(signature)
+ return b.updateState(state)
+}
+
+// SetStateSuccess updates task state to SUCCESS
+func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
+ taskState := tasks.NewSuccessTaskState(signature, results)
+
+ if err := b.updateState(taskState); err != nil {
+ return err
+ }
+
+ if signature.GroupUUID == "" {
+ return nil
+ }
+
+ return b.markTaskCompleted(signature, taskState)
+}
+
+// SetStateFailure updates task state to FAILURE
+func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error {
+ taskState := tasks.NewFailureTaskState(signature, err)
+
+ if err := b.updateState(taskState); err != nil {
+ return err
+ }
+
+ if signature.GroupUUID == "" {
+ return nil
+ }
+
+ return b.markTaskCompleted(signature, taskState)
+}
+
+// GetState returns the latest task state. It will only return the status once
+// as the message will get consumed and removed from the queue.
+func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
+ declareQueueArgs := amqp.Table{
+ // Time in milliseconds
+ // after that message will expire
+ "x-message-ttl": int32(b.getExpiresIn()),
+ // Time after that the queue will be deleted.
+ "x-expires": int32(b.getExpiresIn()),
+ }
+ conn, channel, _, _, _, err := b.Connect(
+ b.GetConfig().ResultBackend,
+ b.GetConfig().TLSConfig,
+ b.GetConfig().AMQP.Exchange, // exchange name
+ b.GetConfig().AMQP.ExchangeType, // exchange type
+ taskUUID, // queue name
+ false, // queue durable
+ true, // queue delete when unused
+ taskUUID, // queue binding key
+ nil, // exchange declare args
+ declareQueueArgs, // queue declare args
+ nil, // queue binding args
+ )
+ if err != nil {
+ return nil, err
+ }
+ defer b.Close(channel, conn)
+
+ d, ok, err := channel.Get(
+ taskUUID, // queue name
+		false, // autoAck
+ )
+ if err != nil {
+ return nil, err
+ }
+ if !ok {
+ return nil, errors.New("No state ready")
+ }
+
+ d.Ack(false)
+
+ state := new(tasks.TaskState)
+ decoder := json.NewDecoder(bytes.NewReader([]byte(d.Body)))
+ decoder.UseNumber()
+ if err := decoder.Decode(state); err != nil {
+ log.ERROR.Printf("Failed to unmarshal task state: %s", string(d.Body))
+ log.ERROR.Print(err)
+ return nil, err
+ }
+
+ return state, nil
+}
+
+// PurgeState deletes stored task state
+func (b *Backend) PurgeState(taskUUID string) error {
+ conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig)
+ if err != nil {
+ return err
+ }
+ defer b.Close(channel, conn)
+
+ return b.DeleteQueue(channel, taskUUID)
+}
+
+// PurgeGroupMeta deletes stored group meta data
+func (b *Backend) PurgeGroupMeta(groupUUID string) error {
+ conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig)
+ if err != nil {
+ return err
+ }
+ defer b.Close(channel, conn)
+
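+	// Best-effort delete of the chord triggered marker queue; the error is
+	// ignored because the queue may never have been declared.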
+ b.DeleteQueue(channel, amqmChordTriggeredQueue(groupUUID))
+
+ return b.DeleteQueue(channel, groupUUID)
+}
+
+// updateState saves current task state
+func (b *Backend) updateState(taskState *tasks.TaskState) error {
+ message, err := json.Marshal(taskState)
+ if err != nil {
+ return fmt.Errorf("JSON marshal error: %s", err)
+ }
+
+	declareQueueArgs := amqp.Table{
+		// Time in milliseconds after which the message expires.
+		"x-message-ttl": int32(b.getExpiresIn()),
+		// Time in milliseconds after which the queue is deleted if unused.
+		"x-expires": int32(b.getExpiresIn()),
+	}
+ conn, channel, queue, confirmsChan, _, err := b.Connect(
+ b.GetConfig().ResultBackend,
+ b.GetConfig().TLSConfig,
+ b.GetConfig().AMQP.Exchange, // exchange name
+ b.GetConfig().AMQP.ExchangeType, // exchange type
+ taskState.TaskUUID, // queue name
+ false, // queue durable
+ true, // queue delete when unused
+ taskState.TaskUUID, // queue binding key
+ nil, // exchange declare args
+ declareQueueArgs, // queue declare args
+ nil, // queue binding args
+ )
+ if err != nil {
+ return err
+ }
+ defer b.Close(channel, conn)
+
+ if err := channel.Publish(
+ b.GetConfig().AMQP.Exchange, // exchange
+ queue.Name, // routing key
+ false, // mandatory
+ false, // immediate
+ amqp.Publishing{
+ ContentType: "application/json",
+ Body: message,
+			DeliveryMode: amqp.Persistent, // mark message as persistent
+ },
+ ); err != nil {
+ return err
+ }
+
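+	// Wait for the broker to confirm delivery before reporting success.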
+ confirmed := <-confirmsChan
+
+ if confirmed.Ack {
+ return nil
+ }
+
+ return fmt.Errorf("Failed delivery of delivery tag: %d", confirmed.DeliveryTag)
+}
+
+// getExpiresIn returns the expiration time in milliseconds
+func (b *Backend) getExpiresIn() int {
+	resultsExpireIn := b.GetConfig().ResultsExpireIn * 1000
+	if resultsExpireIn == 0 {
+		// expire results after 1 hour by default
+		resultsExpireIn = config.DefaultResultsExpireIn * 1000
+	}
+	return resultsExpireIn
+}
+
+// markTaskCompleted publishes the final task state to the group's queue
+// (named after the group UUID). This is important for the GroupCompleted
+// and GroupTaskStates methods
+func (b *Backend) markTaskCompleted(signature *tasks.Signature, taskState *tasks.TaskState) error {
+ if signature.GroupUUID == "" || signature.GroupTaskCount == 0 {
+ return nil
+ }
+
+ message, err := json.Marshal(taskState)
+ if err != nil {
+ return fmt.Errorf("JSON marshal error: %s", err)
+ }
+
+	declareQueueArgs := amqp.Table{
+		// Time in milliseconds after which the message expires.
+		"x-message-ttl": int32(b.getExpiresIn()),
+		// Time in milliseconds after which the queue is deleted if unused.
+		"x-expires": int32(b.getExpiresIn()),
+	}
+ conn, channel, queue, confirmsChan, _, err := b.Connect(
+ b.GetConfig().ResultBackend,
+ b.GetConfig().TLSConfig,
+ b.GetConfig().AMQP.Exchange, // exchange name
+ b.GetConfig().AMQP.ExchangeType, // exchange type
+ signature.GroupUUID, // queue name
+ false, // queue durable
+ true, // queue delete when unused
+ signature.GroupUUID, // queue binding key
+ nil, // exchange declare args
+ declareQueueArgs, // queue declare args
+ nil, // queue binding args
+ )
+ if err != nil {
+ return err
+ }
+ defer b.Close(channel, conn)
+
+ if err := channel.Publish(
+ b.GetConfig().AMQP.Exchange, // exchange
+ queue.Name, // routing key
+ false, // mandatory
+ false, // immediate
+ amqp.Publishing{
+ ContentType: "application/json",
+ Body: message,
+			DeliveryMode: amqp.Persistent, // mark message as persistent
+ },
+ ); err != nil {
+ return err
+ }
+
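+	// Wait for the broker to confirm delivery before reporting success.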
+ confirmed := <-confirmsChan
+
+ if !confirmed.Ack {
+ return fmt.Errorf("Failed delivery of delivery tag: %v", confirmed.DeliveryTag)
+ }
+
+ return nil
+}
+
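+// amqmChordTriggeredQueue returns the name of the marker queue used to record
+// that the chord callback has been triggered for a group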
+func amqmChordTriggeredQueue(groupUUID string) string {
+ return fmt.Sprintf("%s_chord_triggered", groupUUID)
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/backends/dynamodb/dynamodb.go b/vendor/github.com/RichardKnop/machinery/v1/backends/dynamodb/dynamodb.go
new file mode 100644
index 000000000..34d5be6f1
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/backends/dynamodb/dynamodb.go
@@ -0,0 +1,512 @@
+package dynamodb
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/session"
+
+ "github.com/RichardKnop/machinery/v1/backends/iface"
+ "github.com/RichardKnop/machinery/v1/common"
+ "github.com/RichardKnop/machinery/v1/config"
+ "github.com/RichardKnop/machinery/v1/log"
+ "github.com/RichardKnop/machinery/v1/tasks"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/dynamodb"
+ "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
+ "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
+)
+
+// Backend represents a DynamoDB result backend
+type Backend struct {
+ common.Backend
+ cnf *config.Config
+ client dynamodbiface.DynamoDBAPI
+}
+
+// New creates a Backend instance
+func New(cnf *config.Config) iface.Backend {
+ backend := &Backend{Backend: common.NewBackend(cnf), cnf: cnf}
+
+ if cnf.DynamoDB != nil && cnf.DynamoDB.Client != nil {
+ backend.client = cnf.DynamoDB.Client
+ } else {
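+		// Fall back to region and credentials from the environment and the
+		// shared AWS config (~/.aws/config).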
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ SharedConfigState: session.SharedConfigEnable,
+ }))
+ backend.client = dynamodb.New(sess)
+ }
+
+ // Check if needed tables exist
+ err := backend.checkRequiredTablesIfExist()
+ if err != nil {
+ log.FATAL.Printf("Failed to prepare tables. Error: %v", err)
+ }
+ return backend
+}
+
+// InitGroup creates and saves a group meta data object
+func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error {
+ meta := tasks.GroupMeta{
+ GroupUUID: groupUUID,
+ TaskUUIDs: taskUUIDs,
+ CreatedAt: time.Now().UTC(),
+ }
+ av, err := dynamodbattribute.MarshalMap(meta)
+ if err != nil {
+ log.ERROR.Printf("Error when marshaling Dynamodb attributes. Err: %v", err)
+ return err
+ }
+ input := &dynamodb.PutItemInput{
+ Item: av,
+ TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable),
+ }
+ _, err = b.client.PutItem(input)
+
+ if err != nil {
+ log.ERROR.Printf("Got error when calling PutItem: %v; Error: %v", input, err)
+ return err
+ }
+ return nil
+}
+
+// GroupCompleted returns true if all tasks in a group finished
+func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
+ groupMeta, err := b.getGroupMeta(groupUUID)
+ if err != nil {
+ return false, err
+ }
+ taskStates, err := b.getStates(groupMeta.TaskUUIDs...)
+ if err != nil {
+ return false, err
+ }
+ var countSuccessTasks = 0
+ for _, taskState := range taskStates {
+ if taskState.IsCompleted() {
+ countSuccessTasks++
+ }
+ }
+
+ return countSuccessTasks == groupTaskCount, nil
+}
+
+// GroupTaskStates returns states of all tasks in the group
+func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
+ groupMeta, err := b.getGroupMeta(groupUUID)
+ if err != nil {
+ return nil, err
+ }
+
+ return b.getStates(groupMeta.TaskUUIDs...)
+}
+
+// TriggerChord flags chord as triggered in the backend storage
+func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
+ // Get the group meta data
+ groupMeta, err := b.getGroupMeta(groupUUID)
+
+ if err != nil {
+ return false, err
+ }
+
+ // Chord has already been triggered, return false (should not trigger again)
+ if groupMeta.ChordTriggered {
+ return false, nil
+ }
+
+ // If group meta is locked, wait until it's unlocked
+ for groupMeta.Lock {
+ groupMeta, _ = b.getGroupMeta(groupUUID)
+ log.WARNING.Print("Group meta locked, waiting")
+ time.Sleep(time.Millisecond * 5)
+ }
+
+ // Acquire lock
+ if err = b.lockGroupMeta(groupUUID); err != nil {
+ return false, err
+ }
+ defer b.unlockGroupMeta(groupUUID)
+
+ // update group meta data
+ err = b.chordTriggered(groupUUID)
+ if err != nil {
+ return false, err
+ }
+ return true, err
+}
+
+// SetStatePending updates task state to PENDING
+func (b *Backend) SetStatePending(signature *tasks.Signature) error {
+	taskState := tasks.NewPendingTaskState(signature)
+	// TaskUUID is the primary key of the table, so a new item needs to be
+	// created first, instead of using dynamodb.UpdateItemInput directly
+	return b.initTaskState(taskState)
+}
+
+// SetStateReceived updates task state to RECEIVED
+func (b *Backend) SetStateReceived(signature *tasks.Signature) error {
+ taskState := tasks.NewReceivedTaskState(signature)
+ return b.setTaskState(taskState)
+}
+
+// SetStateStarted updates task state to STARTED
+func (b *Backend) SetStateStarted(signature *tasks.Signature) error {
+ taskState := tasks.NewStartedTaskState(signature)
+ return b.setTaskState(taskState)
+}
+
+// SetStateRetry updates task state to RETRY
+func (b *Backend) SetStateRetry(signature *tasks.Signature) error {
+ taskState := tasks.NewRetryTaskState(signature)
+ return b.setTaskState(taskState)
+}
+
+// SetStateSuccess updates task state to SUCCESS
+func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
+ taskState := tasks.NewSuccessTaskState(signature, results)
+ return b.setTaskState(taskState)
+}
+
+// SetStateFailure updates task state to FAILURE
+func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error {
+ taskState := tasks.NewFailureTaskState(signature, err)
+ return b.updateToFailureStateWithError(taskState)
+}
+
+// GetState returns the latest task state
+func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
+ result, err := b.client.GetItem(&dynamodb.GetItemInput{
+ TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable),
+ Key: map[string]*dynamodb.AttributeValue{
+ "TaskUUID": {
+ S: aws.String(taskUUID),
+ },
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ return b.unmarshalTaskStateGetItemResult(result)
+}
+
+// PurgeState deletes stored task state
+func (b *Backend) PurgeState(taskUUID string) error {
+	input := &dynamodb.DeleteItemInput{
+		Key: map[string]*dynamodb.AttributeValue{
+			"TaskUUID": {
+				S: aws.String(taskUUID),
+			},
+		},
+		TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable),
+	}
+ _, err := b.client.DeleteItem(input)
+
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// PurgeGroupMeta deletes stored group meta data
+func (b *Backend) PurgeGroupMeta(groupUUID string) error {
+	input := &dynamodb.DeleteItemInput{
+		Key: map[string]*dynamodb.AttributeValue{
+			"GroupUUID": {
+				S: aws.String(groupUUID),
+			},
+		},
+		TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable),
+	}
+ _, err := b.client.DeleteItem(input)
+
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (b *Backend) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) {
+ result, err := b.client.GetItem(&dynamodb.GetItemInput{
+ TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable),
+ Key: map[string]*dynamodb.AttributeValue{
+ "GroupUUID": {
+ S: aws.String(groupUUID),
+ },
+ },
+ })
+ if err != nil {
+ log.ERROR.Printf("Error when getting group meta. Error: %v", err)
+ return nil, err
+ }
+	item, err := b.unmarshalGroupMetaGetItemResult(result)
+	if err != nil {
+		log.ERROR.Printf("Failed to unmarshal item %v. Error: %v", result, err)
+		return nil, err
+	}
+ return item, nil
+}
+
+func (b *Backend) getStates(taskUUIDs ...string) ([]*tasks.TaskState, error) {
+	states := make([]*tasks.TaskState, 0, len(taskUUIDs))
+	stateChan := make(chan *tasks.TaskState, len(taskUUIDs))
+	errChan := make(chan error, len(taskUUIDs))
+	// There is no method for querying items by `in` a list of primary keys,
+	// so each state is fetched concurrently in its own goroutine.
+	for _, id := range taskUUIDs {
+		go func(id string) {
+			state, err := b.GetState(id)
+			if err != nil {
+				errChan <- err
+				return
+			}
+			stateChan <- state
+		}(id)
+	}
+
+	// Collect exactly one result or error per task UUID; the buffered
+	// channels ensure no goroutine blocks if an error ends the loop early.
+	for i := 0; i < len(taskUUIDs); i++ {
+		select {
+		case s := <-stateChan:
+			states = append(states, s)
+		case err := <-errChan:
+			return nil, err
+		}
+	}
+	return states, nil
+}
+
+func (b *Backend) lockGroupMeta(groupUUID string) error {
+ err := b.updateGroupMetaLock(groupUUID, true)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (b *Backend) unlockGroupMeta(groupUUID string) error {
+ err := b.updateGroupMetaLock(groupUUID, false)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (b *Backend) updateGroupMetaLock(groupUUID string, status bool) error {
+ input := &dynamodb.UpdateItemInput{
+ ExpressionAttributeNames: map[string]*string{
+ "#L": aws.String("Lock"),
+ },
+ ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+ ":l": {
+ BOOL: aws.Bool(status),
+ },
+ },
+ Key: map[string]*dynamodb.AttributeValue{
+ "GroupUUID": {
+ S: aws.String(groupUUID),
+ },
+ },
+ ReturnValues: aws.String("UPDATED_NEW"),
+ TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable),
+ UpdateExpression: aws.String("SET #L = :l"),
+ }
+
+ _, err := b.client.UpdateItem(input)
+
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (b *Backend) chordTriggered(groupUUID string) error {
+ input := &dynamodb.UpdateItemInput{
+ ExpressionAttributeNames: map[string]*string{
+ "#CT": aws.String("ChordTriggered"),
+ },
+ ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+ ":ct": {
+ BOOL: aws.Bool(true),
+ },
+ },
+ Key: map[string]*dynamodb.AttributeValue{
+ "GroupUUID": {
+ S: aws.String(groupUUID),
+ },
+ },
+ ReturnValues: aws.String("UPDATED_NEW"),
+ TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable),
+ UpdateExpression: aws.String("SET #CT = :ct"),
+ }
+
+ _, err := b.client.UpdateItem(input)
+
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (b *Backend) setTaskState(taskState *tasks.TaskState) error {
+ expAttributeNames := map[string]*string{
+ "#S": aws.String("State"),
+ }
+ expAttributeValues := map[string]*dynamodb.AttributeValue{
+ ":s": {
+ S: aws.String(taskState.State),
+ },
+ }
+ keyAttributeValues := map[string]*dynamodb.AttributeValue{
+ "TaskUUID": {
+ S: aws.String(taskState.TaskUUID),
+ },
+ }
+ exp := "SET #S = :s"
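+	// Optional attributes are appended to the update expression only when
+	// present, so existing values are not overwritten with empty ones.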
+ if !taskState.CreatedAt.IsZero() {
+ expAttributeNames["#C"] = aws.String("CreatedAt")
+ expAttributeValues[":c"] = &dynamodb.AttributeValue{
+ S: aws.String(taskState.CreatedAt.String()),
+ }
+ exp += ", #C = :c"
+ }
+	if len(taskState.Results) > 0 {
+ expAttributeNames["#R"] = aws.String("Results")
+ var results []*dynamodb.AttributeValue
+ for _, r := range taskState.Results {
+ avMap := map[string]*dynamodb.AttributeValue{
+ "Type": {
+ S: aws.String(r.Type),
+ },
+ "Value": {
+ S: aws.String(fmt.Sprintf("%v", r.Value)),
+ },
+ }
+ rs := &dynamodb.AttributeValue{
+ M: avMap,
+ }
+ results = append(results, rs)
+ }
+ expAttributeValues[":r"] = &dynamodb.AttributeValue{
+ L: results,
+ }
+ exp += ", #R = :r"
+ }
+ input := &dynamodb.UpdateItemInput{
+ ExpressionAttributeNames: expAttributeNames,
+ ExpressionAttributeValues: expAttributeValues,
+ Key: keyAttributeValues,
+ ReturnValues: aws.String("UPDATED_NEW"),
+ TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable),
+ UpdateExpression: aws.String(exp),
+ }
+
+ _, err := b.client.UpdateItem(input)
+
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (b *Backend) initTaskState(taskState *tasks.TaskState) error {
+	av, err := dynamodbattribute.MarshalMap(taskState)
+	if err != nil {
+		return err
+	}
+	input := &dynamodb.PutItemInput{
+		Item: av,
+		TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable),
+	}
+	_, err = b.client.PutItem(input)
+
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (b *Backend) updateToFailureStateWithError(taskState *tasks.TaskState) error {
+ input := &dynamodb.UpdateItemInput{
+ ExpressionAttributeNames: map[string]*string{
+ "#S": aws.String("State"),
+ "#E": aws.String("Error"),
+ },
+ ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+ ":s": {
+ S: aws.String(taskState.State),
+ },
+ ":e": {
+ S: aws.String(taskState.Error),
+ },
+ },
+ Key: map[string]*dynamodb.AttributeValue{
+ "TaskUUID": {
+ S: aws.String(taskState.TaskUUID),
+ },
+ },
+ ReturnValues: aws.String("UPDATED_NEW"),
+ TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable),
+ UpdateExpression: aws.String("SET #S = :s, #E = :e"),
+ }
+
+ _, err := b.client.UpdateItem(input)
+
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (b *Backend) unmarshalGroupMetaGetItemResult(result *dynamodb.GetItemOutput) (*tasks.GroupMeta, error) {
+	if result == nil {
+		err := errors.New("group meta is nil")
+		log.ERROR.Printf("Got error when unmarshaling map. Error: %v", err)
+		return nil, err
+	}
+	item := tasks.GroupMeta{}
+	err := dynamodbattribute.UnmarshalMap(result.Item, &item)
+	if err != nil {
+		log.ERROR.Printf("Got error when unmarshaling map. Error: %v", err)
+		return nil, err
+	}
+	return &item, nil
+}
+
+func (b *Backend) unmarshalTaskStateGetItemResult(result *dynamodb.GetItemOutput) (*tasks.TaskState, error) {
+	if result == nil {
+		err := errors.New("task state is nil")
+		log.ERROR.Printf("Got error when unmarshaling map. Error: %v", err)
+		return nil, err
+	}
+	state := tasks.TaskState{}
+	err := dynamodbattribute.UnmarshalMap(result.Item, &state)
+	if err != nil {
+		log.ERROR.Printf("Got error when unmarshaling map. Error: %v", err)
+		return nil, err
+	}
+	return &state, nil
+}
+
+func (b *Backend) checkRequiredTablesIfExist() error {
+ var (
+ taskTableName = b.cnf.DynamoDB.TaskStatesTable
+ groupTableName = b.cnf.DynamoDB.GroupMetasTable
+ )
+ result, err := b.client.ListTables(&dynamodb.ListTablesInput{})
+ if err != nil {
+ return err
+ }
+ if !b.tableExists(taskTableName, result.TableNames) {
+ return errors.New("task table doesn't exist")
+ }
+ if !b.tableExists(groupTableName, result.TableNames) {
+ return errors.New("group table doesn't exist")
+ }
+ return nil
+}
+
+func (b *Backend) tableExists(tableName string, tableNames []*string) bool {
+ for _, t := range tableNames {
+ if tableName == *t {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/backends/eager/eager.go b/vendor/github.com/RichardKnop/machinery/v1/backends/eager/eager.go
new file mode 100644
index 000000000..8f48e67bf
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/backends/eager/eager.go
@@ -0,0 +1,210 @@
+package eager
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "sync"
+
+ "github.com/RichardKnop/machinery/v1/backends/iface"
+ "github.com/RichardKnop/machinery/v1/common"
+ "github.com/RichardKnop/machinery/v1/config"
+ "github.com/RichardKnop/machinery/v1/tasks"
+)
+
+// ErrGroupNotFound ...
+type ErrGroupNotFound struct {
+ groupUUID string
+}
+
+// NewErrGroupNotFound returns new instance of ErrGroupNotFound
+func NewErrGroupNotFound(groupUUID string) ErrGroupNotFound {
+ return ErrGroupNotFound{groupUUID: groupUUID}
+}
+
+// Error implements error interface
+func (e ErrGroupNotFound) Error() string {
+ return fmt.Sprintf("Group not found: %v", e.groupUUID)
+}
+
+// ErrTasknotFound ...
+type ErrTasknotFound struct {
+ taskUUID string
+}
+
+// NewErrTasknotFound returns new instance of ErrTasknotFound
+func NewErrTasknotFound(taskUUID string) ErrTasknotFound {
+ return ErrTasknotFound{taskUUID: taskUUID}
+}
+
+// Error implements error interface
+func (e ErrTasknotFound) Error() string {
+ return fmt.Sprintf("Task not found: %v", e.taskUUID)
+}
+
+// Backend represents an "eager" in-memory result backend
+type Backend struct {
+ common.Backend
+ groups map[string][]string
+ tasks map[string][]byte
+ stateMutex sync.Mutex
+}
+
+// New creates eager Backend instance
+func New() iface.Backend {
+ return &Backend{
+ Backend: common.NewBackend(new(config.Config)),
+ groups: make(map[string][]string),
+ tasks: make(map[string][]byte),
+ }
+}
+
+// InitGroup creates and saves a group meta data object
+func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error {
+	tasks := make([]string, len(taskUUIDs))
+	// copy every task UUID
+	copy(tasks, taskUUIDs)
+
+ b.groups[groupUUID] = tasks
+ return nil
+}
+
+// GroupCompleted returns true if all tasks in a group finished
+func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
+ tasks, ok := b.groups[groupUUID]
+ if !ok {
+ return false, NewErrGroupNotFound(groupUUID)
+ }
+
+ var countSuccessTasks = 0
+ for _, v := range tasks {
+ t, err := b.GetState(v)
+ if err != nil {
+ return false, err
+ }
+
+ if t.IsCompleted() {
+ countSuccessTasks++
+ }
+ }
+
+ return countSuccessTasks == groupTaskCount, nil
+}
+
+// GroupTaskStates returns states of all tasks in the group
+func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
+ taskUUIDs, ok := b.groups[groupUUID]
+ if !ok {
+ return nil, NewErrGroupNotFound(groupUUID)
+ }
+
+ ret := make([]*tasks.TaskState, 0, groupTaskCount)
+ for _, taskUUID := range taskUUIDs {
+ t, err := b.GetState(taskUUID)
+ if err != nil {
+ return nil, err
+ }
+
+ ret = append(ret, t)
+ }
+
+ return ret, nil
+}
+
+// TriggerChord flags chord as triggered in the backend storage to make sure
+// chord is never triggered multiple times. Returns a boolean flag to indicate
+// whether the worker should trigger chord (true) or not if it has been
+// triggered already (false)
+func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
+ return true, nil
+}
+
+// SetStatePending updates task state to PENDING
+func (b *Backend) SetStatePending(signature *tasks.Signature) error {
+ state := tasks.NewPendingTaskState(signature)
+ return b.updateState(state)
+}
+
+// SetStateReceived updates task state to RECEIVED
+func (b *Backend) SetStateReceived(signature *tasks.Signature) error {
+ state := tasks.NewReceivedTaskState(signature)
+ return b.updateState(state)
+}
+
+// SetStateStarted updates task state to STARTED
+func (b *Backend) SetStateStarted(signature *tasks.Signature) error {
+ state := tasks.NewStartedTaskState(signature)
+ return b.updateState(state)
+}
+
+// SetStateRetry updates task state to RETRY
+func (b *Backend) SetStateRetry(signature *tasks.Signature) error {
+ state := tasks.NewRetryTaskState(signature)
+ return b.updateState(state)
+}
+
+// SetStateSuccess updates task state to SUCCESS
+func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
+ state := tasks.NewSuccessTaskState(signature, results)
+ return b.updateState(state)
+}
+
+// SetStateFailure updates task state to FAILURE
+func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error {
+ state := tasks.NewFailureTaskState(signature, err)
+ return b.updateState(state)
+}
+
+// GetState returns the latest task state
+func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
+	taskStateBytes, ok := b.tasks[taskUUID]
+	if !ok {
+		return nil, NewErrTasknotFound(taskUUID)
+	}
+
+	state := new(tasks.TaskState)
+	decoder := json.NewDecoder(bytes.NewReader(taskStateBytes))
+	decoder.UseNumber()
+	if err := decoder.Decode(state); err != nil {
+		return nil, fmt.Errorf("Failed to unmarshal task state: %v", err)
+	}
+
+	return state, nil
+}
+
+// PurgeState deletes stored task state
+func (b *Backend) PurgeState(taskUUID string) error {
+ _, ok := b.tasks[taskUUID]
+ if !ok {
+ return NewErrTasknotFound(taskUUID)
+ }
+
+ delete(b.tasks, taskUUID)
+ return nil
+}
+
+// PurgeGroupMeta deletes stored group meta data
+func (b *Backend) PurgeGroupMeta(groupUUID string) error {
+ _, ok := b.groups[groupUUID]
+ if !ok {
+ return NewErrGroupNotFound(groupUUID)
+ }
+
+ delete(b.groups, groupUUID)
+ return nil
+}
+
+func (b *Backend) updateState(s *tasks.TaskState) error {
+ // simulate the behavior of json marshal/unmarshal
+ b.stateMutex.Lock()
+ defer b.stateMutex.Unlock()
+ msg, err := json.Marshal(s)
+ if err != nil {
+ return fmt.Errorf("Marshal task state error: %v", err)
+ }
+
+ b.tasks[s.TaskUUID] = msg
+ return nil
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/backends/iface/interfaces.go b/vendor/github.com/RichardKnop/machinery/v1/backends/iface/interfaces.go
new file mode 100644
index 000000000..56dff517b
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/backends/iface/interfaces.go
@@ -0,0 +1,28 @@
+package iface
+
+import (
+ "github.com/RichardKnop/machinery/v1/tasks"
+)
+
+// Backend - a common interface for all result backends
+type Backend interface {
+ // Group related functions
+ InitGroup(groupUUID string, taskUUIDs []string) error
+ GroupCompleted(groupUUID string, groupTaskCount int) (bool, error)
+ GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error)
+ TriggerChord(groupUUID string) (bool, error)
+
+ // Setting / getting task state
+ SetStatePending(signature *tasks.Signature) error
+ SetStateReceived(signature *tasks.Signature) error
+ SetStateStarted(signature *tasks.Signature) error
+ SetStateRetry(signature *tasks.Signature) error
+ SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error
+ SetStateFailure(signature *tasks.Signature, err string) error
+ GetState(taskUUID string) (*tasks.TaskState, error)
+
+	// IsAMQP returns true if the backend is AMQP based
+	IsAMQP() bool
+
+	// Purging stored task states and group meta data
+ PurgeState(taskUUID string) error
+ PurgeGroupMeta(groupUUID string) error
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/backends/memcache/memcache.go b/vendor/github.com/RichardKnop/machinery/v1/backends/memcache/memcache.go
new file mode 100644
index 000000000..e6cc10fd2
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/backends/memcache/memcache.go
@@ -0,0 +1,292 @@
+package memcache
+
+import (
+ "bytes"
+ "encoding/json"
+ "time"
+
+ "github.com/RichardKnop/machinery/v1/backends/iface"
+ "github.com/RichardKnop/machinery/v1/common"
+ "github.com/RichardKnop/machinery/v1/config"
+ "github.com/RichardKnop/machinery/v1/log"
+ "github.com/RichardKnop/machinery/v1/tasks"
+
+ gomemcache "github.com/bradfitz/gomemcache/memcache"
+)
+
+// Backend represents a Memcache result backend
+type Backend struct {
+ common.Backend
+ servers []string
+ client *gomemcache.Client
+}
+
+// New creates Backend instance
+func New(cnf *config.Config, servers []string) iface.Backend {
+ return &Backend{
+ Backend: common.NewBackend(cnf),
+ servers: servers,
+ }
+}
+
+// InitGroup creates and saves a group meta data object
+func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error {
+ groupMeta := &tasks.GroupMeta{
+ GroupUUID: groupUUID,
+ TaskUUIDs: taskUUIDs,
+ CreatedAt: time.Now().UTC(),
+ }
+
+ encoded, err := json.Marshal(&groupMeta)
+ if err != nil {
+ return err
+ }
+
+ return b.getClient().Set(&gomemcache.Item{
+ Key: groupUUID,
+ Value: encoded,
+ Expiration: b.getExpirationTimestamp(),
+ })
+}
+
+// GroupCompleted returns true if all tasks in a group finished
+func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
+ groupMeta, err := b.getGroupMeta(groupUUID)
+ if err != nil {
+ return false, err
+ }
+
+ taskStates, err := b.getStates(groupMeta.TaskUUIDs...)
+ if err != nil {
+ return false, err
+ }
+
+ var countSuccessTasks = 0
+ for _, taskState := range taskStates {
+ if taskState.IsCompleted() {
+ countSuccessTasks++
+ }
+ }
+
+ return countSuccessTasks == groupTaskCount, nil
+}
+
+// GroupTaskStates returns states of all tasks in the group
+func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
+ groupMeta, err := b.getGroupMeta(groupUUID)
+ if err != nil {
+ return []*tasks.TaskState{}, err
+ }
+
+ return b.getStates(groupMeta.TaskUUIDs...)
+}
+
+// TriggerChord flags chord as triggered in the backend storage to make sure
+// chord is never triggered multiple times. Returns a boolean flag to indicate
+// whether the worker should trigger chord (true) or not if it has been
+// triggered already (false)
+func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
+ groupMeta, err := b.getGroupMeta(groupUUID)
+ if err != nil {
+ return false, err
+ }
+
+ // Chord has already been triggered, return false (should not trigger again)
+ if groupMeta.ChordTriggered {
+ return false, nil
+ }
+
+ // If group meta is locked, wait until it's unlocked
+ for groupMeta.Lock {
+ groupMeta, _ = b.getGroupMeta(groupUUID)
+ log.WARNING.Print("Group meta locked, waiting")
+ time.Sleep(time.Millisecond * 5)
+ }
+
+ // Acquire lock
+ if err = b.lockGroupMeta(groupMeta); err != nil {
+ return false, err
+ }
+ defer b.unlockGroupMeta(groupMeta)
+
+ // Update the group meta data
+ groupMeta.ChordTriggered = true
+ encoded, err := json.Marshal(&groupMeta)
+ if err != nil {
+ return false, err
+ }
+ if err = b.getClient().Replace(&gomemcache.Item{
+ Key: groupUUID,
+ Value: encoded,
+ Expiration: b.getExpirationTimestamp(),
+ }); err != nil {
+ return false, err
+ }
+
+ return true, nil
+}
+
+// SetStatePending updates task state to PENDING
+func (b *Backend) SetStatePending(signature *tasks.Signature) error {
+ taskState := tasks.NewPendingTaskState(signature)
+ return b.updateState(taskState)
+}
+
+// SetStateReceived updates task state to RECEIVED
+func (b *Backend) SetStateReceived(signature *tasks.Signature) error {
+ taskState := tasks.NewReceivedTaskState(signature)
+ return b.updateState(taskState)
+}
+
+// SetStateStarted updates task state to STARTED
+func (b *Backend) SetStateStarted(signature *tasks.Signature) error {
+ taskState := tasks.NewStartedTaskState(signature)
+ return b.updateState(taskState)
+}
+
+// SetStateRetry updates task state to RETRY
+func (b *Backend) SetStateRetry(signature *tasks.Signature) error {
+	taskState := tasks.NewRetryTaskState(signature)
+	return b.updateState(taskState)
+}
+
+// SetStateSuccess updates task state to SUCCESS
+func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
+ taskState := tasks.NewSuccessTaskState(signature, results)
+ return b.updateState(taskState)
+}
+
+// SetStateFailure updates task state to FAILURE
+func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error {
+ taskState := tasks.NewFailureTaskState(signature, err)
+ return b.updateState(taskState)
+}
+
+// GetState returns the latest task state
+func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
+ item, err := b.getClient().Get(taskUUID)
+ if err != nil {
+ return nil, err
+ }
+
+ state := new(tasks.TaskState)
+ decoder := json.NewDecoder(bytes.NewReader(item.Value))
+ decoder.UseNumber()
+ if err := decoder.Decode(state); err != nil {
+ return nil, err
+ }
+
+ return state, nil
+}
+
+// PurgeState deletes stored task state
+func (b *Backend) PurgeState(taskUUID string) error {
+ return b.getClient().Delete(taskUUID)
+}
+
+// PurgeGroupMeta deletes stored group meta data
+func (b *Backend) PurgeGroupMeta(groupUUID string) error {
+ return b.getClient().Delete(groupUUID)
+}
+
+// updateState saves current task state
+func (b *Backend) updateState(taskState *tasks.TaskState) error {
+ encoded, err := json.Marshal(taskState)
+ if err != nil {
+ return err
+ }
+
+ return b.getClient().Set(&gomemcache.Item{
+ Key: taskState.TaskUUID,
+ Value: encoded,
+ Expiration: b.getExpirationTimestamp(),
+ })
+}
+
+// lockGroupMeta acquires lock on group meta data
+func (b *Backend) lockGroupMeta(groupMeta *tasks.GroupMeta) error {
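+	// Note: Set is not an atomic test-and-set, so this lock is best effort;
+	// concurrent workers may still briefly race on the group meta.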
+ groupMeta.Lock = true
+ encoded, err := json.Marshal(groupMeta)
+ if err != nil {
+ return err
+ }
+
+ return b.getClient().Set(&gomemcache.Item{
+ Key: groupMeta.GroupUUID,
+ Value: encoded,
+ Expiration: b.getExpirationTimestamp(),
+ })
+}
+
+// unlockGroupMeta releases lock on group meta data
+func (b *Backend) unlockGroupMeta(groupMeta *tasks.GroupMeta) error {
+ groupMeta.Lock = false
+ encoded, err := json.Marshal(groupMeta)
+ if err != nil {
+ return err
+ }
+
+ return b.getClient().Set(&gomemcache.Item{
+ Key: groupMeta.GroupUUID,
+ Value: encoded,
+ Expiration: b.getExpirationTimestamp(),
+ })
+}
+
+// getGroupMeta retrieves group meta data, convenience function to avoid repetition
+func (b *Backend) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) {
+ item, err := b.getClient().Get(groupUUID)
+ if err != nil {
+ return nil, err
+ }
+
+ groupMeta := new(tasks.GroupMeta)
+ decoder := json.NewDecoder(bytes.NewReader(item.Value))
+ decoder.UseNumber()
+ if err := decoder.Decode(groupMeta); err != nil {
+ return nil, err
+ }
+
+ return groupMeta, nil
+}
+
+// getStates returns multiple task states
+func (b *Backend) getStates(taskUUIDs ...string) ([]*tasks.TaskState, error) {
+ states := make([]*tasks.TaskState, len(taskUUIDs))
+
+ for i, taskUUID := range taskUUIDs {
+ item, err := b.getClient().Get(taskUUID)
+ if err != nil {
+ return nil, err
+ }
+
+ state := new(tasks.TaskState)
+ decoder := json.NewDecoder(bytes.NewReader(item.Value))
+ decoder.UseNumber()
+ if err := decoder.Decode(state); err != nil {
+ return nil, err
+ }
+
+ states[i] = state
+ }
+
+ return states, nil
+}
+
+// getExpirationTimestamp returns expiration timestamp
+func (b *Backend) getExpirationTimestamp() int32 {
+ expiresIn := b.GetConfig().ResultsExpireIn
+ if expiresIn == 0 {
+		// expire results after 1 hour by default
+ expiresIn = config.DefaultResultsExpireIn
+ }
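+	// Memcache treats expiration values larger than 30 days as an absolute
+	// Unix timestamp, hence now + expiresIn.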
+ return int32(time.Now().Unix() + int64(expiresIn))
+}
+
+// getClient returns or creates instance of Memcache client
+func (b *Backend) getClient() *gomemcache.Client {
+ if b.client == nil {
+ b.client = gomemcache.New(b.servers...)
+ }
+ return b.client
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/backends/mongo/mongodb.go b/vendor/github.com/RichardKnop/machinery/v1/backends/mongo/mongodb.go
new file mode 100644
index 000000000..863545fa7
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/backends/mongo/mongodb.go
@@ -0,0 +1,358 @@
+package mongo
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+
+ "github.com/RichardKnop/machinery/v1/backends/iface"
+ "github.com/RichardKnop/machinery/v1/common"
+ "github.com/RichardKnop/machinery/v1/config"
+ "github.com/RichardKnop/machinery/v1/log"
+ "github.com/RichardKnop/machinery/v1/tasks"
+)
+
+// Backend represents a MongoDB result backend
+type Backend struct {
+ common.Backend
+ client *mongo.Client
+ tc *mongo.Collection
+ gmc *mongo.Collection
+ once sync.Once
+}
+
+// New creates Backend instance
+func New(cnf *config.Config) (iface.Backend, error) {
+ backend := &Backend{
+ Backend: common.NewBackend(cnf),
+ once: sync.Once{},
+ }
+
+ return backend, nil
+}
+
+// InitGroup creates and saves a group meta data object
+func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error {
+ groupMeta := &tasks.GroupMeta{
+ GroupUUID: groupUUID,
+ TaskUUIDs: taskUUIDs,
+ CreatedAt: time.Now().UTC(),
+ }
+ _, err := b.groupMetasCollection().InsertOne(context.Background(), groupMeta)
+ return err
+}
+
+// GroupCompleted returns true if all tasks in a group finished
+func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
+ groupMeta, err := b.getGroupMeta(groupUUID)
+ if err != nil {
+ return false, err
+ }
+
+ taskStates, err := b.getStates(groupMeta.TaskUUIDs...)
+ if err != nil {
+ return false, err
+ }
+
+ var countSuccessTasks = 0
+ for _, taskState := range taskStates {
+ if taskState.IsCompleted() {
+ countSuccessTasks++
+ }
+ }
+
+ return countSuccessTasks == groupTaskCount, nil
+}
+
+// GroupTaskStates returns states of all tasks in the group
+func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
+ groupMeta, err := b.getGroupMeta(groupUUID)
+ if err != nil {
+ return []*tasks.TaskState{}, err
+ }
+
+ return b.getStates(groupMeta.TaskUUIDs...)
+}
+
+// TriggerChord flags chord as triggered in the backend storage to make sure
+// chord is never triggered multiple times. Returns a boolean flag to indicate
+// whether the worker should trigger chord (true) or not if it has been
+// triggered already (false)
+func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
+ query := bson.M{
+ "_id": groupUUID,
+ "chord_triggered": false,
+ }
+ change := bson.M{
+ "$set": bson.M{
+ "chord_triggered": true,
+ },
+ }
+
+	// The query only matches documents where chord_triggered is still false,
+	// so the update acts as an atomic test-and-set.
+	result, err := b.groupMetasCollection().UpdateOne(context.Background(), query, change, options.Update())
+	if err != nil {
+		return false, err
+	}
+
+	// UpdateOne does not return mongo.ErrNoDocuments; a no-op update is
+	// reported through MatchedCount instead.
+	if result.MatchedCount == 0 {
+		log.WARNING.Printf("Chord already triggered for group %s", groupUUID)
+		return false, nil
+	}
+	return true, nil
+}
+
+// SetStatePending updates task state to PENDING
+func (b *Backend) SetStatePending(signature *tasks.Signature) error {
+ update := bson.M{
+ "state": tasks.StatePending,
+ "task_name": signature.Name,
+ "created_at": time.Now().UTC(),
+ }
+ return b.updateState(signature, update)
+}
+
+// SetStateReceived updates task state to RECEIVED
+func (b *Backend) SetStateReceived(signature *tasks.Signature) error {
+ update := bson.M{"state": tasks.StateReceived}
+ return b.updateState(signature, update)
+}
+
+// SetStateStarted updates task state to STARTED
+func (b *Backend) SetStateStarted(signature *tasks.Signature) error {
+ update := bson.M{"state": tasks.StateStarted}
+ return b.updateState(signature, update)
+}
+
+// SetStateRetry updates task state to RETRY
+func (b *Backend) SetStateRetry(signature *tasks.Signature) error {
+ update := bson.M{"state": tasks.StateRetry}
+ return b.updateState(signature, update)
+}
+
+// SetStateSuccess updates task state to SUCCESS
+func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
+ decodedResults := b.decodeResults(results)
+ update := bson.M{
+ "state": tasks.StateSuccess,
+ "results": decodedResults,
+ }
+ return b.updateState(signature, update)
+}
+
+// decodeResults detects & decodes json strings in TaskResult.Value and returns a new slice
+func (b *Backend) decodeResults(results []*tasks.TaskResult) []*tasks.TaskResult {
+ l := len(results)
+	jsonResults := make([]*tasks.TaskResult, l)
+ for i, result := range results {
+ jsonResult := new(bson.M)
+ resultType := reflect.TypeOf(result.Value).Kind()
+ if resultType == reflect.String {
+ err := json.NewDecoder(strings.NewReader(result.Value.(string))).Decode(&jsonResult)
+ if err == nil {
+ jsonResults[i] = &tasks.TaskResult{
+ Type: "json",
+ Value: jsonResult,
+ }
+ continue
+ }
+ }
+ jsonResults[i] = result
+ }
+ return jsonResults
+}
+
+// SetStateFailure updates task state to FAILURE
+func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error {
+ update := bson.M{"state": tasks.StateFailure, "error": err}
+ return b.updateState(signature, update)
+}
+
+// GetState returns the latest task state
+func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
+ state := &tasks.TaskState{}
+ err := b.tasksCollection().FindOne(context.Background(), bson.M{"_id": taskUUID}).Decode(state)
+
+ if err != nil {
+ return nil, err
+ }
+ return state, nil
+}
+
+// PurgeState deletes stored task state
+func (b *Backend) PurgeState(taskUUID string) error {
+ _, err := b.tasksCollection().DeleteOne(context.Background(), bson.M{"_id": taskUUID})
+ return err
+}
+
+// PurgeGroupMeta deletes stored group meta data
+func (b *Backend) PurgeGroupMeta(groupUUID string) error {
+ _, err := b.groupMetasCollection().DeleteOne(context.Background(), bson.M{"_id": groupUUID})
+ return err
+}
+
+// lockGroupMeta acquires lock on groupUUID document
+func (b *Backend) lockGroupMeta(groupUUID string) error {
+ query := bson.M{
+ "_id": groupUUID,
+ "lock": false,
+ }
+ change := bson.M{
+ "$set": bson.M{
+ "lock": true,
+ },
+ }
+
+ _, err := b.groupMetasCollection().UpdateOne(context.Background(), query, change, options.Update().SetUpsert(true))
+
+ return err
+}
+
+// unlockGroupMeta releases lock on groupUUID document
+func (b *Backend) unlockGroupMeta(groupUUID string) error {
+ update := bson.M{"$set": bson.M{"lock": false}}
+ _, err := b.groupMetasCollection().UpdateOne(context.Background(), bson.M{"_id": groupUUID}, update, options.Update())
+ return err
+}
+
+// getGroupMeta retrieves group meta data, convenience function to avoid repetition
+func (b *Backend) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) {
+ groupMeta := &tasks.GroupMeta{}
+ query := bson.M{"_id": groupUUID}
+
+ err := b.groupMetasCollection().FindOne(context.Background(), query).Decode(groupMeta)
+ if err != nil {
+ return nil, err
+ }
+ return groupMeta, nil
+}
+
+// getStates returns multiple task states
+func (b *Backend) getStates(taskUUIDs ...string) ([]*tasks.TaskState, error) {
+ states := make([]*tasks.TaskState, 0, len(taskUUIDs))
+ cur, err := b.tasksCollection().Find(context.Background(), bson.M{"_id": bson.M{"$in": taskUUIDs}})
+ if err != nil {
+ return nil, err
+ }
+ defer cur.Close(context.Background())
+
+ for cur.Next(context.Background()) {
+ state := &tasks.TaskState{}
+ if err := cur.Decode(state); err != nil {
+ return nil, err
+ }
+ states = append(states, state)
+ }
+	if err := cur.Err(); err != nil {
+		return nil, err
+	}
+ return states, nil
+}
+
+// updateState saves current task state
+func (b *Backend) updateState(signature *tasks.Signature, update bson.M) error {
+ update = bson.M{"$set": update}
+ _, err := b.tasksCollection().UpdateOne(context.Background(), bson.M{"_id": signature.UUID}, update, options.Update().SetUpsert(true))
+ return err
+}
+
+func (b *Backend) tasksCollection() *mongo.Collection {
+ b.once.Do(func() {
+ b.connect()
+ })
+
+ return b.tc
+}
+
+func (b *Backend) groupMetasCollection() *mongo.Collection {
+ b.once.Do(func() {
+ b.connect()
+ })
+
+ return b.gmc
+}
+
+// connect creates the underlying mongo client connection if it doesn't exist
+// and creates required indexes for our collections
+func (b *Backend) connect() error {
+ client, err := b.dial()
+ if err != nil {
+ return err
+ }
+ b.client = client
+
+ database := "machinery"
+
+ if b.GetConfig().MongoDB != nil {
+ database = b.GetConfig().MongoDB.Database
+ }
+
+ b.tc = b.client.Database(database).Collection("tasks")
+ b.gmc = b.client.Database(database).Collection("group_metas")
+
+ err = b.createMongoIndexes(database)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// dial connects to mongo with TLSConfig if provided
+// else connects via ResultBackend uri
+func (b *Backend) dial() (*mongo.Client, error) {
+
+ if b.GetConfig().MongoDB != nil && b.GetConfig().MongoDB.Client != nil {
+ return b.GetConfig().MongoDB.Client, nil
+ }
+
+ uri := b.GetConfig().ResultBackend
+	if !strings.HasPrefix(uri, "mongodb://") &&
+		!strings.HasPrefix(uri, "mongodb+srv://") {
+ uri = fmt.Sprintf("mongodb://%s", uri)
+ }
+
+ client, err := mongo.NewClient(options.Client().ApplyURI(uri))
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ if err := client.Connect(ctx); err != nil {
+ return nil, err
+ }
+
+ return client, nil
+}
+
+// createMongoIndexes ensures all indexes are in place
+func (b *Backend) createMongoIndexes(database string) error {
+
+ tasksCollection := b.client.Database(database).Collection("tasks")
+
+ expireIn := int32(b.GetConfig().ResultsExpireIn)
+
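+	// Note: MongoDB TTL expiry only applies to indexed date fields, so on the
+	// string "state" and bool "lock" fields these options are effectively inert.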
+	_, err := tasksCollection.Indexes().CreateMany(context.Background(), []mongo.IndexModel{
+		{
+			Keys:    bson.M{"state": 1},
+			Options: options.Index().SetBackground(true).SetExpireAfterSeconds(expireIn),
+		},
+		{
+			Keys:    bson.M{"lock": 1},
+			Options: options.Index().SetBackground(true).SetExpireAfterSeconds(expireIn),
+		},
+	})
+
+	return err
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/backends/null/null.go b/vendor/github.com/RichardKnop/machinery/v1/backends/null/null.go
new file mode 100644
index 000000000..18b4b45a0
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/backends/null/null.go
@@ -0,0 +1,150 @@
+package null
+
+import (
+ "fmt"
+
+ "github.com/RichardKnop/machinery/v1/backends/iface"
+ "github.com/RichardKnop/machinery/v1/common"
+ "github.com/RichardKnop/machinery/v1/config"
+ "github.com/RichardKnop/machinery/v1/tasks"
+)
+
+// ErrGroupNotFound ...
+type ErrGroupNotFound struct {
+ groupUUID string
+}
+
+// NewErrGroupNotFound returns new instance of ErrGroupNotFound
+func NewErrGroupNotFound(groupUUID string) ErrGroupNotFound {
+ return ErrGroupNotFound{groupUUID: groupUUID}
+}
+
+// Error implements error interface
+func (e ErrGroupNotFound) Error() string {
+ return fmt.Sprintf("Group not found: %v", e.groupUUID)
+}
+
+// ErrTasknotFound ...
+type ErrTasknotFound struct {
+ taskUUID string
+}
+
+// NewErrTasknotFound returns new instance of ErrTasknotFound
+func NewErrTasknotFound(taskUUID string) ErrTasknotFound {
+ return ErrTasknotFound{taskUUID: taskUUID}
+}
+
+// Error implements error interface
+func (e ErrTasknotFound) Error() string {
+ return fmt.Sprintf("Task not found: %v", e.taskUUID)
+}
+
+// Backend represents a no-op "null" result backend
+type Backend struct {
+ common.Backend
+ groups map[string]struct{}
+}
+
+// New creates null Backend instance
+func New() iface.Backend {
+ return &Backend{
+ Backend: common.NewBackend(new(config.Config)),
+ groups: make(map[string]struct{}),
+ }
+}
+
+// InitGroup creates and saves a group meta data object
+func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error {
+ b.groups[groupUUID] = struct{}{}
+ return nil
+}
+
+// GroupCompleted returns true if all tasks in a group finished
+func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
+ _, ok := b.groups[groupUUID]
+ if !ok {
+ return false, NewErrGroupNotFound(groupUUID)
+ }
+
+ return true, nil
+}
+
+// GroupTaskStates returns states of all tasks in the group
+func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
+ _, ok := b.groups[groupUUID]
+ if !ok {
+ return nil, NewErrGroupNotFound(groupUUID)
+ }
+
+ ret := make([]*tasks.TaskState, 0, groupTaskCount)
+ return ret, nil
+}
+
+// TriggerChord flags chord as triggered in the backend storage to make sure
+// chord is never triggered multiple times. Returns a boolean flag to indicate
+// whether the worker should trigger chord (true) or not if it has been
+// triggered already (false)
+func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
+ return true, nil
+}
+
+// SetStatePending updates task state to PENDING
+func (b *Backend) SetStatePending(signature *tasks.Signature) error {
+ state := tasks.NewPendingTaskState(signature)
+ return b.updateState(state)
+}
+
+// SetStateReceived updates task state to RECEIVED
+func (b *Backend) SetStateReceived(signature *tasks.Signature) error {
+ state := tasks.NewReceivedTaskState(signature)
+ return b.updateState(state)
+}
+
+// SetStateStarted updates task state to STARTED
+func (b *Backend) SetStateStarted(signature *tasks.Signature) error {
+ state := tasks.NewStartedTaskState(signature)
+ return b.updateState(state)
+}
+
+// SetStateRetry updates task state to RETRY
+func (b *Backend) SetStateRetry(signature *tasks.Signature) error {
+ state := tasks.NewRetryTaskState(signature)
+ return b.updateState(state)
+}
+
+// SetStateSuccess updates task state to SUCCESS
+func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
+ state := tasks.NewSuccessTaskState(signature, results)
+ return b.updateState(state)
+}
+
+// SetStateFailure updates task state to FAILURE
+func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error {
+ state := tasks.NewFailureTaskState(signature, err)
+ return b.updateState(state)
+}
+
+// GetState returns the latest task state
+func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
+ return nil, NewErrTasknotFound(taskUUID)
+}
+
+// PurgeState deletes stored task state
+func (b *Backend) PurgeState(taskUUID string) error {
+ return NewErrTasknotFound(taskUUID)
+}
+
+// PurgeGroupMeta deletes stored group meta data
+func (b *Backend) PurgeGroupMeta(groupUUID string) error {
+ _, ok := b.groups[groupUUID]
+ if !ok {
+ return NewErrGroupNotFound(groupUUID)
+ }
+
+ return nil
+}
+
+func (b *Backend) updateState(s *tasks.TaskState) error {
+	// the null backend does not store task states
+	return nil
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/backends/redis/redis.go b/vendor/github.com/RichardKnop/machinery/v1/backends/redis/redis.go
new file mode 100644
index 000000000..be9d32b75
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/backends/redis/redis.go
@@ -0,0 +1,338 @@
+package redis
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/RichardKnop/machinery/v1/backends/iface"
+ "github.com/RichardKnop/machinery/v1/common"
+ "github.com/RichardKnop/machinery/v1/config"
+ "github.com/RichardKnop/machinery/v1/log"
+ "github.com/RichardKnop/machinery/v1/tasks"
+ "github.com/RichardKnop/redsync"
+ "github.com/gomodule/redigo/redis"
+)
+
+// Backend represents a Redis result backend
+type Backend struct {
+ common.Backend
+ host string
+ password string
+ db int
+ pool *redis.Pool
+ // If set, path to a socket file overrides hostname
+ socketPath string
+ redsync *redsync.Redsync
+ redisOnce sync.Once
+ common.RedisConnector
+}
+
+// New creates Backend instance
+func New(cnf *config.Config, host, password, socketPath string, db int) iface.Backend {
+ return &Backend{
+ Backend: common.NewBackend(cnf),
+ host: host,
+ db: db,
+ password: password,
+ socketPath: socketPath,
+ }
+}
+
+// InitGroup creates and saves a group meta data object
+func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error {
+ groupMeta := &tasks.GroupMeta{
+ GroupUUID: groupUUID,
+ TaskUUIDs: taskUUIDs,
+ CreatedAt: time.Now().UTC(),
+ }
+
+ encoded, err := json.Marshal(groupMeta)
+ if err != nil {
+ return err
+ }
+
+ conn := b.open()
+ defer conn.Close()
+
+ _, err = conn.Do("SET", groupUUID, encoded)
+ if err != nil {
+ return err
+ }
+
+ return b.setExpirationTime(groupUUID)
+}
+
+// GroupCompleted returns true if all tasks in a group finished
+func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
+ groupMeta, err := b.getGroupMeta(groupUUID)
+ if err != nil {
+ return false, err
+ }
+
+ taskStates, err := b.getStates(groupMeta.TaskUUIDs...)
+ if err != nil {
+ return false, err
+ }
+
+ var countSuccessTasks = 0
+ for _, taskState := range taskStates {
+ if taskState.IsCompleted() {
+ countSuccessTasks++
+ }
+ }
+
+ return countSuccessTasks == groupTaskCount, nil
+}
+
+// GroupTaskStates returns states of all tasks in the group
+func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) {
+ groupMeta, err := b.getGroupMeta(groupUUID)
+ if err != nil {
+ return []*tasks.TaskState{}, err
+ }
+
+ return b.getStates(groupMeta.TaskUUIDs...)
+}
+
+// TriggerChord flags chord as triggered in the backend storage to make sure
+// chord is never triggered multiple times. Returns a boolean flag to indicate
+// whether the worker should trigger chord (true) or not if it has been
+// triggered already (false)
+func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
+ conn := b.open()
+ defer conn.Close()
+
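+	// A single global mutex name is used here, so chord triggering is
+	// serialized across all groups, not just this one.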
+ m := b.redsync.NewMutex("TriggerChordMutex")
+ if err := m.Lock(); err != nil {
+ return false, err
+ }
+ defer m.Unlock()
+
+ groupMeta, err := b.getGroupMeta(groupUUID)
+ if err != nil {
+ return false, err
+ }
+
+ // Chord has already been triggered, return false (should not trigger again)
+ if groupMeta.ChordTriggered {
+ return false, nil
+ }
+
+ // Set flag to true
+ groupMeta.ChordTriggered = true
+
+ // Update the group meta
+ encoded, err := json.Marshal(&groupMeta)
+ if err != nil {
+ return false, err
+ }
+
+ _, err = conn.Do("SET", groupUUID, encoded)
+ if err != nil {
+ return false, err
+ }
+
+ return true, b.setExpirationTime(groupUUID)
+}
+
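+// mergeNewTaskState carries over CreatedAt and TaskName from any previously
+// stored state so they survive state transitions.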
+func (b *Backend) mergeNewTaskState(newState *tasks.TaskState) {
+ state, err := b.GetState(newState.TaskUUID)
+ if err == nil {
+ newState.CreatedAt = state.CreatedAt
+ newState.TaskName = state.TaskName
+ }
+}
+
+// SetStatePending updates task state to PENDING
+func (b *Backend) SetStatePending(signature *tasks.Signature) error {
+ taskState := tasks.NewPendingTaskState(signature)
+ return b.updateState(taskState)
+}
+
+// SetStateReceived updates task state to RECEIVED
+func (b *Backend) SetStateReceived(signature *tasks.Signature) error {
+ taskState := tasks.NewReceivedTaskState(signature)
+ b.mergeNewTaskState(taskState)
+ return b.updateState(taskState)
+}
+
+// SetStateStarted updates task state to STARTED
+func (b *Backend) SetStateStarted(signature *tasks.Signature) error {
+ taskState := tasks.NewStartedTaskState(signature)
+ b.mergeNewTaskState(taskState)
+ return b.updateState(taskState)
+}
+
+// SetStateRetry updates task state to RETRY
+func (b *Backend) SetStateRetry(signature *tasks.Signature) error {
+ taskState := tasks.NewRetryTaskState(signature)
+ b.mergeNewTaskState(taskState)
+ return b.updateState(taskState)
+}
+
+// SetStateSuccess updates task state to SUCCESS
+func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error {
+ taskState := tasks.NewSuccessTaskState(signature, results)
+ b.mergeNewTaskState(taskState)
+ return b.updateState(taskState)
+}
+
+// SetStateFailure updates task state to FAILURE
+func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error {
+ taskState := tasks.NewFailureTaskState(signature, err)
+ b.mergeNewTaskState(taskState)
+ return b.updateState(taskState)
+}
+
+// GetState returns the latest task state
+func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
+ conn := b.open()
+ defer conn.Close()
+
+ item, err := redis.Bytes(conn.Do("GET", taskUUID))
+ if err != nil {
+ return nil, err
+ }
+ state := new(tasks.TaskState)
+ decoder := json.NewDecoder(bytes.NewReader(item))
+ decoder.UseNumber()
+ if err := decoder.Decode(state); err != nil {
+ return nil, err
+ }
+
+ return state, nil
+}
+
+// PurgeState deletes stored task state
+func (b *Backend) PurgeState(taskUUID string) error {
+ conn := b.open()
+ defer conn.Close()
+
+ _, err := conn.Do("DEL", taskUUID)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// PurgeGroupMeta deletes stored group meta data
+func (b *Backend) PurgeGroupMeta(groupUUID string) error {
+ conn := b.open()
+ defer conn.Close()
+
+ _, err := conn.Do("DEL", groupUUID)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// getGroupMeta retrieves group meta data, convenience function to avoid repetition
+func (b *Backend) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) {
+ conn := b.open()
+ defer conn.Close()
+
+ item, err := redis.Bytes(conn.Do("GET", groupUUID))
+ if err != nil {
+ return nil, err
+ }
+
+ groupMeta := new(tasks.GroupMeta)
+ decoder := json.NewDecoder(bytes.NewReader(item))
+ decoder.UseNumber()
+ if err := decoder.Decode(groupMeta); err != nil {
+ return nil, err
+ }
+
+ return groupMeta, nil
+}
+
+// getStates returns multiple task states
+func (b *Backend) getStates(taskUUIDs ...string) ([]*tasks.TaskState, error) {
+ taskStates := make([]*tasks.TaskState, len(taskUUIDs))
+
+ conn := b.open()
+ defer conn.Close()
+
+ // conn.Do requires []interface{}... can't pass []string unfortunately
+ taskUUIDInterfaces := make([]interface{}, len(taskUUIDs))
+ for i, taskUUID := range taskUUIDs {
+ taskUUIDInterfaces[i] = interface{}(taskUUID)
+ }
+
+ reply, err := redis.Values(conn.Do("MGET", taskUUIDInterfaces...))
+ if err != nil {
+ return taskStates, err
+ }
+
+ for i, value := range reply {
+ stateBytes, ok := value.([]byte)
+ if !ok {
+ return taskStates, fmt.Errorf("Expected byte array, instead got: %v", value)
+ }
+
+ taskState := new(tasks.TaskState)
+ decoder := json.NewDecoder(bytes.NewReader(stateBytes))
+ decoder.UseNumber()
+ if err := decoder.Decode(taskState); err != nil {
+ log.ERROR.Print(err)
+ return taskStates, err
+ }
+
+ taskStates[i] = taskState
+ }
+
+ return taskStates, nil
+}
+
+// updateState saves current task state
+func (b *Backend) updateState(taskState *tasks.TaskState) error {
+ conn := b.open()
+ defer conn.Close()
+
+ encoded, err := json.Marshal(taskState)
+ if err != nil {
+ return err
+ }
+
+ _, err = conn.Do("SET", taskState.TaskUUID, encoded)
+ if err != nil {
+ return err
+ }
+
+ return b.setExpirationTime(taskState.TaskUUID)
+}
+
+// setExpirationTime sets expiration timestamp on a stored task state
+func (b *Backend) setExpirationTime(key string) error {
+ expiresIn := b.GetConfig().ResultsExpireIn
+ if expiresIn == 0 {
+		// expire results after 1 hour by default
+ expiresIn = config.DefaultResultsExpireIn
+ }
+ expirationTimestamp := int32(time.Now().Unix() + int64(expiresIn))
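+	// EXPIREAT takes an absolute Unix timestamp rather than a relative TTL.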
+
+ conn := b.open()
+ defer conn.Close()
+
+ _, err := conn.Do("EXPIREAT", key, expirationTimestamp)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// open returns or creates instance of Redis connection
+func (b *Backend) open() redis.Conn {
+ b.redisOnce.Do(func() {
+ b.pool = b.NewPool(b.socketPath, b.host, b.password, b.db, b.GetConfig().Redis, b.GetConfig().TLSConfig)
+ b.redsync = redsync.New([]redsync.Pool{b.pool})
+ })
+ return b.pool.Get()
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/backends/result/async_result.go b/vendor/github.com/RichardKnop/machinery/v1/backends/result/async_result.go
new file mode 100644
index 000000000..15ec8cf5b
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/backends/result/async_result.go
@@ -0,0 +1,256 @@
+package result
+
+import (
+ "errors"
+ "reflect"
+ "time"
+
+ "github.com/RichardKnop/machinery/v1/backends/iface"
+ "github.com/RichardKnop/machinery/v1/tasks"
+)
+
+var (
+ // ErrBackendNotConfigured is returned when no result backend has been configured
+ ErrBackendNotConfigured = errors.New("Result backend not configured")
+ // ErrTimeoutReached is returned when a blocking call gives up waiting for a result
+ ErrTimeoutReached = errors.New("Timeout reached")
+)
+
+// AsyncResult represents a task result
+type AsyncResult struct {
+ Signature *tasks.Signature
+ taskState *tasks.TaskState
+ backend iface.Backend
+}
+
+// ChordAsyncResult represents a result of a chord
+type ChordAsyncResult struct {
+ groupAsyncResults []*AsyncResult
+ chordAsyncResult *AsyncResult
+ backend iface.Backend
+}
+
+// ChainAsyncResult represents a result of a chain of tasks
+type ChainAsyncResult struct {
+ asyncResults []*AsyncResult
+ backend iface.Backend
+}
+
+// NewAsyncResult creates AsyncResult instance
+func NewAsyncResult(signature *tasks.Signature, backend iface.Backend) *AsyncResult {
+ return &AsyncResult{
+ Signature: signature,
+ taskState: new(tasks.TaskState),
+ backend: backend,
+ }
+}
+
+// NewChordAsyncResult creates ChordAsyncResult instance
+func NewChordAsyncResult(groupTasks []*tasks.Signature, chordCallback *tasks.Signature, backend iface.Backend) *ChordAsyncResult {
+ asyncResults := make([]*AsyncResult, len(groupTasks))
+ for i, task := range groupTasks {
+ asyncResults[i] = NewAsyncResult(task, backend)
+ }
+ return &ChordAsyncResult{
+ groupAsyncResults: asyncResults,
+ chordAsyncResult: NewAsyncResult(chordCallback, backend),
+ backend: backend,
+ }
+}
+
+// NewChainAsyncResult creates ChainAsyncResult instance
+func NewChainAsyncResult(tasks []*tasks.Signature, backend iface.Backend) *ChainAsyncResult {
+ asyncResults := make([]*AsyncResult, len(tasks))
+ for i, task := range tasks {
+ asyncResults[i] = NewAsyncResult(task, backend)
+ }
+ return &ChainAsyncResult{
+ asyncResults: asyncResults,
+ backend: backend,
+ }
+}
+
+// Touch fetches the latest task state once and returns immediately without waiting for completion
+func (asyncResult *AsyncResult) Touch() ([]reflect.Value, error) {
+ if asyncResult.backend == nil {
+ return nil, ErrBackendNotConfigured
+ }
+
+ asyncResult.GetState()
+
+ // Purge state if we are using AMQP backend
+ if asyncResult.backend.IsAMQP() && asyncResult.taskState.IsCompleted() {
+ asyncResult.backend.PurgeState(asyncResult.taskState.TaskUUID)
+ }
+
+ if asyncResult.taskState.IsFailure() {
+ return nil, errors.New(asyncResult.taskState.Error)
+ }
+
+ if asyncResult.taskState.IsSuccess() {
+ return tasks.ReflectTaskResults(asyncResult.taskState.Results)
+ }
+
+ return nil, nil
+}
+
+// Get returns task results (synchronous blocking call)
+func (asyncResult *AsyncResult) Get(sleepDuration time.Duration) ([]reflect.Value, error) {
+ for {
+ results, err := asyncResult.Touch()
+
+ if results == nil && err == nil {
+ time.Sleep(sleepDuration)
+ } else {
+ return results, err
+ }
+ }
+}
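+
+// A typical blocking wait, polling every 5 milliseconds (sketch; asyncResult
+// would usually come from something like server.SendTask):
+//
+//    results, err := asyncResult.Get(5 * time.Millisecond)
+//    if err != nil {
+//        // the task failed or the backend errored
+//    }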
+
+// GetWithTimeout returns task results with a timeout (synchronous blocking call)
+func (asyncResult *AsyncResult) GetWithTimeout(timeoutDuration, sleepDuration time.Duration) ([]reflect.Value, error) {
+ timeout := time.NewTimer(timeoutDuration)
+
+ for {
+ select {
+ case <-timeout.C:
+ return nil, ErrTimeoutReached
+ default:
+ results, err := asyncResult.Touch()
+
+ if results == nil && err == nil {
+ time.Sleep(sleepDuration)
+ } else {
+ return results, err
+ }
+ }
+ }
+}
+
+// GetState returns latest task state
+func (asyncResult *AsyncResult) GetState() *tasks.TaskState {
+ if asyncResult.taskState.IsCompleted() {
+ return asyncResult.taskState
+ }
+
+ taskState, err := asyncResult.backend.GetState(asyncResult.Signature.UUID)
+ if err == nil {
+ asyncResult.taskState = taskState
+ }
+
+ return asyncResult.taskState
+}
+
+// Get returns results of a chain of tasks (synchronous blocking call)
+func (chainAsyncResult *ChainAsyncResult) Get(sleepDuration time.Duration) ([]reflect.Value, error) {
+ if chainAsyncResult.backend == nil {
+ return nil, ErrBackendNotConfigured
+ }
+
+ var (
+ results []reflect.Value
+ err error
+ )
+
+ for _, asyncResult := range chainAsyncResult.asyncResults {
+ results, err = asyncResult.Get(sleepDuration)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return results, err
+}
+
+// Get returns result of a chord (synchronous blocking call)
+func (chordAsyncResult *ChordAsyncResult) Get(sleepDuration time.Duration) ([]reflect.Value, error) {
+ if chordAsyncResult.backend == nil {
+ return nil, ErrBackendNotConfigured
+ }
+
+ var err error
+ for _, asyncResult := range chordAsyncResult.groupAsyncResults {
+ _, err = asyncResult.Get(sleepDuration)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return chordAsyncResult.chordAsyncResult.Get(sleepDuration)
+}
+
+// GetWithTimeout returns results of a chain of tasks with timeout (synchronous blocking call)
+func (chainAsyncResult *ChainAsyncResult) GetWithTimeout(timeoutDuration, sleepDuration time.Duration) ([]reflect.Value, error) {
+ if chainAsyncResult.backend == nil {
+ return nil, ErrBackendNotConfigured
+ }
+
+ var (
+ results []reflect.Value
+ err error
+ )
+
+ timeout := time.NewTimer(timeoutDuration)
+ ln := len(chainAsyncResult.asyncResults)
+ lastResult := chainAsyncResult.asyncResults[ln-1]
+
+ for {
+ select {
+ case <-timeout.C:
+ return nil, ErrTimeoutReached
+ default:
+
+ for _, asyncResult := range chainAsyncResult.asyncResults {
+ _, errcur := asyncResult.Touch()
+ if errcur != nil {
+ return nil, errcur
+ }
+ }
+
+ results, err = lastResult.Touch()
+ if err != nil {
+ return nil, err
+ }
+ if results != nil {
+ return results, err
+ }
+ time.Sleep(sleepDuration)
+ }
+ }
+}
+
+// GetWithTimeout returns result of a chord with a timeout (synchronous blocking call)
+func (chordAsyncResult *ChordAsyncResult) GetWithTimeout(timeoutDuration, sleepDuration time.Duration) ([]reflect.Value, error) {
+ if chordAsyncResult.backend == nil {
+ return nil, ErrBackendNotConfigured
+ }
+
+ var (
+ results []reflect.Value
+ err error
+ )
+
+ timeout := time.NewTimer(timeoutDuration)
+ for {
+ select {
+ case <-timeout.C:
+ return nil, ErrTimeoutReached
+ default:
+ for _, asyncResult := range chordAsyncResult.groupAsyncResults {
+ _, errcur := asyncResult.Touch()
+ if errcur != nil {
+ return nil, errcur
+ }
+ }
+
+ results, err = chordAsyncResult.chordAsyncResult.Touch()
+ if err != nil {
+ return nil, err
+ }
+ if results != nil {
+ return results, err
+ }
+ time.Sleep(sleepDuration)
+ }
+ }
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/brokers/amqp/amqp.go b/vendor/github.com/RichardKnop/machinery/v1/brokers/amqp/amqp.go
new file mode 100644
index 000000000..e0670af4b
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/brokers/amqp/amqp.go
@@ -0,0 +1,424 @@
+package amqp
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/RichardKnop/machinery/v1/brokers/errs"
+ "github.com/RichardKnop/machinery/v1/brokers/iface"
+ "github.com/RichardKnop/machinery/v1/common"
+ "github.com/RichardKnop/machinery/v1/config"
+ "github.com/RichardKnop/machinery/v1/log"
+ "github.com/RichardKnop/machinery/v1/tasks"
+ "github.com/pkg/errors"
+ "github.com/streadway/amqp"
+)
+
+type AMQPConnection struct {
+ queueName string
+ connection *amqp.Connection
+ channel *amqp.Channel
+ queue amqp.Queue
+ confirmation <-chan amqp.Confirmation
+ errorchan <-chan *amqp.Error
+ cleanup chan struct{}
+}
+
+// Broker represents an AMQP broker
+type Broker struct {
+ common.Broker
+ common.AMQPConnector
+ processingWG sync.WaitGroup // use wait group to make sure task processing completes on interrupt signal
+
+ connections map[string]*AMQPConnection
+ connectionsMutex sync.RWMutex
+}
+
+// New creates new Broker instance
+func New(cnf *config.Config) iface.Broker {
+ return &Broker{Broker: common.NewBroker(cnf), AMQPConnector: common.AMQPConnector{}, connections: make(map[string]*AMQPConnection)}
+}
+
+// StartConsuming enters a loop and waits for incoming messages
+func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) {
+ b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor)
+
+ queueName := taskProcessor.CustomQueue()
+ if queueName == "" {
+ queueName = b.GetConfig().DefaultQueue
+ }
+
+ conn, channel, queue, _, amqpCloseChan, err := b.Connect(
+ b.GetConfig().Broker,
+ b.GetConfig().TLSConfig,
+ b.GetConfig().AMQP.Exchange, // exchange name
+ b.GetConfig().AMQP.ExchangeType, // exchange type
+ queueName, // queue name
+ true, // queue durable
+ false, // queue delete when unused
+ b.GetConfig().AMQP.BindingKey, // queue binding key
+ nil, // exchange declare args
+ nil, // queue declare args
+ amqp.Table(b.GetConfig().AMQP.QueueBindingArgs), // queue binding args
+ )
+ if err != nil {
+ b.GetRetryFunc()(b.GetRetryStopChan())
+ return b.GetRetry(), err
+ }
+ defer b.Close(channel, conn)
+
+ if err = channel.Qos(
+ b.GetConfig().AMQP.PrefetchCount,
+ 0, // prefetch size
+ false, // global
+ ); err != nil {
+ return b.GetRetry(), fmt.Errorf("Channel qos error: %s", err)
+ }
+
+ deliveries, err := channel.Consume(
+ queue.Name, // queue
+ consumerTag, // consumer tag
+ false, // auto-ack
+ false, // exclusive
+ false, // no-local
+ false, // no-wait
+ nil, // arguments
+ )
+ if err != nil {
+ return b.GetRetry(), fmt.Errorf("Queue consume error: %s", err)
+ }
+
+ log.INFO.Print("[*] Waiting for messages. To exit press CTRL+C")
+
+ if err := b.consume(deliveries, concurrency, taskProcessor, amqpCloseChan); err != nil {
+ return b.GetRetry(), err
+ }
+
+ // Waiting for any tasks being processed to finish
+ b.processingWG.Wait()
+
+ return b.GetRetry(), nil
+}
+
+// StopConsuming quits the loop
+func (b *Broker) StopConsuming() {
+ b.Broker.StopConsuming()
+
+ // Waiting for any tasks being processed to finish
+ b.processingWG.Wait()
+}
+
+// GetOrOpenConnection will return a connection on a particular queue name. Open connections
+// are cached to avoid having to reopen a connection for each queue
+func (b *Broker) GetOrOpenConnection(queueName string, queueBindingKey string, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs amqp.Table) (*AMQPConnection, error) {
+ var err error
+
+ b.connectionsMutex.Lock()
+ defer b.connectionsMutex.Unlock()
+
+ conn, ok := b.connections[queueName]
+ if !ok {
+ conn = &AMQPConnection{
+ queueName: queueName,
+ cleanup: make(chan struct{}),
+ }
+ conn.connection, conn.channel, conn.queue, conn.confirmation, conn.errorchan, err = b.Connect(
+ b.GetConfig().Broker,
+ b.GetConfig().TLSConfig,
+ b.GetConfig().AMQP.Exchange, // exchange name
+ b.GetConfig().AMQP.ExchangeType, // exchange type
+ queueName, // queue name
+ true, // queue durable
+ false, // queue delete when unused
+ queueBindingKey, // queue binding key
+ exchangeDeclareArgs, // exchange declare args
+ queueDeclareArgs, // queue declare args
+ queueBindingArgs, // queue binding args
+ )
+ if err != nil {
+ return nil, errors.Wrapf(err, "Failed to connect to queue %s", queueName)
+ }
+
+ // Reconnect to the channel if it disconnects/errors out
+ go func() {
+ select {
+ case err = <-conn.errorchan:
+ log.INFO.Printf("Error occured on queue: %s. Reconnecting", queueName)
+ b.connectionsMutex.Lock()
+ delete(b.connections, queueName)
+ b.connectionsMutex.Unlock()
+ _, err := b.GetOrOpenConnection(queueName, queueBindingKey, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs)
+ if err != nil {
+ log.ERROR.Printf("Failed to reopen queue: %s.", queueName)
+ }
+ case <-conn.cleanup:
+ return
+ }
+ return
+ }()
+ b.connections[queueName] = conn
+ }
+ return conn, nil
+}
+
+func (b *Broker) CloseConnections() error {
+ b.connectionsMutex.Lock()
+ defer b.connectionsMutex.Unlock()
+
+ for key, conn := range b.connections {
+ if err := b.Close(conn.channel, conn.connection); err != nil {
+ log.ERROR.Print("Failed to close channel")
+ return err
+ }
+ close(conn.cleanup)
+ delete(b.connections, key)
+ }
+ return nil
+}
+
+// Publish places a new message on the default queue
+func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error {
+ // Adjust routing key (this decides which queue the message will be published to)
+ b.AdjustRoutingKey(signature)
+
+ msg, err := json.Marshal(signature)
+ if err != nil {
+ return fmt.Errorf("JSON marshal error: %s", err)
+ }
+
+ // Check the ETA signature field, if it is set and it is in the future,
+ // delay the task
+ if signature.ETA != nil {
+ now := time.Now().UTC()
+
+ if signature.ETA.After(now) {
+ delayMs := int64(signature.ETA.Sub(now) / time.Millisecond)
+
+ return b.delay(signature, delayMs)
+ }
+ }
+
+ queue := b.GetConfig().DefaultQueue
+ bindingKey := b.GetConfig().AMQP.BindingKey // queue binding key
+ if b.isDirectExchange() {
+ queue = signature.RoutingKey
+ bindingKey = signature.RoutingKey
+ }
+
+ connection, err := b.GetOrOpenConnection(
+ queue,
+ bindingKey, // queue binding key
+ nil, // exchange declare args
+ nil, // queue declare args
+ amqp.Table(b.GetConfig().AMQP.QueueBindingArgs), // queue binding args
+ )
+ if err != nil {
+ return errors.Wrapf(err, "Failed to get a connection for queue %s", queue)
+ }
+
+ channel := connection.channel
+ confirmsChan := connection.confirmation
+
+ if err := channel.Publish(
+ b.GetConfig().AMQP.Exchange, // exchange name
+ signature.RoutingKey, // routing key
+ false, // mandatory
+ false, // immediate
+ amqp.Publishing{
+ Headers: amqp.Table(signature.Headers),
+ ContentType: "application/json",
+ Body: msg,
+ DeliveryMode: amqp.Persistent,
+ },
+ ); err != nil {
+ return errors.Wrap(err, "Failed to publish task")
+ }
+
+ confirmed := <-confirmsChan
+
+ if confirmed.Ack {
+ return nil
+ }
+
+ return fmt.Errorf("Failed delivery of delivery tag: %v", confirmed.DeliveryTag)
+}
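+
+// An illustrative publish with a future ETA, which routes the message through
+// the delay queue created by delay() below (the task name is hypothetical):
+//
+//    eta := time.Now().UTC().Add(30 * time.Second)
+//    sig := &tasks.Signature{Name: "add", ETA: &eta}
+//    err := broker.Publish(context.Background(), sig)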
+
+// consume takes delivered messages from the channel and manages a worker pool
+// to process tasks concurrently
+func (b *Broker) consume(deliveries <-chan amqp.Delivery, concurrency int, taskProcessor iface.TaskProcessor, amqpCloseChan <-chan *amqp.Error) error {
+ pool := make(chan struct{}, concurrency)
+
+ // initialize worker pool with maxWorkers workers
+ go func() {
+ for i := 0; i < concurrency; i++ {
+ pool <- struct{}{}
+ }
+ }()
+
+ errorsChan := make(chan error)
+
+ for {
+ select {
+ case amqpErr := <-amqpCloseChan:
+ return amqpErr
+ case err := <-errorsChan:
+ return err
+ case d := <-deliveries:
+ if concurrency > 0 {
+ // get worker from pool (blocks until one is available)
+ <-pool
+ }
+
+ b.processingWG.Add(1)
+
+ // Consume the task inside a goroutine so multiple tasks
+ // can be processed concurrently
+ go func() {
+ if err := b.consumeOne(d, taskProcessor); err != nil {
+ errorsChan <- err
+ }
+
+ b.processingWG.Done()
+
+ if concurrency > 0 {
+ // give worker back to pool
+ pool <- struct{}{}
+ }
+ }()
+ case <-b.GetStopChan():
+ return nil
+ }
+ }
+}
+
+// consumeOne processes a single message using TaskProcessor
+func (b *Broker) consumeOne(delivery amqp.Delivery, taskProcessor iface.TaskProcessor) error {
+ if len(delivery.Body) == 0 {
+ delivery.Nack(true, false) // multiple, requeue
+ return errors.New("Received an empty message") // RabbitMQ down?
+ }
+
+ var multiple, requeue = false, false
+
+ // Unmarshal message body into signature struct
+ signature := new(tasks.Signature)
+ decoder := json.NewDecoder(bytes.NewReader(delivery.Body))
+ decoder.UseNumber()
+ if err := decoder.Decode(signature); err != nil {
+ delivery.Nack(multiple, requeue)
+ return errs.NewErrCouldNotUnmarshaTaskSignature(delivery.Body, err)
+ }
+
+ // If the task is not registered, we nack it and requeue,
+ // there might be different workers for processing specific tasks
+ if !b.IsTaskRegistered(signature.Name) {
+ if !delivery.Redelivered {
+ requeue = true
+ log.INFO.Printf("Task not registered with this worker. Requeing message: %s", delivery.Body)
+ }
+ delivery.Nack(multiple, requeue)
+ return nil
+ }
+
+ log.INFO.Printf("Received new message: %s", delivery.Body)
+
+ err := taskProcessor.Process(signature)
+ delivery.Ack(multiple)
+ return err
+}
+
+// delay a task by delayMs milliseconds: a new queue without any consumers is
+// created, the message is published to it with the appropriate TTL expiration
+// headers, and after the TTL expires the message is dead-lettered to the
+// proper queue with consumers
+func (b *Broker) delay(signature *tasks.Signature, delayMs int64) error {
+ if delayMs <= 0 {
+ return errors.New("Cannot delay task by 0ms")
+ }
+
+ message, err := json.Marshal(signature)
+ if err != nil {
+ return fmt.Errorf("JSON marshal error: %s", err)
+ }
+
+ // It's necessary to redeclare the queue each time (to zero its TTL timer).
+ queueName := fmt.Sprintf(
+ "delay.%d.%s.%s",
+ delayMs, // delay duration in milliseconds
+ b.GetConfig().AMQP.Exchange,
+ signature.RoutingKey, // routing key
+ )
+ declareQueueArgs := amqp.Table{
+ // Exchange where to send messages after TTL expiration.
+ "x-dead-letter-exchange": b.GetConfig().AMQP.Exchange,
+ // Routing key to use when resending expired messages.
+ "x-dead-letter-routing-key": signature.RoutingKey,
+ // Time in milliseconds
+ // after which the message will expire and be sent to the destination.
+ "x-message-ttl": delayMs,
+ // Time after which the queue will be deleted.
+ "x-expires": delayMs * 2,
+ }
+ conn, channel, _, _, _, err := b.Connect(
+ b.GetConfig().Broker,
+ b.GetConfig().TLSConfig,
+ b.GetConfig().AMQP.Exchange, // exchange name
+ b.GetConfig().AMQP.ExchangeType, // exchange type
+ queueName, // queue name
+ true, // queue durable
+ b.GetConfig().AMQP.AutoDelete, // queue delete when unused
+ queueName, // queue binding key
+ nil, // exchange declare args
+ declareQueueArgs, // queue declare args
+ amqp.Table(b.GetConfig().AMQP.QueueBindingArgs), // queue binding args
+ )
+ if err != nil {
+ return err
+ }
+
+ defer b.Close(channel, conn)
+
+ if err := channel.Publish(
+ b.GetConfig().AMQP.Exchange, // exchange
+ queueName, // routing key
+ false, // mandatory
+ false, // immediate
+ amqp.Publishing{
+ Headers: amqp.Table(signature.Headers),
+ ContentType: "application/json",
+ Body: message,
+ DeliveryMode: amqp.Persistent,
+ },
+ ); err != nil {
+ return err
+ }
+
+ return nil
+}
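+
+// As an illustration, delaying a task routed to "machinery_tasks" on exchange
+// "machinery_exchange" (hypothetical names) by 5000ms creates the
+// intermediate queue:
+//
+//    delay.5000.machinery_exchange.machinery_tasks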
+
+func (b *Broker) isDirectExchange() bool {
+ return b.GetConfig().AMQP != nil && b.GetConfig().AMQP.ExchangeType == "direct"
+}
+
+// AdjustRoutingKey makes sure the routing key is correct.
+// If the routing key is an empty string:
+// a) set it to binding key for direct exchange type
+// b) set it to default queue name
+func (b *Broker) AdjustRoutingKey(s *tasks.Signature) {
+ if s.RoutingKey != "" {
+ return
+ }
+
+ if b.isDirectExchange() {
+ // The routing algorithm behind a direct exchange is simple - a message goes
+ // to the queues whose binding key exactly matches the routing key of the message.
+ s.RoutingKey = b.GetConfig().AMQP.BindingKey
+ return
+ }
+
+ s.RoutingKey = b.GetConfig().DefaultQueue
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/brokers/eager/eager.go b/vendor/github.com/RichardKnop/machinery/v1/brokers/eager/eager.go
new file mode 100644
index 000000000..751ef4ce5
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/brokers/eager/eager.go
@@ -0,0 +1,73 @@
+package eager
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/RichardKnop/machinery/v1/brokers/iface"
+ "github.com/RichardKnop/machinery/v1/common"
+ "github.com/RichardKnop/machinery/v1/tasks"
+)
+
+// Broker represents an "eager" in-memory broker
+type Broker struct {
+ worker iface.TaskProcessor
+ common.Broker
+}
+
+// New creates new Broker instance
+func New() iface.Broker {
+ return new(Broker)
+}
+
+// Mode interface with methods specific for this broker
+type Mode interface {
+ AssignWorker(p iface.TaskProcessor)
+}
+
+// StartConsuming enters a loop and waits for incoming messages
+func (eagerBroker *Broker) StartConsuming(consumerTag string, concurrency int, p iface.TaskProcessor) (bool, error) {
+ return true, nil
+}
+
+// StopConsuming quits the loop
+func (eagerBroker *Broker) StopConsuming() {
+ // do nothing
+}
+
+// Publish places a new message on the default queue
+func (eagerBroker *Broker) Publish(ctx context.Context, task *tasks.Signature) error {
+ if eagerBroker.worker == nil {
+ return errors.New("worker is not assigned in eager-mode")
+ }
+
+ // faking the behavior to marshal input into json
+ // and unmarshal it back
+ message, err := json.Marshal(task)
+ if err != nil {
+ return fmt.Errorf("JSON marshal error: %s", err)
+ }
+
+ signature := new(tasks.Signature)
+ decoder := json.NewDecoder(bytes.NewReader(message))
+ decoder.UseNumber()
+ if err := decoder.Decode(signature); err != nil {
+ return fmt.Errorf("JSON unmarshal error: %s", err)
+ }
+
+ // blocking call to the task directly
+ return eagerBroker.worker.Process(signature)
+}
+
+// GetPendingTasks returns a slice of task.Signatures waiting in the queue
+func (eagerBroker *Broker) GetPendingTasks(queue string) ([]*tasks.Signature, error) {
+ return []*tasks.Signature{}, errors.New("Not implemented")
+}
+
+// AssignWorker assigns a worker to the eager broker
+func (eagerBroker *Broker) AssignWorker(w iface.TaskProcessor) {
+ eagerBroker.worker = w
+}
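+
+// A minimal eager-mode sketch (p is any iface.TaskProcessor implementation,
+// typically a worker; names are illustrative). Publish then executes the task
+// synchronously via p.Process:
+//
+//    b := New()
+//    b.(Mode).AssignWorker(p)
+//    err := b.Publish(context.Background(), signature)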
diff --git a/vendor/github.com/RichardKnop/machinery/v1/brokers/errs/errors.go b/vendor/github.com/RichardKnop/machinery/v1/brokers/errs/errors.go
new file mode 100644
index 000000000..c7ed5849d
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/brokers/errs/errors.go
@@ -0,0 +1,25 @@
+package errs
+
+import (
+ "errors"
+ "fmt"
+)
+
+// ErrCouldNotUnmarshaTaskSignature is returned when a message body cannot be unmarshaled into a task signature
+type ErrCouldNotUnmarshaTaskSignature struct {
+ msg []byte
+ reason string
+}
+
+// Error implements the error interface
+func (e ErrCouldNotUnmarshaTaskSignature) Error() string {
+ return fmt.Sprintf("Could not unmarshal '%s' into a task signature: %v", e.msg, e.reason)
+}
+
+// NewErrCouldNotUnmarshaTaskSignature returns new ErrCouldNotUnmarshaTaskSignature instance
+func NewErrCouldNotUnmarshaTaskSignature(msg []byte, err error) ErrCouldNotUnmarshaTaskSignature {
+ return ErrCouldNotUnmarshaTaskSignature{msg: msg, reason: err.Error()}
+}
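+
+// Callers can detect this error with a type assertion (sketch):
+//
+//    if _, ok := err.(ErrCouldNotUnmarshaTaskSignature); ok {
+//        // drop or dead-letter the malformed message
+//    }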
+
+// ErrConsumerStopped indicates that the operation is now illegal because of the consumer being stopped.
+var ErrConsumerStopped = errors.New("the server has been stopped")
diff --git a/vendor/github.com/RichardKnop/machinery/v1/brokers/gcppubsub/gcp_pubsub.go b/vendor/github.com/RichardKnop/machinery/v1/brokers/gcppubsub/gcp_pubsub.go
new file mode 100644
index 000000000..5e374a9b3
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/brokers/gcppubsub/gcp_pubsub.go
@@ -0,0 +1,196 @@
+package gcppubsub
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "cloud.google.com/go/pubsub"
+ "github.com/RichardKnop/machinery/v1/brokers/iface"
+ "github.com/RichardKnop/machinery/v1/common"
+ "github.com/RichardKnop/machinery/v1/config"
+ "github.com/RichardKnop/machinery/v1/log"
+ "github.com/RichardKnop/machinery/v1/tasks"
+)
+
+// Broker represents a Google Cloud Pub/Sub broker
+type Broker struct {
+ common.Broker
+
+ service *pubsub.Client
+ subscriptionName string
+ MaxExtension time.Duration
+
+ stopDone chan struct{}
+}
+
+// New creates new Broker instance
+func New(cnf *config.Config, projectID, subscriptionName string) (iface.Broker, error) {
+ b := &Broker{Broker: common.NewBroker(cnf), stopDone: make(chan struct{})}
+ b.subscriptionName = subscriptionName
+
+ ctx := context.Background()
+
+ if cnf.GCPPubSub != nil {
+ b.MaxExtension = cnf.GCPPubSub.MaxExtension
+ }
+
+ if cnf.GCPPubSub != nil && cnf.GCPPubSub.Client != nil {
+ b.service = cnf.GCPPubSub.Client
+ } else {
+ pubsubClient, err := pubsub.NewClient(ctx, projectID)
+ if err != nil {
+ return nil, err
+ }
+ b.service = pubsubClient
+ cnf.GCPPubSub = &config.GCPPubSubConfig{
+ Client: pubsubClient,
+ }
+ }
+
+ // Validate topic exists
+ defaultQueue := b.GetConfig().DefaultQueue
+ topic := b.service.Topic(defaultQueue)
+ defer topic.Stop()
+
+ topicExists, err := topic.Exists(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if !topicExists {
+ return nil, fmt.Errorf("topic does not exist, instead got %s", defaultQueue)
+ }
+
+ // Validate subscription exists
+ sub := b.service.Subscription(b.subscriptionName)
+
+ if b.MaxExtension != 0 {
+ sub.ReceiveSettings.MaxExtension = b.MaxExtension
+ }
+
+ subscriptionExists, err := sub.Exists(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if !subscriptionExists {
+ return nil, fmt.Errorf("subscription does not exist, instead got %s", b.subscriptionName)
+ }
+
+ return b, nil
+}
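+
+// An illustrative constructor call (the project and subscription IDs are
+// hypothetical; the topic is taken from cnf.DefaultQueue and must already
+// exist):
+//
+//    broker, err := New(cnf, "my-project", "machinery-subscription")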
+
+// StartConsuming enters a loop and waits for incoming messages
+func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) {
+ b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor)
+
+ sub := b.service.Subscription(b.subscriptionName)
+
+ if b.MaxExtension != 0 {
+ sub.ReceiveSettings.MaxExtension = b.MaxExtension
+ }
+
+ sub.ReceiveSettings.NumGoroutines = concurrency
+ log.INFO.Print("[*] Waiting for messages. To exit press CTRL+C")
+
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ <-b.GetStopChan()
+ cancel()
+ }()
+
+ for {
+ err := sub.Receive(ctx, func(_ctx context.Context, msg *pubsub.Message) {
+ b.consumeOne(msg, taskProcessor)
+ })
+ if err == nil {
+ break
+ }
+
+ log.ERROR.Printf("Error when receiving messages. Error: %v", err)
+ continue
+ }
+
+ close(b.stopDone)
+
+ return b.GetRetry(), nil
+}
+
+// StopConsuming quits the loop
+func (b *Broker) StopConsuming() {
+ b.Broker.StopConsuming()
+
+ // Waiting for any tasks being processed to finish
+ <-b.stopDone
+}
+
+// Publish places a new message on the default queue or the queue pointed to
+// by the routing key
+func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error {
+ // Adjust routing key (this decides which queue the message will be published to)
+ b.AdjustRoutingKey(signature)
+
+ msg, err := json.Marshal(signature)
+ if err != nil {
+ return fmt.Errorf("JSON marshal error: %s", err)
+ }
+
+ topic := b.service.Topic(signature.RoutingKey)
+ defer topic.Stop()
+
+ // Check the ETA signature field, if it is set and it is in the future,
+ // delay the task
+ if signature.ETA != nil {
+ now := time.Now().UTC()
+
+ if signature.ETA.After(now) {
+ topic.PublishSettings.DelayThreshold = signature.ETA.Sub(now)
+ }
+ }
+
+ result := topic.Publish(ctx, &pubsub.Message{
+ Data: msg,
+ })
+
+ id, err := result.Get(ctx)
+ if err != nil {
+ log.ERROR.Printf("Error when sending a message: %v", err)
+ return err
+ }
+
+ log.INFO.Printf("Sending a message successfully, server-generated message ID %v", id)
+ return nil
+}
+
+// consumeOne processes a single message using TaskProcessor
+func (b *Broker) consumeOne(delivery *pubsub.Message, taskProcessor iface.TaskProcessor) {
+ if len(delivery.Data) == 0 {
+ delivery.Nack()
+ log.ERROR.Printf("received an empty message, the delivery was %v", delivery)
+ return
+ }
+
+ sig := new(tasks.Signature)
+ decoder := json.NewDecoder(bytes.NewBuffer(delivery.Data))
+ decoder.UseNumber()
+ if err := decoder.Decode(sig); err != nil {
+ delivery.Nack()
+ log.ERROR.Printf("unmarshal error, the delivery is %v", delivery)
+ return
+ }
+
+ // If the task is not registered, nack the message
+ // and leave it in the queue
+ if !b.IsTaskRegistered(sig.Name) {
+ delivery.Nack()
+ log.ERROR.Printf("task %s is not registered", sig.Name)
+ return
+ }
+
+ if err := taskProcessor.Process(sig); err != nil {
+ delivery.Nack()
+ log.ERROR.Printf("Failed to process task: %v", err)
+ return
+ }
+
+ // Ack the message after successfully consuming and processing it
+ delivery.Ack()
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/brokers/iface/interfaces.go b/vendor/github.com/RichardKnop/machinery/v1/brokers/iface/interfaces.go
new file mode 100644
index 000000000..fea8124ea
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/brokers/iface/interfaces.go
@@ -0,0 +1,27 @@
+package iface
+
+import (
+ "context"
+
+ "github.com/RichardKnop/machinery/v1/config"
+ "github.com/RichardKnop/machinery/v1/tasks"
+)
+
+// Broker - a common interface for all brokers
+type Broker interface {
+ GetConfig() *config.Config
+ SetRegisteredTaskNames(names []string)
+ IsTaskRegistered(name string) bool
+ StartConsuming(consumerTag string, concurrency int, p TaskProcessor) (bool, error)
+ StopConsuming()
+ Publish(ctx context.Context, task *tasks.Signature) error
+ GetPendingTasks(queue string) ([]*tasks.Signature, error)
+ AdjustRoutingKey(s *tasks.Signature)
+}
+
+// TaskProcessor - can process a delivered task
+// This will probably always be a worker instance
+type TaskProcessor interface {
+ Process(signature *tasks.Signature) error
+ CustomQueue() string
+}
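+
+// A minimal TaskProcessor sketch (illustrative; a real worker dispatches the
+// signature to a registered task function):
+//
+//    type inlineProcessor struct{ queue string }
+//
+//    func (p inlineProcessor) Process(sig *tasks.Signature) error { return nil }
+//    func (p inlineProcessor) CustomQueue() string                { return p.queue }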
diff --git a/vendor/github.com/RichardKnop/machinery/v1/brokers/redis/redis.go b/vendor/github.com/RichardKnop/machinery/v1/brokers/redis/redis.go
new file mode 100644
index 000000000..065732bb2
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/brokers/redis/redis.go
@@ -0,0 +1,418 @@
+package redis
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/RichardKnop/machinery/v1/brokers/errs"
+ "github.com/RichardKnop/machinery/v1/brokers/iface"
+ "github.com/RichardKnop/machinery/v1/common"
+ "github.com/RichardKnop/machinery/v1/config"
+ "github.com/RichardKnop/machinery/v1/log"
+ "github.com/RichardKnop/machinery/v1/tasks"
+ "github.com/RichardKnop/redsync"
+ "github.com/gomodule/redigo/redis"
+)
+
+var redisDelayedTasksKey = "delayed_tasks"
+
+// Broker represents a Redis broker
+type Broker struct {
+ common.Broker
+ common.RedisConnector
+ host string
+ password string
+ db int
+ pool *redis.Pool
+ consumingWG sync.WaitGroup // wait group to make sure whole consumption completes
+ processingWG sync.WaitGroup // use wait group to make sure task processing completes
+ delayedWG sync.WaitGroup
+ // If set, path to a socket file overrides hostname
+ socketPath string
+ redsync *redsync.Redsync
+ redisOnce sync.Once
+}
+
+// New creates new Broker instance
+func New(cnf *config.Config, host, password, socketPath string, db int) iface.Broker {
+ b := &Broker{Broker: common.NewBroker(cnf)}
+ b.host = host
+ b.db = db
+ b.password = password
+ b.socketPath = socketPath
+
+ return b
+}
+
+// StartConsuming enters a loop and waits for incoming messages
+func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) {
+ b.consumingWG.Add(1)
+ defer b.consumingWG.Done()
+
+ if concurrency < 1 {
+ concurrency = 1
+ }
+
+ b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor)
+
+ conn := b.open()
+ defer conn.Close()
+
+ // Ping the server to make sure connection is live
+ _, err := conn.Do("PING")
+ if err != nil {
+ b.GetRetryFunc()(b.GetRetryStopChan())
+
+ // Return err if retry is still true.
+ // If retry is false, broker.StopConsuming() has been called and
+ // therefore Redis might have been stopped. Return nil and exit
+ // StartConsuming()
+ if b.GetRetry() {
+ return b.GetRetry(), err
+ }
+ return b.GetRetry(), errs.ErrConsumerStopped
+ }
+
+ // Channel to which we will push tasks ready for processing by worker
+ deliveries := make(chan []byte, concurrency)
+ pool := make(chan struct{}, concurrency)
+
+ // initialize worker pool with maxWorkers workers
+ for i := 0; i < concurrency; i++ {
+ pool <- struct{}{}
+ }
+
+ // A receiving goroutine keeps popping messages from the queue by BLPOP
+ // If the message is valid and can be unmarshaled into a proper structure
+ // we send it to the deliveries channel
+ go func() {
+
+ log.INFO.Print("[*] Waiting for messages. To exit press CTRL+C")
+
+ for {
+ select {
+ // A way to stop this goroutine from b.StopConsuming
+ case <-b.GetStopChan():
+ close(deliveries)
+ return
+ case <-pool:
+ task, _ := b.nextTask(getQueue(b.GetConfig(), taskProcessor))
+ //TODO: should this error be ignored?
+ if len(task) > 0 {
+ deliveries <- task
+ }
+
+ pool <- struct{}{}
+ }
+ }
+ }()
+
+ // A goroutine to watch for delayed tasks and push them to deliveries
+ // channel for consumption by the worker
+ b.delayedWG.Add(1)
+ go func() {
+ defer b.delayedWG.Done()
+
+ for {
+ select {
+ // A way to stop this goroutine from b.StopConsuming
+ case <-b.GetStopChan():
+ return
+ default:
+ task, err := b.nextDelayedTask(redisDelayedTasksKey)
+ if err != nil {
+ continue
+ }
+
+ signature := new(tasks.Signature)
+ decoder := json.NewDecoder(bytes.NewReader(task))
+ decoder.UseNumber()
+ if err := decoder.Decode(signature); err != nil {
+ log.ERROR.Print(errs.NewErrCouldNotUnmarshaTaskSignature(task, err))
+ }
+
+ if err := b.Publish(context.Background(), signature); err != nil {
+ log.ERROR.Print(err)
+ }
+ }
+ }
+ }()
+
+ if err := b.consume(deliveries, concurrency, taskProcessor); err != nil {
+ return b.GetRetry(), err
+ }
+
+ // Waiting for any tasks being processed to finish
+ b.processingWG.Wait()
+
+ return b.GetRetry(), nil
+}
+
+// StopConsuming quits the loop
+func (b *Broker) StopConsuming() {
+ b.Broker.StopConsuming()
+ // Waiting for the delayed tasks goroutine to have stopped
+ b.delayedWG.Wait()
+ // Waiting for consumption to finish
+ b.consumingWG.Wait()
+
+ if b.pool != nil {
+ b.pool.Close()
+ }
+}
+
+// Publish places a new message on the default queue
+func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error {
+ // Adjust routing key (this decides which queue the message will be published to)
+ b.Broker.AdjustRoutingKey(signature)
+
+ msg, err := json.Marshal(signature)
+ if err != nil {
+ return fmt.Errorf("JSON marshal error: %s", err)
+ }
+
+ conn := b.open()
+ defer conn.Close()
+
+ // Check the ETA signature field, if it is set and it is in the future,
+ // delay the task
+ if signature.ETA != nil {
+ now := time.Now().UTC()
+
+ if signature.ETA.After(now) {
+ score := signature.ETA.UnixNano()
+ _, err = conn.Do("ZADD", redisDelayedTasksKey, score, msg)
+ return err
+ }
+ }
+
+ _, err = conn.Do("RPUSH", signature.RoutingKey, msg)
+ return err
+}
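+
+// Delayed tasks are parked in the "delayed_tasks" sorted set with the ETA in
+// nanoseconds as the score, conceptually:
+//
+//    ZADD delayed_tasks <eta.UnixNano()> <json-encoded signature>
+//
+// The delayed-tasks goroutine later republishes entries whose score is due.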
+
+// GetPendingTasks returns a slice of task signatures waiting in the queue
+func (b *Broker) GetPendingTasks(queue string) ([]*tasks.Signature, error) {
+ conn := b.open()
+ defer conn.Close()
+
+ if queue == "" {
+ queue = b.GetConfig().DefaultQueue
+ }
+ dataBytes, err := conn.Do("LRANGE", queue, 0, -1)
+ if err != nil {
+ return nil, err
+ }
+ results, err := redis.ByteSlices(dataBytes, err)
+ if err != nil {
+ return nil, err
+ }
+
+ taskSignatures := make([]*tasks.Signature, len(results))
+ for i, result := range results {
+ signature := new(tasks.Signature)
+ decoder := json.NewDecoder(bytes.NewReader(result))
+ decoder.UseNumber()
+ if err := decoder.Decode(signature); err != nil {
+ return nil, err
+ }
+ taskSignatures[i] = signature
+ }
+ return taskSignatures, nil
+}
+
+// consume takes delivered messages from the channel and manages a worker pool
+// to process tasks concurrently
+func (b *Broker) consume(deliveries <-chan []byte, concurrency int, taskProcessor iface.TaskProcessor) error {
+ errorsChan := make(chan error, concurrency*2)
+ pool := make(chan struct{}, concurrency)
+
+ // init pool for Worker tasks execution, as many slots as Worker concurrency param
+ go func() {
+ for i := 0; i < concurrency; i++ {
+ pool <- struct{}{}
+ }
+ }()
+
+ for {
+ select {
+ case err := <-errorsChan:
+ return err
+ case d, open := <-deliveries:
+ if !open {
+ return nil
+ }
+ if concurrency > 0 {
+ // get execution slot from pool (blocks until one is available)
+ <-pool
+ }
+
+ b.processingWG.Add(1)
+
+ // Consume the task inside a goroutine so multiple tasks
+ // can be processed concurrently
+ go func() {
+ if err := b.consumeOne(d, taskProcessor); err != nil {
+ errorsChan <- err
+ }
+
+ b.processingWG.Done()
+
+ if concurrency > 0 {
+ // give slot back to pool
+ pool <- struct{}{}
+ }
+ }()
+ }
+ }
+}
+
+// consumeOne processes a single message using TaskProcessor
+func (b *Broker) consumeOne(delivery []byte, taskProcessor iface.TaskProcessor) error {
+ signature := new(tasks.Signature)
+ decoder := json.NewDecoder(bytes.NewReader(delivery))
+ decoder.UseNumber()
+ if err := decoder.Decode(signature); err != nil {
+ return errs.NewErrCouldNotUnmarshaTaskSignature(delivery, err)
+ }
+
+ // If the task is not registered, we requeue it,
+ // there might be different workers for processing specific tasks
+ if !b.IsTaskRegistered(signature.Name) {
+ log.INFO.Printf("Task not registered with this worker. Requeing message: %s", delivery)
+
+ conn := b.open()
+ defer conn.Close()
+
+ conn.Do("RPUSH", getQueue(b.GetConfig(), taskProcessor), delivery)
+ return nil
+ }
+
+ log.DEBUG.Printf("Received new message: %s", delivery)
+
+ return taskProcessor.Process(signature)
+}
+
+// nextTask pops next available task from the default queue
+func (b *Broker) nextTask(queue string) (result []byte, err error) {
+ conn := b.open()
+ defer conn.Close()
+
+ pollPeriodMilliseconds := 1000 // default poll period for normal tasks
+ if b.GetConfig().Redis != nil {
+ configuredPollPeriod := b.GetConfig().Redis.NormalTasksPollPeriod
+ if configuredPollPeriod > 0 {
+ pollPeriodMilliseconds = configuredPollPeriod
+ }
+ }
+ pollPeriod := time.Duration(pollPeriodMilliseconds) * time.Millisecond
+
+ items, err := redis.ByteSlices(conn.Do("BLPOP", queue, pollPeriod.Seconds()))
+ if err != nil {
+ return []byte{}, err
+ }
+
+ // items[0] - the name of the key where an element was popped
+ // items[1] - the value of the popped element
+ if len(items) != 2 {
+ return []byte{}, redis.ErrNil
+ }
+
+ result = items[1]
+
+ return result, nil
+}
+
+// nextDelayedTask pops a value from the ZSET key using WATCH/MULTI/EXEC commands.
+// https://github.com/gomodule/redigo/blob/master/redis/zpop_example_test.go
+func (b *Broker) nextDelayedTask(key string) (result []byte, err error) {
+ conn := b.open()
+ defer conn.Close()
+
+ defer func() {
+ // Return connection to normal state on error.
+ // https://redis.io/commands/discard
+ if err != nil {
+ conn.Do("DISCARD")
+ }
+ }()
+
+ var (
+ items [][]byte
+ reply interface{}
+ )
+
+ pollPeriod := 500 // default poll period for delayed tasks
+ if b.GetConfig().Redis != nil {
+ configuredPollPeriod := b.GetConfig().Redis.DelayedTasksPollPeriod
+ // the zero value of the config field would bombard redis with
+ // requests, the opposite of what we intend
+ if configuredPollPeriod > 0 {
+ pollPeriod = configuredPollPeriod
+ }
+ }
+
+ for {
+ // Space out queries to ZSET so we don't bombard redis
+ // server with relentless ZRANGEBYSCOREs
+ time.Sleep(time.Duration(pollPeriod) * time.Millisecond)
+ if _, err = conn.Do("WATCH", key); err != nil {
+ return
+ }
+
+ now := time.Now().UTC().UnixNano()
+
+ // https://redis.io/commands/zrangebyscore
+ items, err = redis.ByteSlices(conn.Do(
+ "ZRANGEBYSCORE",
+ key,
+ 0,
+ now,
+ "LIMIT",
+ 0,
+ 1,
+ ))
+ if err != nil {
+ return
+ }
+ if len(items) != 1 {
+ err = redis.ErrNil
+ return
+ }
+
+ _ = conn.Send("MULTI")
+ _ = conn.Send("ZREM", key, items[0])
+ reply, err = conn.Do("EXEC")
+ if err != nil {
+ return
+ }
+
+ if reply != nil {
+ result = items[0]
+ break
+ }
+ }
+
+ return
+}
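+
+// The loop above amounts to this Redis conversation (illustrative):
+//
+//    WATCH delayed_tasks
+//    ZRANGEBYSCORE delayed_tasks 0 <now> LIMIT 0 1
+//    MULTI
+//    ZREM delayed_tasks <item>
+//    EXEC   -- a nil reply means the key changed under WATCH, so retry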
+
+// open returns or creates instance of Redis connection
+func (b *Broker) open() redis.Conn {
+ b.redisOnce.Do(func() {
+ b.pool = b.NewPool(b.socketPath, b.host, b.password, b.db, b.GetConfig().Redis, b.GetConfig().TLSConfig)
+ b.redsync = redsync.New([]redsync.Pool{b.pool})
+ })
+
+ return b.pool.Get()
+}
+
+func getQueue(config *config.Config, taskProcessor iface.TaskProcessor) string {
+ customQueue := taskProcessor.CustomQueue()
+ if customQueue == "" {
+ return config.DefaultQueue
+ }
+ return customQueue
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/brokers/sqs/sqs.go b/vendor/github.com/RichardKnop/machinery/v1/brokers/sqs/sqs.go
new file mode 100644
index 000000000..040830a5e
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/brokers/sqs/sqs.go
@@ -0,0 +1,361 @@
+package sqs
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/RichardKnop/machinery/v1/brokers/iface"
+ "github.com/RichardKnop/machinery/v1/common"
+ "github.com/RichardKnop/machinery/v1/config"
+ "github.com/RichardKnop/machinery/v1/log"
+ "github.com/RichardKnop/machinery/v1/tasks"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/sqs/sqsiface"
+
+ awssqs "github.com/aws/aws-sdk-go/service/sqs"
+)
+
+const (
+ maxAWSSQSDelay = time.Minute * 15 // Max supported SQS delay is 15 min: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html
+)
+
+// Broker represents an AWS SQS broker
+// There are examples on: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sqs-example-create-queue.html
+type Broker struct {
+ common.Broker
+ processingWG sync.WaitGroup // use wait group to make sure task processing completes on interrupt signal
+ receivingWG sync.WaitGroup
+ stopReceivingChan chan int
+ sess *session.Session
+ service sqsiface.SQSAPI
+ queueUrl *string
+}
+
+// New creates new Broker instance
+func New(cnf *config.Config) iface.Broker {
+ b := &Broker{Broker: common.NewBroker(cnf)}
+ if cnf.SQS != nil && cnf.SQS.Client != nil {
+ // Use provided *SQS client
+ b.service = cnf.SQS.Client
+ } else {
+ // Initialize a session that the SDK will use to load credentials from the shared credentials file, ~/.aws/credentials.
+ // See details on: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html
+ // The AWS_REGION environment variable is also required
+ b.sess = session.Must(session.NewSessionWithOptions(session.Options{
+ SharedConfigState: session.SharedConfigEnable,
+ }))
+ b.service = awssqs.New(b.sess)
+ }
+
+ return b
+}
+
+// GetPendingTasks returns a slice of task.Signatures waiting in the queue
+func (b *Broker) GetPendingTasks(queue string) ([]*tasks.Signature, error) {
+ return nil, errors.New("Not implemented")
+}
+
+// StartConsuming enters a loop and waits for incoming messages
+func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) {
+ b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor)
+ qURL := b.getQueueURL(taskProcessor)
+ // Save it so that it can be used later when attempting to delete the task
+ b.queueUrl = qURL
+
+ deliveries := make(chan *awssqs.ReceiveMessageOutput, concurrency)
+ pool := make(chan struct{}, concurrency)
+
+ // initialize worker pool with maxWorkers workers
+ for i := 0; i < concurrency; i++ {
+ pool <- struct{}{}
+ }
+ b.stopReceivingChan = make(chan int)
+ b.receivingWG.Add(1)
+
+ go func() {
+ defer b.receivingWG.Done()
+
+ log.INFO.Printf("[*] Waiting for messages on queue: %s. To exit press CTRL+C\n", *qURL)
+
+ for {
+ select {
+ // A way to stop this goroutine from b.StopConsuming
+ case <-b.stopReceivingChan:
+ close(deliveries)
+ return
+ case <-pool:
+ output, err := b.receiveMessage(qURL)
+ if err == nil && len(output.Messages) > 0 {
+ deliveries <- output
+
+ } else {
+ //return back to pool right away
+ pool <- struct{}{}
+ if err != nil {
+ log.ERROR.Printf("Queue consume error: %s", err)
+ }
+
+ }
+ }
+
+ }
+ }()
+
+ if err := b.consume(deliveries, concurrency, taskProcessor, pool); err != nil {
+ return b.GetRetry(), err
+ }
+
+ return b.GetRetry(), nil
+}
+
+// StopConsuming quits the loop
+func (b *Broker) StopConsuming() {
+ b.Broker.StopConsuming()
+
+ b.stopReceiving()
+
+ // Waiting for any tasks being processed to finish
+ b.processingWG.Wait()
+
+ // Waiting for the receiving goroutine to have stopped
+ b.receivingWG.Wait()
+}
+
+// Publish places a new message on the default queue
+func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error {
+ msg, err := json.Marshal(signature)
+ if err != nil {
+ return fmt.Errorf("JSON marshal error: %s", err)
+ }
+
+ // Check that signature.RoutingKey is set, if not switch to DefaultQueue
+ b.AdjustRoutingKey(signature)
+
+ MsgInput := &awssqs.SendMessageInput{
+ MessageBody: aws.String(string(msg)),
+ QueueUrl: aws.String(b.GetConfig().Broker + "/" + signature.RoutingKey),
+ }
+
+ // If this is a FIFO queue, additional parameters are required.
+ if strings.HasSuffix(signature.RoutingKey, ".fifo") {
+ // Use Machinery's signature task UUID as the SQS Message Deduplication ID.
+ MsgDedupID := signature.UUID
+ MsgInput.MessageDeduplicationId = aws.String(MsgDedupID)
+
+ // Do not use Machinery's signature group UUID as the SQS Message Group ID; use BrokerMessageGroupId instead
+ MsgGroupID := signature.BrokerMessageGroupId
+ if MsgGroupID == "" {
+ return fmt.Errorf("please specify BrokerMessageGroupId attribute for task Signature when submitting a task to FIFO queue")
+ }
+ MsgInput.MessageGroupId = aws.String(MsgGroupID)
+ }
+
+ // Check the ETA signature field, if it is set and it is in the future,
+ // and is not a fifo queue, set a delay in seconds for the task.
+ if signature.ETA != nil && !strings.HasSuffix(signature.RoutingKey, ".fifo") {
+ now := time.Now().UTC()
+ delay := signature.ETA.Sub(now)
+ if delay > 0 {
+ if delay > maxAWSSQSDelay {
+ return errors.New("Max AWS SQS delay exceeded")
+ }
+ MsgInput.DelaySeconds = aws.Int64(int64(delay.Seconds()))
+ }
+ }
+
+ result, err := b.service.SendMessageWithContext(ctx, MsgInput)
+
+ if err != nil {
+ log.ERROR.Printf("Error when sending a message: %v", err)
+ return err
+
+ }
+ log.INFO.Printf("Sending a message successfully, the messageId is %v", *result.MessageId)
+ return nil
+
+}
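+
+// Publishing to a FIFO queue requires a broker message group (sketch; the
+// task, queue and group names are hypothetical):
+//
+//    sig := &tasks.Signature{
+//        Name:                 "add",
+//        RoutingKey:           "machinery-tasks.fifo",
+//        BrokerMessageGroupId: "orders",
+//    }
+//    err := broker.Publish(context.Background(), sig)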
+
+// consume keeps consuming deliveries from the channel until there is an error or a stop signal
+func (b *Broker) consume(deliveries <-chan *awssqs.ReceiveMessageOutput, concurrency int, taskProcessor iface.TaskProcessor, pool chan struct{}) error {
+
+ errorsChan := make(chan error)
+
+ for {
+ whetherContinue, err := b.consumeDeliveries(deliveries, concurrency, taskProcessor, pool, errorsChan)
+ if err != nil {
+ return err
+ }
+ if !whetherContinue {
+ return nil
+ }
+ }
+}
+
+// consumeOne consumes a single delivery. If the delivery is consumed successfully, it is deleted from AWS SQS
+func (b *Broker) consumeOne(delivery *awssqs.ReceiveMessageOutput, taskProcessor iface.TaskProcessor) error {
+ if len(delivery.Messages) == 0 {
+ log.ERROR.Printf("received an empty message, the delivery was %v", delivery)
+ return errors.New("received empty message, the delivery is " + delivery.GoString())
+ }
+
+ sig := new(tasks.Signature)
+ decoder := json.NewDecoder(strings.NewReader(*delivery.Messages[0].Body))
+ decoder.UseNumber()
+ if err := decoder.Decode(sig); err != nil {
+ log.ERROR.Printf("unmarshal error. the delivery is %v", delivery)
+ return err
+ }
+ if delivery.Messages[0].ReceiptHandle != nil {
+ sig.SQSReceiptHandle = *delivery.Messages[0].ReceiptHandle
+ }
+
+ // If the task is not registered return an error
+ // and leave the message in the queue
+ if !b.IsTaskRegistered(sig.Name) {
+ return fmt.Errorf("task %s is not registered", sig.Name)
+ }
+
+ err := taskProcessor.Process(sig)
+ if err != nil {
+ return err
+ }
+ // Delete message after successfully consuming and processing the message
+ if err = b.deleteOne(delivery); err != nil {
+ log.ERROR.Printf("error when deleting the delivery. delivery is %v, Error=%s", delivery, err)
+ }
+ return err
+}
+
+// deleteOne deletes a delivery from AWS SQS
+func (b *Broker) deleteOne(delivery *awssqs.ReceiveMessageOutput) error {
+ qURL := b.defaultQueueURL()
+ _, err := b.service.DeleteMessage(&awssqs.DeleteMessageInput{
+ QueueUrl: qURL,
+ ReceiptHandle: delivery.Messages[0].ReceiptHandle,
+ })
+
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// defaultQueueURL returns the default queue URL
+func (b *Broker) defaultQueueURL() *string {
+ if b.queueUrl != nil {
+ return b.queueUrl
+ }
+ return aws.String(b.GetConfig().Broker + "/" + b.GetConfig().DefaultQueue)
+}
+
+// receiveMessage receives a message from the specified queue URL
+func (b *Broker) receiveMessage(qURL *string) (*awssqs.ReceiveMessageOutput, error) {
+ var waitTimeSeconds int
+ var visibilityTimeout *int
+ if b.GetConfig().SQS != nil {
+ waitTimeSeconds = b.GetConfig().SQS.WaitTimeSeconds
+ visibilityTimeout = b.GetConfig().SQS.VisibilityTimeout
+ } else {
+ waitTimeSeconds = 0
+ }
+ input := &awssqs.ReceiveMessageInput{
+ AttributeNames: []*string{
+ aws.String(awssqs.MessageSystemAttributeNameSentTimestamp),
+ },
+ MessageAttributeNames: []*string{
+ aws.String(awssqs.QueueAttributeNameAll),
+ },
+ QueueUrl: qURL,
+ MaxNumberOfMessages: aws.Int64(1),
+ WaitTimeSeconds: aws.Int64(int64(waitTimeSeconds)),
+ }
+ if visibilityTimeout != nil {
+ input.VisibilityTimeout = aws.Int64(int64(*visibilityTimeout))
+ }
+ result, err := b.service.ReceiveMessage(input)
+ if err != nil {
+ return nil, err
+ }
+ return result, err
+}
+
+// initializePool fills the concurrency pool with worker slots
+func (b *Broker) initializePool(pool chan struct{}, concurrency int) {
+ for i := 0; i < concurrency; i++ {
+ pool <- struct{}{}
+ }
+}
+
+// consumeDeliveries consumes deliveries from the deliveries channel
+func (b *Broker) consumeDeliveries(deliveries <-chan *awssqs.ReceiveMessageOutput, concurrency int, taskProcessor iface.TaskProcessor, pool chan struct{}, errorsChan chan error) (bool, error) {
+ select {
+ case err := <-errorsChan:
+ return false, err
+ case d := <-deliveries:
+
+ b.processingWG.Add(1)
+
+ // Consume the task inside a goroutine so multiple tasks
+ // can be processed concurrently
+ go func() {
+
+ if err := b.consumeOne(d, taskProcessor); err != nil {
+ errorsChan <- err
+ }
+
+ b.processingWG.Done()
+
+ if concurrency > 0 {
+ // give worker back to pool
+ pool <- struct{}{}
+ }
+ }()
+ case <-b.GetStopChan():
+ return false, nil
+ }
+ return true, nil
+}
+
+// continueReceivingMessages receives a message and reports whether receiving should continue
+func (b *Broker) continueReceivingMessages(qURL *string, deliveries chan *awssqs.ReceiveMessageOutput) (bool, error) {
+ select {
+ // A way to stop this goroutine from b.StopConsuming
+ case <-b.stopReceivingChan:
+ return false, nil
+ default:
+ output, err := b.receiveMessage(qURL)
+ if err != nil {
+ return true, err
+ }
+ if len(output.Messages) == 0 {
+ return true, nil
+ }
+ go func() { deliveries <- output }()
+ }
+ return true, nil
+}
+
+// stopReceiving sends a stop signal to stopReceivingChan
+func (b *Broker) stopReceiving() {
+ // Stop the receiving goroutine
+ b.stopReceivingChan <- 1
+}
+
+// getQueueURL returns the queue URL, using the task processor's custom queue
+// if one was set and otherwise the default queue name from config
+func (b *Broker) getQueueURL(taskProcessor iface.TaskProcessor) *string {
+ queueName := b.GetConfig().DefaultQueue
+ if taskProcessor.CustomQueue() != "" {
+ queueName = taskProcessor.CustomQueue()
+ }
+
+ return aws.String(b.GetConfig().Broker + "/" + queueName)
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/common/amqp.go b/vendor/github.com/RichardKnop/machinery/v1/common/amqp.go
new file mode 100644
index 000000000..1e0f3e48f
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/common/amqp.go
@@ -0,0 +1,129 @@
+package common
+
+import (
+ "crypto/tls"
+ "fmt"
+
+ "github.com/streadway/amqp"
+)
+
+// AMQPConnector provides helpers for opening and closing AMQP connections, channels and queues
+type AMQPConnector struct{}
+
+// Connect opens a connection to RabbitMQ, declares an exchange, opens a channel,
+// declares and binds the queue and enables publish notifications
+func (ac *AMQPConnector) Connect(url string, tlsConfig *tls.Config, exchange, exchangeType, queueName string, queueDurable, queueDelete bool, queueBindingKey string, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs amqp.Table) (*amqp.Connection, *amqp.Channel, amqp.Queue, <-chan amqp.Confirmation, <-chan *amqp.Error, error) {
+ // Connect to server
+ conn, channel, err := ac.Open(url, tlsConfig)
+ if err != nil {
+ return nil, nil, amqp.Queue{}, nil, nil, err
+ }
+
+ if exchange != "" {
+ // Declare an exchange
+ if err = channel.ExchangeDeclare(
+ exchange, // name of the exchange
+ exchangeType, // type
+ true, // durable
+ false, // delete when complete
+ false, // internal
+ false, // noWait
+ exchangeDeclareArgs, // arguments
+ ); err != nil {
+ return conn, channel, amqp.Queue{}, nil, nil, fmt.Errorf("Exchange declare error: %s", err)
+ }
+ }
+
+ var queue amqp.Queue
+ if queueName != "" {
+ // Declare a queue
+ queue, err = channel.QueueDeclare(
+ queueName, // name
+ queueDurable, // durable
+ queueDelete, // delete when unused
+ false, // exclusive
+ false, // no-wait
+ queueDeclareArgs, // arguments
+ )
+ if err != nil {
+ return conn, channel, amqp.Queue{}, nil, nil, fmt.Errorf("Queue declare error: %s", err)
+ }
+
+ // Bind the queue
+ if err = channel.QueueBind(
+ queue.Name, // name of the queue
+ queueBindingKey, // binding key
+ exchange, // source exchange
+ false, // noWait
+ queueBindingArgs, // arguments
+ ); err != nil {
+ return conn, channel, queue, nil, nil, fmt.Errorf("Queue bind error: %s", err)
+ }
+ }
+
+ // Enable publish confirmations
+ if err = channel.Confirm(false); err != nil {
+ return conn, channel, queue, nil, nil, fmt.Errorf("Channel could not be put into confirm mode: %s", err)
+ }
+
+ return conn, channel, queue, channel.NotifyPublish(make(chan amqp.Confirmation, 1)), conn.NotifyClose(make(chan *amqp.Error, 1)), nil
+}
+
+// DeleteQueue deletes a queue by name
+func (ac *AMQPConnector) DeleteQueue(channel *amqp.Channel, queueName string) error {
+ // First return value is number of messages removed
+ _, err := channel.QueueDelete(
+ queueName, // name
+ false, // ifUnused
+ false, // ifEmpty
+ false, // noWait
+ )
+
+ return err
+}
+
+// InspectQueue provides information about a specific queue
+func (*AMQPConnector) InspectQueue(channel *amqp.Channel, queueName string) (*amqp.Queue, error) {
+ queueState, err := channel.QueueInspect(queueName)
+ if err != nil {
+ return nil, fmt.Errorf("Queue inspect error: %s", err)
+ }
+
+ return &queueState, nil
+}
+
+// Open new RabbitMQ connection
+func (ac *AMQPConnector) Open(url string, tlsConfig *tls.Config) (*amqp.Connection, *amqp.Channel, error) {
+ // Connect
+ // From amqp docs: DialTLS will use the provided tls.Config when it encounters an amqps:// scheme
+ // and will dial a plain connection when it encounters an amqp:// scheme.
+ conn, err := amqp.DialTLS(url, tlsConfig)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Dial error: %s", err)
+ }
+
+ // Open a channel
+ channel, err := conn.Channel()
+ if err != nil {
+ return nil, nil, fmt.Errorf("Open channel error: %s", err)
+ }
+
+ return conn, channel, nil
+}
+
+// Close connection
+func (ac *AMQPConnector) Close(channel *amqp.Channel, conn *amqp.Connection) error {
+ if channel != nil {
+ if err := channel.Close(); err != nil {
+ return fmt.Errorf("Close channel error: %s", err)
+ }
+ }
+
+ if conn != nil {
+ if err := conn.Close(); err != nil {
+ return fmt.Errorf("Close connection error: %s", err)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/common/backend.go b/vendor/github.com/RichardKnop/machinery/v1/common/backend.go
new file mode 100644
index 000000000..6c6b38be0
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/common/backend.go
@@ -0,0 +1,25 @@
+package common
+
+import (
+ "github.com/RichardKnop/machinery/v1/config"
+)
+
+// Backend represents a base backend structure
+type Backend struct {
+ cnf *config.Config
+}
+
+// NewBackend creates new Backend instance
+func NewBackend(cnf *config.Config) Backend {
+ return Backend{cnf: cnf}
+}
+
+// GetConfig returns config
+func (b *Backend) GetConfig() *config.Config {
+ return b.cnf
+}
+
+// IsAMQP returns true if the backend is AMQP-based (always false for this base implementation)
+func (b *Backend) IsAMQP() bool {
+ return false
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/common/broker.go b/vendor/github.com/RichardKnop/machinery/v1/common/broker.go
new file mode 100644
index 000000000..e79a5a7e4
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/common/broker.go
@@ -0,0 +1,121 @@
+package common
+
+import (
+ "errors"
+
+ "github.com/RichardKnop/machinery/v1/brokers/iface"
+ "github.com/RichardKnop/machinery/v1/config"
+ "github.com/RichardKnop/machinery/v1/log"
+ "github.com/RichardKnop/machinery/v1/retry"
+ "github.com/RichardKnop/machinery/v1/tasks"
+)
+
+// Broker represents a base broker structure
+type Broker struct {
+ cnf *config.Config
+ registeredTaskNames []string
+ retry bool
+ retryFunc func(chan int)
+ retryStopChan chan int
+ stopChan chan int
+}
+
+// NewBroker creates new Broker instance
+func NewBroker(cnf *config.Config) Broker {
+ return Broker{
+ cnf: cnf,
+ retry: true,
+ stopChan: make(chan int),
+ retryStopChan: make(chan int),
+ }
+}
+
+// GetConfig returns config
+func (b *Broker) GetConfig() *config.Config {
+ return b.cnf
+}
+
+// GetRetry ...
+func (b *Broker) GetRetry() bool {
+ return b.retry
+}
+
+// GetRetryFunc ...
+func (b *Broker) GetRetryFunc() func(chan int) {
+ return b.retryFunc
+}
+
+// GetRetryStopChan ...
+func (b *Broker) GetRetryStopChan() chan int {
+ return b.retryStopChan
+}
+
+// GetStopChan ...
+func (b *Broker) GetStopChan() chan int {
+ return b.stopChan
+}
+
+// Publish places a new message on the default queue
+func (b *Broker) Publish(signature *tasks.Signature) error {
+ return errors.New("Not implemented")
+}
+
+// SetRegisteredTaskNames sets registered task names
+func (b *Broker) SetRegisteredTaskNames(names []string) {
+ b.registeredTaskNames = names
+}
+
+// IsTaskRegistered returns true if the task is registered with this broker
+func (b *Broker) IsTaskRegistered(name string) bool {
+ for _, registeredTaskName := range b.registeredTaskNames {
+ if registeredTaskName == name {
+ return true
+ }
+ }
+ return false
+}
+
+// GetPendingTasks returns a slice of task.Signatures waiting in the queue
+func (b *Broker) GetPendingTasks(queue string) ([]*tasks.Signature, error) {
+ return nil, errors.New("Not implemented")
+}
+
+// StartConsuming is a common part of StartConsuming method
+func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) {
+ if b.retryFunc == nil {
+ b.retryFunc = retry.Closure()
+ }
+}
+
+// StopConsuming is a common part of StopConsuming
+func (b *Broker) StopConsuming() {
+ // Do not retry from now on
+ b.retry = false
+ // Stop the retry closure earlier
+ select {
+ case b.retryStopChan <- 1:
+ log.WARNING.Print("Stopping retry closure.")
+ default:
+ }
+ // Notifying the stop channel stops consuming of messages
+ close(b.stopChan)
+ log.WARNING.Print("Stop channel")
+}
+
+// GetRegisteredTaskNames returns registered tasks names
+func (b *Broker) GetRegisteredTaskNames() []string {
+ return b.registeredTaskNames
+}
+
+// AdjustRoutingKey makes sure the routing key is correct.
+// If the routing key is an empty string:
+// a) set it to binding key for direct exchange type
+// b) set it to default queue name
+func (b *Broker) AdjustRoutingKey(s *tasks.Signature) {
+ if s.RoutingKey != "" {
+ return
+ }
+
+ s.RoutingKey = b.GetConfig().DefaultQueue
+}
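
Concrete brokers embed this base struct, inheriting the config, retry and stop-channel plumbing, and override the transport-specific methods. A hypothetical in-memory broker sketching the pattern (the type name and channel size are illustrative only):

package mybroker

import (
	"github.com/RichardKnop/machinery/v1/common"
	"github.com/RichardKnop/machinery/v1/config"
	"github.com/RichardKnop/machinery/v1/tasks"
)

// InMemoryBroker is a hypothetical broker reusing the common plumbing
type InMemoryBroker struct {
	common.Broker
	queue chan *tasks.Signature
}

// New creates the broker around the shared base
func New(cnf *config.Config) *InMemoryBroker {
	return &InMemoryBroker{
		Broker: common.NewBroker(cnf),
		queue:  make(chan *tasks.Signature, 100),
	}
}

// Publish overrides the embedded "Not implemented" default
func (b *InMemoryBroker) Publish(signature *tasks.Signature) error {
	// AdjustRoutingKey falls back to the default queue name
	b.AdjustRoutingKey(signature)
	b.queue <- signature
	return nil
}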
diff --git a/vendor/github.com/RichardKnop/machinery/v1/common/redis.go b/vendor/github.com/RichardKnop/machinery/v1/common/redis.go
new file mode 100644
index 000000000..1fc7ed25e
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/common/redis.go
@@ -0,0 +1,84 @@
+package common
+
+import (
+ "crypto/tls"
+ "time"
+
+ "github.com/RichardKnop/machinery/v1/config"
+ "github.com/gomodule/redigo/redis"
+)
+
+var (
+ defaultConfig = &config.RedisConfig{
+ MaxIdle: 3,
+ IdleTimeout: 240,
+ ReadTimeout: 15,
+ WriteTimeout: 15,
+ ConnectTimeout: 15,
+ NormalTasksPollPeriod: 1000,
+ DelayedTasksPollPeriod: 20,
+ }
+)
+
+// RedisConnector ...
+type RedisConnector struct{}
+
+// NewPool returns a new pool of Redis connections
+func (rc *RedisConnector) NewPool(socketPath, host, password string, db int, cnf *config.RedisConfig, tlsConfig *tls.Config) *redis.Pool {
+ if cnf == nil {
+ cnf = defaultConfig
+ }
+ return &redis.Pool{
+ MaxIdle: cnf.MaxIdle,
+ IdleTimeout: time.Duration(cnf.IdleTimeout) * time.Second,
+ MaxActive: cnf.MaxActive,
+ Wait: cnf.Wait,
+ Dial: func() (redis.Conn, error) {
+ c, err := rc.open(socketPath, host, password, db, cnf, tlsConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ if db != 0 {
+ _, err = c.Do("SELECT", db)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return c, err
+ },
+ // PINGs connections that have been idle more than 10 seconds
+ TestOnBorrow: func(c redis.Conn, t time.Time) error {
+ if time.Since(t) < time.Duration(10*time.Second) {
+ return nil
+ }
+ _, err := c.Do("PING")
+ return err
+ },
+ }
+}
+
+// Open a new Redis connection
+func (rc *RedisConnector) open(socketPath, host, password string, db int, cnf *config.RedisConfig, tlsConfig *tls.Config) (redis.Conn, error) {
+ var opts = []redis.DialOption{
+ redis.DialDatabase(db),
+ redis.DialReadTimeout(time.Duration(cnf.ReadTimeout) * time.Second),
+ redis.DialWriteTimeout(time.Duration(cnf.WriteTimeout) * time.Second),
+ redis.DialConnectTimeout(time.Duration(cnf.ConnectTimeout) * time.Second),
+ }
+
+ if tlsConfig != nil {
+ opts = append(opts, redis.DialTLSConfig(tlsConfig), redis.DialUseTLS(true))
+ }
+
+ if password != "" {
+ opts = append(opts, redis.DialPassword(password))
+ }
+
+ if socketPath != "" {
+ return redis.Dial("unix", socketPath, opts...)
+ }
+
+ return redis.Dial("tcp", host, opts...)
+}
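
A short sketch of how a caller might use the connector; the address and database number are placeholders, and a nil *config.RedisConfig falls back to the defaults above:

package main

import (
	"log"

	"github.com/RichardKnop/machinery/v1/common"
)

func main() {
	connector := &common.RedisConnector{}

	// No unix socket, no password, database 0, default config, no TLS
	pool := connector.NewPool("", "localhost:6379", "", 0, nil, nil)
	defer pool.Close()

	conn := pool.Get()
	defer conn.Close()

	reply, err := conn.Do("PING")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("redis replied: %v", reply)
}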
diff --git a/vendor/github.com/RichardKnop/machinery/v1/config/config.go b/vendor/github.com/RichardKnop/machinery/v1/config/config.go
new file mode 100644
index 000000000..f493f7db6
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/config/config.go
@@ -0,0 +1,161 @@
+package config
+
+import (
+ "crypto/tls"
+ "fmt"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/pubsub"
+ "github.com/aws/aws-sdk-go/service/dynamodb"
+ "github.com/aws/aws-sdk-go/service/sqs"
+ "go.mongodb.org/mongo-driver/mongo"
+)
+
+const (
+ // DefaultResultsExpireIn is a default time used to expire task states and group metadata from the backend
+ DefaultResultsExpireIn = 3600
+)
+
+var (
+ // Start with sensible default values
+ defaultCnf = &Config{
+ Broker: "amqp://guest:guest@localhost:5672/",
+ DefaultQueue: "machinery_tasks",
+ ResultBackend: "amqp://guest:guest@localhost:5672/",
+ ResultsExpireIn: DefaultResultsExpireIn,
+ AMQP: &AMQPConfig{
+ Exchange: "machinery_exchange",
+ ExchangeType: "direct",
+ BindingKey: "machinery_task",
+ PrefetchCount: 3,
+ },
+ DynamoDB: &DynamoDBConfig{
+ TaskStatesTable: "task_states",
+ GroupMetasTable: "group_metas",
+ },
+ Redis: &RedisConfig{
+ MaxIdle: 3,
+ IdleTimeout: 240,
+ ReadTimeout: 15,
+ WriteTimeout: 15,
+ ConnectTimeout: 15,
+ NormalTasksPollPeriod: 1000,
+ DelayedTasksPollPeriod: 20,
+ },
+ GCPPubSub: &GCPPubSubConfig{
+ Client: nil,
+ },
+ }
+
+ reloadDelay = time.Second * 10
+)
+
+// Config holds all configuration for our program
+type Config struct {
+ Broker string `yaml:"broker" envconfig:"BROKER"`
+ DefaultQueue string `yaml:"default_queue" envconfig:"DEFAULT_QUEUE"`
+ ResultBackend string `yaml:"result_backend" envconfig:"RESULT_BACKEND"`
+ ResultsExpireIn int `yaml:"results_expire_in" envconfig:"RESULTS_EXPIRE_IN"`
+ AMQP *AMQPConfig `yaml:"amqp"`
+ SQS *SQSConfig `yaml:"sqs"`
+ Redis *RedisConfig `yaml:"redis"`
+ GCPPubSub *GCPPubSubConfig `yaml:"-" ignored:"true"`
+ MongoDB *MongoDBConfig `yaml:"-" ignored:"true"`
+ TLSConfig *tls.Config
+ // NoUnixSignals - when set disables signal handling in machinery
+ NoUnixSignals bool `yaml:"no_unix_signals" envconfig:"NO_UNIX_SIGNALS"`
+ DynamoDB *DynamoDBConfig `yaml:"dynamodb"`
+}
+
+// QueueBindingArgs arguments which are used when binding to the exchange
+type QueueBindingArgs map[string]interface{}
+
+// AMQPConfig wraps RabbitMQ related configuration
+type AMQPConfig struct {
+ Exchange string `yaml:"exchange" envconfig:"AMQP_EXCHANGE"`
+ ExchangeType string `yaml:"exchange_type" envconfig:"AMQP_EXCHANGE_TYPE"`
+ QueueBindingArgs QueueBindingArgs `yaml:"queue_binding_args" envconfig:"AMQP_QUEUE_BINDING_ARGS"`
+ BindingKey string `yaml:"binding_key" envconfig:"AMQP_BINDING_KEY"`
+ PrefetchCount int `yaml:"prefetch_count" envconfig:"AMQP_PREFETCH_COUNT"`
+ AutoDelete bool `yaml:"auto_delete" envconfig:"AMQP_AUTO_DELETE"`
+}
+
+// DynamoDBConfig wraps DynamoDB related configuration
+type DynamoDBConfig struct {
+ Client *dynamodb.DynamoDB
+ TaskStatesTable string `yaml:"task_states_table" envconfig:"TASK_STATES_TABLE"`
+ GroupMetasTable string `yaml:"group_metas_table" envconfig:"GROUP_METAS_TABLE"`
+}
+
+// SQSConfig wraps SQS related configuration
+type SQSConfig struct {
+ Client *sqs.SQS
+ WaitTimeSeconds int `yaml:"receive_wait_time_seconds" envconfig:"SQS_WAIT_TIME_SECONDS"`
+ // https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html
+ // visibility timeout should default to nil to use the overall visibility timeout for the queue
+ VisibilityTimeout *int `yaml:"receive_visibility_timeout" envconfig:"SQS_VISIBILITY_TIMEOUT"`
+}
+
+// RedisConfig ...
+type RedisConfig struct {
+ // Maximum number of idle connections in the pool.
+ MaxIdle int `yaml:"max_idle" envconfig:"REDIS_MAX_IDLE"`
+
+ // Maximum number of connections allocated by the pool at a given time.
+ // When zero, there is no limit on the number of connections in the pool.
+ MaxActive int `yaml:"max_active" envconfig:"REDIS_MAX_ACTIVE"`
+
+ // Close connections after remaining idle for this duration in seconds. If the value
+ // is zero, then idle connections are not closed. Applications should set
+ // the timeout to a value less than the server's timeout.
+ IdleTimeout int `yaml:"max_idle_timeout" envconfig:"REDIS_IDLE_TIMEOUT"`
+
+ // If Wait is true and the pool is at the MaxActive limit, then Get() waits
+ // for a connection to be returned to the pool before returning.
+ Wait bool `yaml:"wait" envconfig:"REDIS_WAIT"`
+
+ // ReadTimeout specifies the timeout in seconds for reading a single command reply.
+ ReadTimeout int `yaml:"read_timeout" envconfig:"REDIS_READ_TIMEOUT"`
+
+ // WriteTimeout specifies the timeout in seconds for writing a single command.
+ WriteTimeout int `yaml:"write_timeout" envconfig:"REDIS_WRITE_TIMEOUT"`
+
+ // ConnectTimeout specifies the timeout in seconds for connecting to the Redis server when
+ // no DialNetDial option is specified.
+ ConnectTimeout int `yaml:"connect_timeout" envconfig:"REDIS_CONNECT_TIMEOUT"`
+
+ // NormalTasksPollPeriod specifies the period in milliseconds when polling redis for normal tasks
+ NormalTasksPollPeriod int `yaml:"normal_tasks_poll_period" envconfig:"REDIS_NORMAL_TASKS_POLL_PERIOD"`
+
+ // DelayedTasksPollPeriod specifies the period in milliseconds when polling redis for delayed tasks
+ DelayedTasksPollPeriod int `yaml:"delayed_tasks_poll_period" envconfig:"REDIS_DELAYED_TASKS_POLL_PERIOD"`
+}
+
+// GCPPubSubConfig wraps GCP PubSub related configuration
+type GCPPubSubConfig struct {
+ Client *pubsub.Client
+ MaxExtension time.Duration
+}
+
+// MongoDBConfig ...
+type MongoDBConfig struct {
+ Client *mongo.Client
+ Database string
+}
+
+// Decode from yaml to map (any field whose type or pointer-to-type implements
+// envconfig.Decoder can control its own deserialization)
+func (args *QueueBindingArgs) Decode(value string) error {
+ pairs := strings.Split(value, ",")
+ mp := make(map[string]interface{}, len(pairs))
+ for _, pair := range pairs {
+ kvpair := strings.Split(pair, ":")
+ if len(kvpair) != 2 {
+ return fmt.Errorf("invalid map item: %q", pair)
+ }
+ mp[kvpair[0]] = kvpair[1]
+ }
+ *args = QueueBindingArgs(mp)
+ return nil
+}
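
Decode is what lets AMQP queue binding arguments arrive as a flat comma-separated string, the same format AMQP_QUEUE_BINDING_ARGS uses in test.env later in this diff. A quick sketch of the round trip:

package main

import (
	"fmt"
	"log"

	"github.com/RichardKnop/machinery/v1/config"
)

func main() {
	var args config.QueueBindingArgs

	// Same format envconfig feeds in from AMQP_QUEUE_BINDING_ARGS
	if err := args.Decode("image-type:png,x-match:any"); err != nil {
		log.Fatal(err)
	}

	fmt.Println(args["image-type"]) // png
	fmt.Println(args["x-match"])    // any
}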
diff --git a/vendor/github.com/RichardKnop/machinery/v1/config/env.go b/vendor/github.com/RichardKnop/machinery/v1/config/env.go
new file mode 100644
index 000000000..065bac800
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/config/env.go
@@ -0,0 +1,58 @@
+package config
+
+import (
+ "time"
+
+ "github.com/RichardKnop/machinery/v1/log"
+ "github.com/kelseyhightower/envconfig"
+)
+
+// NewFromEnvironment creates a config object from environment variables
+func NewFromEnvironment(keepReloading bool) (*Config, error) {
+ cnf, err := fromEnvironment()
+ if err != nil {
+ return nil, err
+ }
+
+ log.INFO.Print("Successfully loaded config from the environment")
+
+ if keepReloading {
+		// Spawn a goroutine to periodically reload the config from the environment
+ go func() {
+ for {
+ // Delay after each request
+ time.Sleep(reloadDelay)
+
+ // Attempt to reload the config
+ newCnf, newErr := fromEnvironment()
+ if newErr != nil {
+ log.WARNING.Printf("Failed to reload config from the environment: %v", newErr)
+ continue
+ }
+
+ *cnf = *newCnf
+ // log.INFO.Printf("Successfully reloaded config from the environment")
+ }
+ }()
+ }
+
+ return cnf, nil
+}
+
+func fromEnvironment() (*Config, error) {
+ loadedCnf, cnf := new(Config), new(Config)
+ *cnf = *defaultCnf
+
+ if err := envconfig.Process("", cnf); err != nil {
+ return nil, err
+ }
+ if err := envconfig.Process("", loadedCnf); err != nil {
+ return nil, err
+ }
+
+ if loadedCnf.AMQP == nil {
+ cnf.AMQP = nil
+ }
+
+ return cnf, nil
+}
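
A minimal sketch of loading configuration from the environment; the variable values are placeholders, and passing false skips the reload goroutine:

package main

import (
	"log"
	"os"

	"github.com/RichardKnop/machinery/v1/config"
)

func main() {
	// Any field left unset keeps its default from defaultCnf
	os.Setenv("BROKER", "redis://localhost:6379")
	os.Setenv("DEFAULT_QUEUE", "machinery_tasks")

	cnf, err := config.NewFromEnvironment(false)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("broker: %s, queue: %s", cnf.Broker, cnf.DefaultQueue)
}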
diff --git a/vendor/github.com/RichardKnop/machinery/v1/config/file.go b/vendor/github.com/RichardKnop/machinery/v1/config/file.go
new file mode 100644
index 000000000..de3d0ce86
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/config/file.go
@@ -0,0 +1,83 @@
+package config
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/RichardKnop/machinery/v1/log"
+ "gopkg.in/yaml.v2"
+)
+
+// NewFromYaml creates a config object from YAML file
+func NewFromYaml(cnfPath string, keepReloading bool) (*Config, error) {
+ cnf, err := fromFile(cnfPath)
+ if err != nil {
+ return nil, err
+ }
+
+ log.INFO.Printf("Successfully loaded config from file %s", cnfPath)
+
+ if keepReloading {
+		// Spawn a goroutine to periodically reload the config from the file
+ go func() {
+ for {
+ // Delay after each request
+ time.Sleep(reloadDelay)
+
+ // Attempt to reload the config
+ newCnf, newErr := fromFile(cnfPath)
+ if newErr != nil {
+ log.WARNING.Printf("Failed to reload config from file %s: %v", cnfPath, newErr)
+ continue
+ }
+
+ *cnf = *newCnf
+ // log.INFO.Printf("Successfully reloaded config from file %s", cnfPath)
+ }
+ }()
+ }
+
+ return cnf, nil
+}
+
+// ReadFromFile reads data from a file
+func ReadFromFile(cnfPath string) ([]byte, error) {
+	// Read the whole file in one call; a fixed-size buffer would silently
+	// truncate configs larger than the buffer, and ReadFile also closes
+	// the file for us
+	data, err := ioutil.ReadFile(cnfPath)
+	if err != nil {
+		return nil, fmt.Errorf("Read from file error: %s", err)
+	}
+
+	return data, nil
+}
+
+func fromFile(cnfPath string) (*Config, error) {
+ loadedCnf, cnf := new(Config), new(Config)
+ *cnf = *defaultCnf
+
+ data, err := ReadFromFile(cnfPath)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := yaml.Unmarshal(data, cnf); err != nil {
+ return nil, fmt.Errorf("Unmarshal YAML error: %s", err)
+ }
+ if err := yaml.Unmarshal(data, loadedCnf); err != nil {
+ return nil, fmt.Errorf("Unmarshal YAML error: %s", err)
+ }
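+	// loadedCnf starts from zero values, so a nil AMQP section means the YAML supplied none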
+ if loadedCnf.AMQP == nil {
+ cnf.AMQP = nil
+ }
+
+ return cnf, nil
+}
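
A minimal sketch of loading configuration from a YAML file shaped like testconfig.yml later in this diff; the path is a placeholder:

package main

import (
	"log"

	"github.com/RichardKnop/machinery/v1/config"
)

func main() {
	// false: load once, do not start the reload goroutine
	cnf, err := config.NewFromYaml("config.yml", false)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("result backend: %s", cnf.ResultBackend)
}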
diff --git a/vendor/github.com/RichardKnop/machinery/v1/config/test.env b/vendor/github.com/RichardKnop/machinery/v1/config/test.env
new file mode 100644
index 000000000..cfbb935a2
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/config/test.env
@@ -0,0 +1,9 @@
+BROKER=broker
+DEFAULT_QUEUE=default_queue
+RESULT_BACKEND=result_backend
+RESULTS_EXPIRE_IN=123456
+AMQP_BINDING_KEY=binding_key
+AMQP_EXCHANGE=exchange
+AMQP_EXCHANGE_TYPE=exchange_type
+AMQP_PREFETCH_COUNT=123
+AMQP_QUEUE_BINDING_ARGS=image-type:png,x-match:any
diff --git a/vendor/github.com/RichardKnop/machinery/v1/config/testconfig.yml b/vendor/github.com/RichardKnop/machinery/v1/config/testconfig.yml
new file mode 100644
index 000000000..236c416a0
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/config/testconfig.yml
@@ -0,0 +1,13 @@
+---
+broker: broker
+default_queue: default_queue
+result_backend: result_backend
+results_expire_in: 123456
+amqp:
+ binding_key: binding_key
+ exchange: exchange
+ exchange_type: exchange_type
+ prefetch_count: 123
+ queue_binding_args:
+ image-type: png
+ x-match: any
diff --git a/vendor/github.com/RichardKnop/machinery/v1/factories.go b/vendor/github.com/RichardKnop/machinery/v1/factories.go
new file mode 100644
index 000000000..f84754b1f
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/factories.go
@@ -0,0 +1,268 @@
+package machinery
+
+import (
+ "errors"
+ "fmt"
+ neturl "net/url"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/RichardKnop/machinery/v1/config"
+
+ amqpbroker "github.com/RichardKnop/machinery/v1/brokers/amqp"
+ eagerbroker "github.com/RichardKnop/machinery/v1/brokers/eager"
+ gcppubsubbroker "github.com/RichardKnop/machinery/v1/brokers/gcppubsub"
+ brokeriface "github.com/RichardKnop/machinery/v1/brokers/iface"
+ redisbroker "github.com/RichardKnop/machinery/v1/brokers/redis"
+ sqsbroker "github.com/RichardKnop/machinery/v1/brokers/sqs"
+
+ amqpbackend "github.com/RichardKnop/machinery/v1/backends/amqp"
+ dynamobackend "github.com/RichardKnop/machinery/v1/backends/dynamodb"
+ eagerbackend "github.com/RichardKnop/machinery/v1/backends/eager"
+ backendiface "github.com/RichardKnop/machinery/v1/backends/iface"
+ memcachebackend "github.com/RichardKnop/machinery/v1/backends/memcache"
+ mongobackend "github.com/RichardKnop/machinery/v1/backends/mongo"
+ nullbackend "github.com/RichardKnop/machinery/v1/backends/null"
+ redisbackend "github.com/RichardKnop/machinery/v1/backends/redis"
+)
+
+// BrokerFactory creates a new object of iface.Broker
+// Currently AMQP/S, Redis (TCP and unix socket), eager, SQS and GCP Pub/Sub brokers are supported
+func BrokerFactory(cnf *config.Config) (brokeriface.Broker, error) {
+ if strings.HasPrefix(cnf.Broker, "amqp://") {
+ return amqpbroker.New(cnf), nil
+ }
+
+ if strings.HasPrefix(cnf.Broker, "amqps://") {
+ return amqpbroker.New(cnf), nil
+ }
+
+ if strings.HasPrefix(cnf.Broker, "redis://") {
+ parts := strings.Split(cnf.Broker, "redis://")
+ if len(parts) != 2 {
+ return nil, fmt.Errorf(
+ "Redis broker connection string should be in format redis://host:port, instead got %s",
+ cnf.Broker,
+ )
+ }
+
+ redisHost, redisPassword, redisDB, err := ParseRedisURL(cnf.Broker)
+ if err != nil {
+ return nil, err
+ }
+ return redisbroker.New(cnf, redisHost, redisPassword, "", redisDB), nil
+ }
+
+ if strings.HasPrefix(cnf.Broker, "redis+socket://") {
+ redisSocket, redisPassword, redisDB, err := ParseRedisSocketURL(cnf.Broker)
+ if err != nil {
+ return nil, err
+ }
+
+ return redisbroker.New(cnf, "", redisPassword, redisSocket, redisDB), nil
+ }
+
+ if strings.HasPrefix(cnf.Broker, "eager") {
+ return eagerbroker.New(), nil
+ }
+
+ if _, ok := os.LookupEnv("DISABLE_STRICT_SQS_CHECK"); ok {
+		// disable the SQS name check so that users can use this with a locally simulated SQS
+		// where the sqs broker url might not start with https://sqs
+
+		// even when disabling the strict SQS naming check, make sure it's still a valid http URL
+ if strings.HasPrefix(cnf.Broker, "https://") || strings.HasPrefix(cnf.Broker, "http://") {
+ return sqsbroker.New(cnf), nil
+ }
+ } else {
+ if strings.HasPrefix(cnf.Broker, "https://sqs") {
+ return sqsbroker.New(cnf), nil
+ }
+ }
+
+ if strings.HasPrefix(cnf.Broker, "gcppubsub://") {
+ projectID, subscriptionName, err := ParseGCPPubSubURL(cnf.Broker)
+ if err != nil {
+ return nil, err
+ }
+ return gcppubsubbroker.New(cnf, projectID, subscriptionName)
+ }
+
+ return nil, fmt.Errorf("Factory failed with broker URL: %v", cnf.Broker)
+}
+
+// BackendFactory creates a new object of backends.Interface
+// Currently supported backends are AMQP/S, Memcache, Redis, MongoDB, DynamoDB, eager and null
+func BackendFactory(cnf *config.Config) (backendiface.Backend, error) {
+ if strings.HasPrefix(cnf.ResultBackend, "amqp://") {
+ return amqpbackend.New(cnf), nil
+ }
+
+ if strings.HasPrefix(cnf.ResultBackend, "amqps://") {
+ return amqpbackend.New(cnf), nil
+ }
+
+ if strings.HasPrefix(cnf.ResultBackend, "memcache://") {
+ parts := strings.Split(cnf.ResultBackend, "memcache://")
+ if len(parts) != 2 {
+ return nil, fmt.Errorf(
+ "Memcache result backend connection string should be in format memcache://server1:port,server2:port, instead got %s",
+ cnf.ResultBackend,
+ )
+ }
+ servers := strings.Split(parts[1], ",")
+ return memcachebackend.New(cnf, servers), nil
+ }
+
+ if strings.HasPrefix(cnf.ResultBackend, "redis://") {
+ redisHost, redisPassword, redisDB, err := ParseRedisURL(cnf.ResultBackend)
+ if err != nil {
+ return nil, err
+ }
+
+ return redisbackend.New(cnf, redisHost, redisPassword, "", redisDB), nil
+ }
+
+ if strings.HasPrefix(cnf.ResultBackend, "redis+socket://") {
+ redisSocket, redisPassword, redisDB, err := ParseRedisSocketURL(cnf.ResultBackend)
+ if err != nil {
+ return nil, err
+ }
+
+ return redisbackend.New(cnf, "", redisPassword, redisSocket, redisDB), nil
+ }
+
+ if strings.HasPrefix(cnf.ResultBackend, "mongodb://") ||
+ strings.HasPrefix(cnf.ResultBackend, "mongodb+srv://") {
+ return mongobackend.New(cnf)
+ }
+
+ if strings.HasPrefix(cnf.ResultBackend, "eager") {
+ return eagerbackend.New(), nil
+ }
+
+ if strings.HasPrefix(cnf.ResultBackend, "null") {
+ return nullbackend.New(), nil
+ }
+
+ if strings.HasPrefix(cnf.ResultBackend, "https://dynamodb") {
+ return dynamobackend.New(cnf), nil
+ }
+
+ return nil, fmt.Errorf("Factory failed with result backend: %v", cnf.ResultBackend)
+}
+
+// ParseRedisURL extracts the host, password and database number from a redis:// URL
+func ParseRedisURL(url string) (host, password string, db int, err error) {
+ // redis://pwd@host/db
+
+ var u *neturl.URL
+ u, err = neturl.Parse(url)
+ if err != nil {
+ return
+ }
+ if u.Scheme != "redis" {
+ err = errors.New("No redis scheme found")
+ return
+ }
+
+ if u.User != nil {
+ var exists bool
+ password, exists = u.User.Password()
+ if !exists {
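+			// a redis://password@host URL carries the password in the username position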
+ password = u.User.Username()
+ }
+ }
+
+ host = u.Host
+
+ parts := strings.Split(u.Path, "/")
+ if len(parts) == 1 {
+ db = 0 //default redis db
+ } else {
+ db, err = strconv.Atoi(parts[1])
+ if err != nil {
+ db, err = 0, nil //ignore err here
+ }
+ }
+
+ return
+}
+
+// ParseRedisSocketURL extracts Redis connection options from a URL with the
+// redis+socket:// scheme. This scheme is not standard (or even de facto) and
+// is used as a transitional mechanism until the config package gains the
+// proper facilities to support socket-based connections.
+func ParseRedisSocketURL(url string) (path, password string, db int, err error) {
+ parts := strings.Split(url, "redis+socket://")
+ if parts[0] != "" {
+ err = errors.New("No redis scheme found")
+ return
+ }
+
+	// redis+socket://password@/path/to/file.sock:/db
+
+ if len(parts) != 2 {
+ err = fmt.Errorf("Redis socket connection string should be in format redis+socket://password@/path/to/file.sock:/db, instead got %s", url)
+ return
+ }
+
+ remainder := parts[1]
+
+ // Extract password if any
+ parts = strings.SplitN(remainder, "@", 2)
+ if len(parts) == 2 {
+ password = parts[0]
+ remainder = parts[1]
+ } else {
+ remainder = parts[0]
+ }
+
+ // Extract path
+ parts = strings.SplitN(remainder, ":", 2)
+ path = parts[0]
+ if path == "" {
+ err = fmt.Errorf("Redis socket connection string should be in format redis+socket://password@/path/to/file.sock:/db, instead got %s", url)
+ return
+ }
+ if len(parts) == 2 {
+ remainder = parts[1]
+ }
+
+ // Extract DB if any
+ parts = strings.SplitN(remainder, "/", 2)
+ if len(parts) == 2 {
+ db, _ = strconv.Atoi(parts[1])
+ }
+
+ return
+}
+
+// ParseGCPPubSubURL parses a GCP Pub/Sub URL of the form:
+// url: gcppubsub://YOUR_GCP_PROJECT_ID/YOUR_PUBSUB_SUBSCRIPTION_NAME
+func ParseGCPPubSubURL(url string) (string, string, error) {
+ parts := strings.Split(url, "gcppubsub://")
+ if parts[0] != "" {
+ return "", "", errors.New("No gcppubsub scheme found")
+ }
+
+ if len(parts) != 2 {
+ return "", "", fmt.Errorf("gcppubsub scheme should be in format gcppubsub://YOUR_GCP_PROJECT_ID/YOUR_PUBSUB_SUBSCRIPTION_NAME, instead got %s", url)
+ }
+
+ remainder := parts[1]
+
+ parts = strings.Split(remainder, "/")
+ if len(parts) == 2 {
+ if len(parts[0]) == 0 {
+ return "", "", fmt.Errorf("gcppubsub scheme should be in format gcppubsub://YOUR_GCP_PROJECT_ID/YOUR_PUBSUB_SUBSCRIPTION_NAME, instead got %s", url)
+ }
+ if len(parts[1]) == 0 {
+ return "", "", fmt.Errorf("gcppubsub scheme should be in format gcppubsub://YOUR_GCP_PROJECT_ID/YOUR_PUBSUB_SUBSCRIPTION_NAME, instead got %s", url)
+ }
+ return parts[0], parts[1], nil
+ }
+
+ return "", "", fmt.Errorf("gcppubsub scheme should be in format gcppubsub://YOUR_GCP_PROJECT_ID/YOUR_PUBSUB_SUBSCRIPTION_NAME, instead got %s", url)
+}
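
A sketch tying the URL parsing and the factory together; the Redis URL is a placeholder chosen so that both the password fallback and the database number are exercised:

package main

import (
	"log"

	machinery "github.com/RichardKnop/machinery/v1"
	"github.com/RichardKnop/machinery/v1/config"
)

func main() {
	host, password, db, err := machinery.ParseRedisURL("redis://secret@localhost:6379/2")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("host=%s password=%s db=%d", host, password, db)

	// The factory picks the broker implementation from the URL scheme
	cnf := &config.Config{Broker: "redis://localhost:6379", DefaultQueue: "machinery_tasks"}
	broker, err := machinery.BrokerFactory(cnf)
	if err != nil {
		log.Fatal(err)
	}
	_ = broker
}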
diff --git a/vendor/github.com/RichardKnop/machinery/v1/log/log.go b/vendor/github.com/RichardKnop/machinery/v1/log/log.go
new file mode 100644
index 000000000..12f382c61
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/log/log.go
@@ -0,0 +1,54 @@
+package log
+
+import (
+ "github.com/RichardKnop/logging"
+)
+
+var (
+ logger = logging.New(nil, nil, new(logging.ColouredFormatter))
+
+ // DEBUG ...
+ DEBUG = logger[logging.DEBUG]
+ // INFO ...
+ INFO = logger[logging.INFO]
+ // WARNING ...
+ WARNING = logger[logging.WARNING]
+ // ERROR ...
+ ERROR = logger[logging.ERROR]
+ // FATAL ...
+ FATAL = logger[logging.FATAL]
+)
+
+// Set sets a custom logger for all log levels
+func Set(l logging.LoggerInterface) {
+ DEBUG = l
+ INFO = l
+ WARNING = l
+ ERROR = l
+ FATAL = l
+}
+
+// SetDebug sets a custom logger for DEBUG level logs
+func SetDebug(l logging.LoggerInterface) {
+ DEBUG = l
+}
+
+// SetInfo sets a custom logger for INFO level logs
+func SetInfo(l logging.LoggerInterface) {
+ INFO = l
+}
+
+// SetWarning sets a custom logger for WARNING level logs
+func SetWarning(l logging.LoggerInterface) {
+ WARNING = l
+}
+
+// SetError sets a custom logger for ERROR level logs
+func SetError(l logging.LoggerInterface) {
+ ERROR = l
+}
+
+// SetFatal sets a custom logger for FATAL level logs
+func SetFatal(l logging.LoggerInterface) {
+ FATAL = l
+}
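
The standard library's *log.Logger provides the Print, Fatal and Panic method families, so it should satisfy logging.LoggerInterface (not shown in this excerpt) and can be swapped in wholesale. A sketch under that assumption:

package main

import (
	stdlog "log"
	"os"

	mlog "github.com/RichardKnop/machinery/v1/log"
)

func main() {
	// Route every machinery log level through one standard-library logger
	logger := stdlog.New(os.Stderr, "machinery: ", stdlog.LstdFlags)
	mlog.Set(logger)

	mlog.INFO.Print("now printed via the custom logger")
}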
diff --git a/vendor/github.com/RichardKnop/machinery/v1/package.go b/vendor/github.com/RichardKnop/machinery/v1/package.go
new file mode 100644
index 000000000..015873c19
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/package.go
@@ -0,0 +1 @@
+package machinery
diff --git a/vendor/github.com/RichardKnop/machinery/v1/retry/fibonacci.go b/vendor/github.com/RichardKnop/machinery/v1/retry/fibonacci.go
new file mode 100644
index 000000000..9a7bd1bf7
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/retry/fibonacci.go
@@ -0,0 +1,20 @@
+package retry
+
+// Fibonacci returns successive Fibonacci numbers starting from 1
+func Fibonacci() func() int {
+ a, b := 0, 1
+ return func() int {
+ a, b = b, a+b
+ return a
+ }
+}
+
+// FibonacciNext returns the next number in the Fibonacci sequence greater than start
+func FibonacciNext(start int) int {
+ fib := Fibonacci()
+ num := fib()
+ for num <= start {
+ num = fib()
+ }
+ return num
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/retry/retry.go b/vendor/github.com/RichardKnop/machinery/v1/retry/retry.go
new file mode 100644
index 000000000..11076dfd8
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/retry/retry.go
@@ -0,0 +1,31 @@
+package retry
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/RichardKnop/machinery/v1/log"
+)
+
+// Closure - a useful closure we can use when there is a problem
+// connecting to the broker. It uses the Fibonacci sequence to space out retry attempts
+var Closure = func() func(chan int) {
+ retryIn := 0
+ fibonacci := Fibonacci()
+ return func(stopChan chan int) {
+ if retryIn > 0 {
+ durationString := fmt.Sprintf("%vs", retryIn)
+ duration, _ := time.ParseDuration(durationString)
+
+ log.WARNING.Printf("Retrying in %v seconds", retryIn)
+
+ select {
+ case <-stopChan:
+ break
+ case <-time.After(duration):
+ break
+ }
+ }
+ retryIn = fibonacci()
+ }
+}
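
A sketch of the backoff behaviour: the first invocation returns immediately because retryIn starts at zero, and each later call sleeps for the next Fibonacci number of seconds unless the stop channel fires first:

package main

import (
	"fmt"

	"github.com/RichardKnop/machinery/v1/retry"
)

func main() {
	// The raw sequence used to space out attempts
	fib := retry.Fibonacci()
	for i := 0; i < 6; i++ {
		fmt.Print(fib(), " ") // 1 1 2 3 5 8
	}
	fmt.Println()

	stopChan := make(chan int)
	retryClosure := retry.Closure()
	retryClosure(stopChan) // returns immediately
	retryClosure(stopChan) // sleeps ~1s before returning
}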
diff --git a/vendor/github.com/RichardKnop/machinery/v1/server.go b/vendor/github.com/RichardKnop/machinery/v1/server.go
new file mode 100644
index 000000000..372d55f02
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/server.go
@@ -0,0 +1,329 @@
+package machinery
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/RichardKnop/machinery/v1/backends/result"
+ "github.com/RichardKnop/machinery/v1/brokers/eager"
+ "github.com/RichardKnop/machinery/v1/config"
+ "github.com/RichardKnop/machinery/v1/tasks"
+ "github.com/RichardKnop/machinery/v1/tracing"
+ "github.com/google/uuid"
+
+ backendsiface "github.com/RichardKnop/machinery/v1/backends/iface"
+ brokersiface "github.com/RichardKnop/machinery/v1/brokers/iface"
+ opentracing "github.com/opentracing/opentracing-go"
+)
+
+// Server is the main Machinery object and stores all configuration.
+// All of the tasks that workers process are registered against the server.
+type Server struct {
+ config *config.Config
+ registeredTasks map[string]interface{}
+ broker brokersiface.Broker
+ backend backendsiface.Backend
+ prePublishHandler func(*tasks.Signature)
+}
+
+// NewServerWithBrokerBackend ...
+func NewServerWithBrokerBackend(cnf *config.Config, brokerServer brokersiface.Broker, backendServer backendsiface.Backend) *Server {
+ return &Server{
+ config: cnf,
+ registeredTasks: make(map[string]interface{}),
+ broker: brokerServer,
+ backend: backendServer,
+ }
+}
+
+// NewServer creates Server instance
+func NewServer(cnf *config.Config) (*Server, error) {
+ broker, err := BrokerFactory(cnf)
+ if err != nil {
+ return nil, err
+ }
+
+ // Backend is optional so we ignore the error
+ backend, _ := BackendFactory(cnf)
+
+ srv := NewServerWithBrokerBackend(cnf, broker, backend)
+
+ // init for eager-mode
+ eager, ok := broker.(eager.Mode)
+ if ok {
+ // we don't have to call worker.Launch in eager mode
+ eager.AssignWorker(srv.NewWorker("eager", 0))
+ }
+
+ return srv, nil
+}
+
+// NewWorker creates Worker instance
+func (server *Server) NewWorker(consumerTag string, concurrency int) *Worker {
+ return &Worker{
+ server: server,
+ ConsumerTag: consumerTag,
+ Concurrency: concurrency,
+ Queue: "",
+ }
+}
+
+// NewCustomQueueWorker creates Worker instance with Custom Queue
+func (server *Server) NewCustomQueueWorker(consumerTag string, concurrency int, queue string) *Worker {
+ return &Worker{
+ server: server,
+ ConsumerTag: consumerTag,
+ Concurrency: concurrency,
+ Queue: queue,
+ }
+}
+
+// GetBroker returns broker
+func (server *Server) GetBroker() brokersiface.Broker {
+ return server.broker
+}
+
+// SetBroker sets broker
+func (server *Server) SetBroker(broker brokersiface.Broker) {
+ server.broker = broker
+}
+
+// GetBackend returns backend
+func (server *Server) GetBackend() backendsiface.Backend {
+ return server.backend
+}
+
+// SetBackend sets backend
+func (server *Server) SetBackend(backend backendsiface.Backend) {
+ server.backend = backend
+}
+
+// GetConfig returns the server's config object
+func (server *Server) GetConfig() *config.Config {
+ return server.config
+}
+
+// SetConfig sets config
+func (server *Server) SetConfig(cnf *config.Config) {
+ server.config = cnf
+}
+
+// SetPreTaskHandler sets a handler which is called before a task is published
+func (server *Server) SetPreTaskHandler(handler func(*tasks.Signature)) {
+ server.prePublishHandler = handler
+}
+
+// RegisterTasks registers all tasks at once
+func (server *Server) RegisterTasks(namedTaskFuncs map[string]interface{}) error {
+ for _, task := range namedTaskFuncs {
+ if err := tasks.ValidateTask(task); err != nil {
+ return err
+ }
+ }
+ server.registeredTasks = namedTaskFuncs
+ server.broker.SetRegisteredTaskNames(server.GetRegisteredTaskNames())
+ return nil
+}
+
+// RegisterTask registers a single task
+func (server *Server) RegisterTask(name string, taskFunc interface{}) error {
+ if err := tasks.ValidateTask(taskFunc); err != nil {
+ return err
+ }
+ server.registeredTasks[name] = taskFunc
+ server.broker.SetRegisteredTaskNames(server.GetRegisteredTaskNames())
+ return nil
+}
+
+// IsTaskRegistered returns true if the task name is registered with this server
+func (server *Server) IsTaskRegistered(name string) bool {
+ _, ok := server.registeredTasks[name]
+ return ok
+}
+
+// GetRegisteredTask returns registered task by name
+func (server *Server) GetRegisteredTask(name string) (interface{}, error) {
+ taskFunc, ok := server.registeredTasks[name]
+ if !ok {
+ return nil, fmt.Errorf("Task not registered error: %s", name)
+ }
+ return taskFunc, nil
+}
+
+// SendTaskWithContext will inject the trace context in the signature headers before publishing it
+func (server *Server) SendTaskWithContext(ctx context.Context, signature *tasks.Signature) (*result.AsyncResult, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "SendTask", tracing.ProducerOption(), tracing.MachineryTag)
+ defer span.Finish()
+
+ // tag the span with some info about the signature
+ signature.Headers = tracing.HeadersWithSpan(signature.Headers, span)
+
+ // Make sure result backend is defined
+ if server.backend == nil {
+ return nil, errors.New("Result backend required")
+ }
+
+ // Auto generate a UUID if not set already
+ if signature.UUID == "" {
+ taskID := uuid.New().String()
+ signature.UUID = fmt.Sprintf("task_%v", taskID)
+ }
+
+ // Set initial task state to PENDING
+ if err := server.backend.SetStatePending(signature); err != nil {
+ return nil, fmt.Errorf("Set state pending error: %s", err)
+ }
+
+ if server.prePublishHandler != nil {
+ server.prePublishHandler(signature)
+ }
+
+ if err := server.broker.Publish(ctx, signature); err != nil {
+ return nil, fmt.Errorf("Publish message error: %s", err)
+ }
+
+ return result.NewAsyncResult(signature, server.backend), nil
+}
+
+// SendTask publishes a task to the default queue
+func (server *Server) SendTask(signature *tasks.Signature) (*result.AsyncResult, error) {
+ return server.SendTaskWithContext(context.Background(), signature)
+}
+
+// SendChainWithContext will inject the trace context in all the signature headers before publishing it
+func (server *Server) SendChainWithContext(ctx context.Context, chain *tasks.Chain) (*result.ChainAsyncResult, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "SendChain", tracing.ProducerOption(), tracing.MachineryTag, tracing.WorkflowChainTag)
+ defer span.Finish()
+
+ tracing.AnnotateSpanWithChainInfo(span, chain)
+
+ return server.SendChain(chain)
+}
+
+// SendChain triggers a chain of tasks
+func (server *Server) SendChain(chain *tasks.Chain) (*result.ChainAsyncResult, error) {
+ _, err := server.SendTask(chain.Tasks[0])
+ if err != nil {
+ return nil, err
+ }
+
+ return result.NewChainAsyncResult(chain.Tasks, server.backend), nil
+}
+
+// SendGroupWithContext will inject the trace context in all the signature headers before publishing it
+func (server *Server) SendGroupWithContext(ctx context.Context, group *tasks.Group, sendConcurrency int) ([]*result.AsyncResult, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "SendGroup", tracing.ProducerOption(), tracing.MachineryTag, tracing.WorkflowGroupTag)
+ defer span.Finish()
+
+ tracing.AnnotateSpanWithGroupInfo(span, group, sendConcurrency)
+
+ // Make sure result backend is defined
+ if server.backend == nil {
+ return nil, errors.New("Result backend required")
+ }
+
+ asyncResults := make([]*result.AsyncResult, len(group.Tasks))
+
+ var wg sync.WaitGroup
+ wg.Add(len(group.Tasks))
+ errorsChan := make(chan error, len(group.Tasks)*2)
+
+ // Init group
+ server.backend.InitGroup(group.GroupUUID, group.GetUUIDs())
+
+ // Init the tasks Pending state first
+ for _, signature := range group.Tasks {
+ if err := server.backend.SetStatePending(signature); err != nil {
+ errorsChan <- err
+ continue
+ }
+ }
+
+ pool := make(chan struct{}, sendConcurrency)
+ go func() {
+ for i := 0; i < sendConcurrency; i++ {
+ pool <- struct{}{}
+ }
+ }()
+
+ for i, signature := range group.Tasks {
+
+ if sendConcurrency > 0 {
+ <-pool
+ }
+
+ go func(s *tasks.Signature, index int) {
+ defer wg.Done()
+
+ // Publish task
+
+ err := server.broker.Publish(ctx, s)
+
+ if sendConcurrency > 0 {
+ pool <- struct{}{}
+ }
+
+ if err != nil {
+ errorsChan <- fmt.Errorf("Publish message error: %s", err)
+ return
+ }
+
+ asyncResults[index] = result.NewAsyncResult(s, server.backend)
+ }(signature, i)
+ }
+
+ done := make(chan int)
+ go func() {
+ wg.Wait()
+ done <- 1
+ }()
+
+ select {
+ case err := <-errorsChan:
+ return asyncResults, err
+ case <-done:
+ return asyncResults, nil
+ }
+}
+
+// SendGroup triggers a group of parallel tasks
+func (server *Server) SendGroup(group *tasks.Group, sendConcurrency int) ([]*result.AsyncResult, error) {
+ return server.SendGroupWithContext(context.Background(), group, sendConcurrency)
+}
+
+// SendChordWithContext will inject the trace context in all the signature headers before publishing it
+func (server *Server) SendChordWithContext(ctx context.Context, chord *tasks.Chord, sendConcurrency int) (*result.ChordAsyncResult, error) {
+ span, _ := opentracing.StartSpanFromContext(ctx, "SendChord", tracing.ProducerOption(), tracing.MachineryTag, tracing.WorkflowChordTag)
+ defer span.Finish()
+
+ tracing.AnnotateSpanWithChordInfo(span, chord, sendConcurrency)
+
+ _, err := server.SendGroupWithContext(ctx, chord.Group, sendConcurrency)
+ if err != nil {
+ return nil, err
+ }
+
+ return result.NewChordAsyncResult(
+ chord.Group.Tasks,
+ chord.Callback,
+ server.backend,
+ ), nil
+}
+
+// SendChord triggers a group of parallel tasks with a callback
+func (server *Server) SendChord(chord *tasks.Chord, sendConcurrency int) (*result.ChordAsyncResult, error) {
+ return server.SendChordWithContext(context.Background(), chord, sendConcurrency)
+}
+
+// GetRegisteredTaskNames returns slice of registered task names
+func (server *Server) GetRegisteredTaskNames() []string {
+ taskNames := make([]string, len(server.registeredTasks))
+ var i = 0
+ for name := range server.registeredTasks {
+ taskNames[i] = name
+ i++
+ }
+ return taskNames
+}
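
An end-to-end sketch of the server API, assuming a Redis broker and result backend on localhost; worker.Launch and AsyncResult.Get live outside this excerpt:

package main

import (
	"log"
	"time"

	machinery "github.com/RichardKnop/machinery/v1"
	"github.com/RichardKnop/machinery/v1/config"
	"github.com/RichardKnop/machinery/v1/tasks"
)

// Add is a task func: arguments first, error as the last return value
func Add(a, b int64) (int64, error) { return a + b, nil }

func main() {
	cnf := &config.Config{
		Broker:        "redis://localhost:6379",
		ResultBackend: "redis://localhost:6379",
		DefaultQueue:  "machinery_tasks",
	}

	server, err := machinery.NewServer(cnf)
	if err != nil {
		log.Fatal(err)
	}
	if err := server.RegisterTask("add", Add); err != nil {
		log.Fatal(err)
	}

	// A worker would normally run in its own process:
	// server.NewWorker("worker-1", 10).Launch()

	asyncResult, err := server.SendTask(&tasks.Signature{
		Name: "add",
		Args: []tasks.Arg{
			{Type: "int64", Value: 1},
			{Type: "int64", Value: 2},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Get polls the backend until the task finishes
	results, err := asyncResult.Get(50 * time.Millisecond)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(tasks.HumanReadableResults(results)) // 3
}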
diff --git a/vendor/github.com/RichardKnop/machinery/v1/tasks/errors.go b/vendor/github.com/RichardKnop/machinery/v1/tasks/errors.go
new file mode 100644
index 000000000..fa32f97a2
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/tasks/errors.go
@@ -0,0 +1,32 @@
+package tasks
+
+import (
+ "fmt"
+ "time"
+)
+
+// ErrRetryTaskLater ...
+type ErrRetryTaskLater struct {
+ name, msg string
+ retryIn time.Duration
+}
+
+// RetryIn returns time.Duration from now when task should be retried
+func (e ErrRetryTaskLater) RetryIn() time.Duration {
+ return e.retryIn
+}
+
+// Error implements the error interface
+func (e ErrRetryTaskLater) Error() string {
+ return fmt.Sprintf("Task error: %s Will retry in: %s", e.msg, e.retryIn)
+}
+
+// NewErrRetryTaskLater returns new ErrRetryTaskLater instance
+func NewErrRetryTaskLater(msg string, retryIn time.Duration) ErrRetryTaskLater {
+ return ErrRetryTaskLater{msg: msg, retryIn: retryIn}
+}
+
+// Retriable is the interface that retriable errors should implement
+type Retriable interface {
+ RetryIn() time.Duration
+}
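
A task opts into rescheduling by returning this error; the worker-side handling lives outside this excerpt. A sketch with a hypothetical FetchURL task:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/RichardKnop/machinery/v1/tasks"
)

// FetchURL is a hypothetical task that asks to be retried on failure
func FetchURL(url string) (string, error) {
	body, err := download(url)
	if err != nil {
		// The worker reschedules the task after 30 seconds instead of failing it
		return "", tasks.NewErrRetryTaskLater("temporary fetch error", 30*time.Second)
	}
	return body, nil
}

// download is stubbed out for the sketch
func download(url string) (string, error) {
	return "", errors.New("network down")
}

func main() {
	if _, err := FetchURL("https://example.com"); err != nil {
		fmt.Println(err) // Task error: temporary fetch error Will retry in: 30s
	}
}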
diff --git a/vendor/github.com/RichardKnop/machinery/v1/tasks/reflect.go b/vendor/github.com/RichardKnop/machinery/v1/tasks/reflect.go
new file mode 100644
index 000000000..39c1f0fb3
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/tasks/reflect.go
@@ -0,0 +1,352 @@
+package tasks
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var (
+ typesMap = map[string]reflect.Type{
+ // base types
+ "bool": reflect.TypeOf(true),
+ "int": reflect.TypeOf(int(1)),
+ "int8": reflect.TypeOf(int8(1)),
+ "int16": reflect.TypeOf(int16(1)),
+ "int32": reflect.TypeOf(int32(1)),
+ "int64": reflect.TypeOf(int64(1)),
+ "uint": reflect.TypeOf(uint(1)),
+ "uint8": reflect.TypeOf(uint8(1)),
+ "uint16": reflect.TypeOf(uint16(1)),
+ "uint32": reflect.TypeOf(uint32(1)),
+ "uint64": reflect.TypeOf(uint64(1)),
+ "float32": reflect.TypeOf(float32(0.5)),
+ "float64": reflect.TypeOf(float64(0.5)),
+ "string": reflect.TypeOf(string("")),
+ // slices
+ "[]bool": reflect.TypeOf(make([]bool, 0)),
+ "[]int": reflect.TypeOf(make([]int, 0)),
+ "[]int8": reflect.TypeOf(make([]int8, 0)),
+ "[]int16": reflect.TypeOf(make([]int16, 0)),
+ "[]int32": reflect.TypeOf(make([]int32, 0)),
+ "[]int64": reflect.TypeOf(make([]int64, 0)),
+ "[]uint": reflect.TypeOf(make([]uint, 0)),
+ "[]uint8": reflect.TypeOf(make([]uint8, 0)),
+ "[]uint16": reflect.TypeOf(make([]uint16, 0)),
+ "[]uint32": reflect.TypeOf(make([]uint32, 0)),
+ "[]uint64": reflect.TypeOf(make([]uint64, 0)),
+ "[]float32": reflect.TypeOf(make([]float32, 0)),
+ "[]float64": reflect.TypeOf(make([]float64, 0)),
+ "[]byte": reflect.TypeOf(make([]byte, 0)),
+ "[]string": reflect.TypeOf([]string{""}),
+ }
+
+ ctxType = reflect.TypeOf((*context.Context)(nil)).Elem()
+
+ typeConversionError = func(argValue interface{}, argTypeStr string) error {
+ return fmt.Errorf("%v is not %v", argValue, argTypeStr)
+ }
+)
+
+// ErrUnsupportedType ...
+type ErrUnsupportedType struct {
+ valueType string
+}
+
+// NewErrUnsupportedType returns new ErrUnsupportedType
+func NewErrUnsupportedType(valueType string) ErrUnsupportedType {
+ return ErrUnsupportedType{valueType}
+}
+
+// Error method so we implement the error interface
+func (e ErrUnsupportedType) Error() string {
+ return fmt.Sprintf("%v is not one of supported types", e.valueType)
+}
+
+// ReflectValue converts interface{} to reflect.Value based on string type
+func ReflectValue(valueType string, value interface{}) (reflect.Value, error) {
+ if strings.HasPrefix(valueType, "[]") {
+ return reflectValues(valueType, value)
+ }
+
+ return reflectValue(valueType, value)
+}
+
+// reflectValue converts interface{} to reflect.Value based on string type
+// representing a base type (not a slice)
+func reflectValue(valueType string, value interface{}) (reflect.Value, error) {
+ theType, ok := typesMap[valueType]
+ if !ok {
+ return reflect.Value{}, NewErrUnsupportedType(valueType)
+ }
+ theValue := reflect.New(theType)
+
+ // Booleans
+ if theType.String() == "bool" {
+ boolValue, err := getBoolValue(theType.String(), value)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ theValue.Elem().SetBool(boolValue)
+ return theValue.Elem(), nil
+ }
+
+ // Integers
+ if strings.HasPrefix(theType.String(), "int") {
+ intValue, err := getIntValue(theType.String(), value)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ theValue.Elem().SetInt(intValue)
+ return theValue.Elem(), err
+ }
+
+ // Unsigned integers
+ if strings.HasPrefix(theType.String(), "uint") {
+ uintValue, err := getUintValue(theType.String(), value)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ theValue.Elem().SetUint(uintValue)
+ return theValue.Elem(), err
+ }
+
+ // Floating point numbers
+ if strings.HasPrefix(theType.String(), "float") {
+ floatValue, err := getFloatValue(theType.String(), value)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ theValue.Elem().SetFloat(floatValue)
+ return theValue.Elem(), err
+ }
+
+ // Strings
+ if theType.String() == "string" {
+ stringValue, err := getStringValue(theType.String(), value)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ theValue.Elem().SetString(stringValue)
+ return theValue.Elem(), nil
+ }
+
+ return reflect.Value{}, NewErrUnsupportedType(valueType)
+}
+
+// reflectValues converts interface{} to reflect.Value based on string type
+// representing a slice of values
+func reflectValues(valueType string, value interface{}) (reflect.Value, error) {
+ theType, ok := typesMap[valueType]
+ if !ok {
+ return reflect.Value{}, NewErrUnsupportedType(valueType)
+ }
+
+ // For NULL we return an empty slice
+ if value == nil {
+ return reflect.MakeSlice(theType, 0, 0), nil
+ }
+
+ var theValue reflect.Value
+
+ // Booleans
+ if theType.String() == "[]bool" {
+ bools := reflect.ValueOf(value)
+
+ theValue = reflect.MakeSlice(theType, bools.Len(), bools.Len())
+ for i := 0; i < bools.Len(); i++ {
+ boolValue, err := getBoolValue(strings.Split(theType.String(), "[]")[1], bools.Index(i).Interface())
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ theValue.Index(i).SetBool(boolValue)
+ }
+
+ return theValue, nil
+ }
+
+ // Integers
+ if strings.HasPrefix(theType.String(), "[]int") {
+ ints := reflect.ValueOf(value)
+
+ theValue = reflect.MakeSlice(theType, ints.Len(), ints.Len())
+ for i := 0; i < ints.Len(); i++ {
+ intValue, err := getIntValue(strings.Split(theType.String(), "[]")[1], ints.Index(i).Interface())
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ theValue.Index(i).SetInt(intValue)
+ }
+
+ return theValue, nil
+ }
+
+ // Unsigned integers
+ if strings.HasPrefix(theType.String(), "[]uint") || theType.String() == "[]byte" {
+
+		// Decode the base64 string if the value type is []uint8 or its alias []byte
+ // See: https://golang.org/pkg/encoding/json/#Marshal
+ // > Array and slice values encode as JSON arrays, except that []byte encodes as a base64-encoded string
+ if reflect.TypeOf(value).String() == "string" {
+ output, err := base64.StdEncoding.DecodeString(value.(string))
+ if err != nil {
+ return reflect.Value{}, err
+ }
+ value = output
+ }
+
+ uints := reflect.ValueOf(value)
+
+ theValue = reflect.MakeSlice(theType, uints.Len(), uints.Len())
+ for i := 0; i < uints.Len(); i++ {
+ uintValue, err := getUintValue(strings.Split(theType.String(), "[]")[1], uints.Index(i).Interface())
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ theValue.Index(i).SetUint(uintValue)
+ }
+
+ return theValue, nil
+ }
+
+ // Floating point numbers
+ if strings.HasPrefix(theType.String(), "[]float") {
+ floats := reflect.ValueOf(value)
+
+ theValue = reflect.MakeSlice(theType, floats.Len(), floats.Len())
+ for i := 0; i < floats.Len(); i++ {
+ floatValue, err := getFloatValue(strings.Split(theType.String(), "[]")[1], floats.Index(i).Interface())
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ theValue.Index(i).SetFloat(floatValue)
+ }
+
+ return theValue, nil
+ }
+
+ // Strings
+ if theType.String() == "[]string" {
+ strs := reflect.ValueOf(value)
+
+ theValue = reflect.MakeSlice(theType, strs.Len(), strs.Len())
+ for i := 0; i < strs.Len(); i++ {
+ strValue, err := getStringValue(strings.Split(theType.String(), "[]")[1], strs.Index(i).Interface())
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ theValue.Index(i).SetString(strValue)
+ }
+
+ return theValue, nil
+ }
+
+ return reflect.Value{}, NewErrUnsupportedType(valueType)
+}
+
+func getBoolValue(theType string, value interface{}) (bool, error) {
+ b, ok := value.(bool)
+ if !ok {
+ return false, typeConversionError(value, typesMap[theType].String())
+ }
+
+ return b, nil
+}
+
+func getIntValue(theType string, value interface{}) (int64, error) {
+ // We use https://golang.org/pkg/encoding/json/#Decoder.UseNumber when unmarshaling signatures.
+ // This is because JSON only supports 64-bit floating point numbers and we could lose precision
+ // when converting from float64 to signed integer
+ if strings.HasPrefix(fmt.Sprintf("%T", value), "json.Number") {
+ n, ok := value.(json.Number)
+ if !ok {
+ return 0, typeConversionError(value, typesMap[theType].String())
+ }
+
+ return n.Int64()
+ }
+
+ n, ok := value.(int64)
+ if !ok {
+ return 0, typeConversionError(value, typesMap[theType].String())
+ }
+
+ return n, nil
+}
+
+func getUintValue(theType string, value interface{}) (uint64, error) {
+ // We use https://golang.org/pkg/encoding/json/#Decoder.UseNumber when unmarshaling signatures.
+ // This is because JSON only supports 64-bit floating point numbers and we could lose precision
+ // when converting from float64 to unsigned integer
+ if strings.HasPrefix(fmt.Sprintf("%T", value), "json.Number") {
+ n, ok := value.(json.Number)
+ if !ok {
+ return 0, typeConversionError(value, typesMap[theType].String())
+ }
+
+ intVal, err := n.Int64()
+ if err != nil {
+ return 0, err
+ }
+
+ return uint64(intVal), nil
+ }
+
+ var n uint64
+ switch value.(type) {
+ case uint64:
+ n = value.(uint64)
+ case uint8:
+ n = uint64(value.(uint8))
+ default:
+ return 0, typeConversionError(value, typesMap[theType].String())
+ }
+ return n, nil
+}
+
+func getFloatValue(theType string, value interface{}) (float64, error) {
+ // We use https://golang.org/pkg/encoding/json/#Decoder.UseNumber when unmarshaling signatures.
+ // This is because JSON only supports 64-bit floating point numbers and we could lose precision
+ if strings.HasPrefix(fmt.Sprintf("%T", value), "json.Number") {
+ n, ok := value.(json.Number)
+ if !ok {
+ return 0, typeConversionError(value, typesMap[theType].String())
+ }
+
+ return n.Float64()
+ }
+
+ f, ok := value.(float64)
+ if !ok {
+ return 0, typeConversionError(value, typesMap[theType].String())
+ }
+
+ return f, nil
+}
+
+func getStringValue(theType string, value interface{}) (string, error) {
+ s, ok := value.(string)
+ if !ok {
+ return "", typeConversionError(value, typesMap[theType].String())
+ }
+
+ return s, nil
+}
+
+// IsContextType checks to see if the type is a context.Context
+func IsContextType(t reflect.Type) bool {
+ return t == ctxType
+}
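
Two sketches of the conversions this file exists for: integers arrive as json.Number because signatures are decoded with UseNumber, and []byte arrives as a base64 string per encoding/json's marshalling rules:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/RichardKnop/machinery/v1/tasks"
)

func main() {
	// Integers typically arrive as json.Number when signatures are decoded
	v, err := tasks.ReflectValue("int64", json.Number("42"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.Int()) // 42

	// []byte arrives as a base64 string and is decoded transparently
	b, err := tasks.ReflectValue("[]byte", "aGVsbG8=")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b.Bytes())) // hello
}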
diff --git a/vendor/github.com/RichardKnop/machinery/v1/tasks/result.go b/vendor/github.com/RichardKnop/machinery/v1/tasks/result.go
new file mode 100644
index 000000000..0beb62de7
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/tasks/result.go
@@ -0,0 +1,40 @@
+package tasks
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// TaskResult represents an actual return value of a processed task
+type TaskResult struct {
+ Type string `bson:"type"`
+ Value interface{} `bson:"value"`
+}
+
+// ReflectTaskResults ...
+func ReflectTaskResults(taskResults []*TaskResult) ([]reflect.Value, error) {
+ resultValues := make([]reflect.Value, len(taskResults))
+ for i, taskResult := range taskResults {
+ resultValue, err := ReflectValue(taskResult.Type, taskResult.Value)
+ if err != nil {
+ return nil, err
+ }
+ resultValues[i] = resultValue
+ }
+ return resultValues, nil
+}
+
+// HumanReadableResults ...
+func HumanReadableResults(results []reflect.Value) string {
+ if len(results) == 1 {
+ return fmt.Sprintf("%v", results[0].Interface())
+ }
+
+ readableResults := make([]string, len(results))
+ for i := 0; i < len(results); i++ {
+ readableResults[i] = fmt.Sprintf("%v", results[i].Interface())
+ }
+
+ return fmt.Sprintf("[%s]", strings.Join(readableResults, ", "))
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/tasks/signature.go b/vendor/github.com/RichardKnop/machinery/v1/tasks/signature.go
new file mode 100644
index 000000000..133f782d3
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/tasks/signature.go
@@ -0,0 +1,74 @@
+package tasks
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/google/uuid"
+)
+
+// Arg represents a single argument passed to the invocation of a task
+type Arg struct {
+ Name string `bson:"name"`
+ Type string `bson:"type"`
+ Value interface{} `bson:"value"`
+}
+
+// Headers represents the headers which should be used to direct the task
+type Headers map[string]interface{}
+
+// Set on Headers implements opentracing.TextMapWriter for trace propagation
+func (h Headers) Set(key, val string) {
+ h[key] = val
+}
+
+// ForeachKey on Headers implements opentracing.TextMapReader for trace propagation.
+// It is essentially the same as the opentracing.TextMapReader implementation except
+// for the added casting from interface{} to string.
+func (h Headers) ForeachKey(handler func(key, val string) error) error {
+ for k, v := range h {
+ // Skip any non string values
+ stringValue, ok := v.(string)
+ if !ok {
+ continue
+ }
+
+ if err := handler(k, stringValue); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Signature represents a single task invocation
+type Signature struct {
+ UUID string
+ Name string
+ RoutingKey string
+ ETA *time.Time
+ GroupUUID string
+ GroupTaskCount int
+ Args []Arg
+ Headers Headers
+ Immutable bool
+ RetryCount int
+ RetryTimeout int
+ OnSuccess []*Signature
+ OnError []*Signature
+ ChordCallback *Signature
+	// MessageGroupId for the broker, e.g. SQS
+	BrokerMessageGroupId string
+	// ReceiptHandle of the SQS message
+	SQSReceiptHandle string
+}
+
+// NewSignature creates a new task signature
+func NewSignature(name string, args []Arg) (*Signature, error) {
+ signatureID := uuid.New().String()
+ return &Signature{
+ UUID: fmt.Sprintf("task_%v", signatureID),
+ Name: name,
+ Args: args,
+ }, nil
+}
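
A sketch of constructing signatures and wiring a success callback; the task names are placeholders:

package main

import (
	"log"

	"github.com/RichardKnop/machinery/v1/tasks"
)

func main() {
	add, err := tasks.NewSignature("add", []tasks.Arg{
		{Type: "int64", Value: 1},
		{Type: "int64", Value: 2},
	})
	if err != nil {
		log.Fatal(err)
	}

	notify, _ := tasks.NewSignature("notify", nil)

	// OnSuccess signatures are published once this task succeeds
	add.OnSuccess = []*tasks.Signature{notify}

	// RoutingKey is empty here; AdjustRoutingKey will fall back to the default queue
	log.Printf("uuid=%s routing key=%q", add.UUID, add.RoutingKey)
}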
diff --git a/vendor/github.com/RichardKnop/machinery/v1/tasks/state.go b/vendor/github.com/RichardKnop/machinery/v1/tasks/state.go
new file mode 100644
index 000000000..a083b416a
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/tasks/state.go
@@ -0,0 +1,107 @@
+package tasks
+
+import "time"
+
+const (
+ // StatePending - initial state of a task
+ StatePending = "PENDING"
+ // StateReceived - when task is received by a worker
+ StateReceived = "RECEIVED"
+ // StateStarted - when the worker starts processing the task
+ StateStarted = "STARTED"
+ // StateRetry - when failed task has been scheduled for retry
+ StateRetry = "RETRY"
+ // StateSuccess - when the task is processed successfully
+ StateSuccess = "SUCCESS"
+ // StateFailure - when processing of the task fails
+ StateFailure = "FAILURE"
+)
+
+// TaskState represents a state of a task
+type TaskState struct {
+ TaskUUID string `bson:"_id"`
+ TaskName string `bson:"task_name"`
+ State string `bson:"state"`
+ Results []*TaskResult `bson:"results"`
+ Error string `bson:"error"`
+ CreatedAt time.Time `bson:"created_at"`
+}
+
+// GroupMeta stores useful metadata about tasks within the same group,
+// e.g. the UUIDs of all tasks, which are used to check whether all tasks
+// completed successfully and thus whether to trigger the chord callback
+type GroupMeta struct {
+ GroupUUID string `bson:"_id"`
+ TaskUUIDs []string `bson:"task_uuids"`
+ ChordTriggered bool `bson:"chord_triggered"`
+ Lock bool `bson:"lock"`
+ CreatedAt time.Time `bson:"created_at"`
+}
+
+// NewPendingTaskState ...
+func NewPendingTaskState(signature *Signature) *TaskState {
+ return &TaskState{
+ TaskUUID: signature.UUID,
+ TaskName: signature.Name,
+ State: StatePending,
+ CreatedAt: time.Now().UTC(),
+ }
+}
+
+// NewReceivedTaskState ...
+func NewReceivedTaskState(signature *Signature) *TaskState {
+ return &TaskState{
+ TaskUUID: signature.UUID,
+ State: StateReceived,
+ }
+}
+
+// NewStartedTaskState ...
+func NewStartedTaskState(signature *Signature) *TaskState {
+ return &TaskState{
+ TaskUUID: signature.UUID,
+ State: StateStarted,
+ }
+}
+
+// NewSuccessTaskState ...
+func NewSuccessTaskState(signature *Signature, results []*TaskResult) *TaskState {
+ return &TaskState{
+ TaskUUID: signature.UUID,
+ State: StateSuccess,
+ Results: results,
+ }
+}
+
+// NewFailureTaskState ...
+func NewFailureTaskState(signature *Signature, err string) *TaskState {
+ return &TaskState{
+ TaskUUID: signature.UUID,
+ State: StateFailure,
+ Error: err,
+ }
+}
+
+// NewRetryTaskState ...
+func NewRetryTaskState(signature *Signature) *TaskState {
+ return &TaskState{
+ TaskUUID: signature.UUID,
+ State: StateRetry,
+ }
+}
+
+// IsCompleted returns true if state is SUCCESS or FAILURE,
+// i.e. the task has finished processing and either succeeded or failed.
+func (taskState *TaskState) IsCompleted() bool {
+ return taskState.IsSuccess() || taskState.IsFailure()
+}
+
+// IsSuccess returns true if state is SUCCESS
+func (taskState *TaskState) IsSuccess() bool {
+ return taskState.State == StateSuccess
+}
+
+// IsFailure returns true if state is FAILURE
+func (taskState *TaskState) IsFailure() bool {
+ return taskState.State == StateFailure
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/tasks/task.go b/vendor/github.com/RichardKnop/machinery/v1/tasks/task.go
new file mode 100644
index 000000000..d3793d1cc
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/tasks/task.go
@@ -0,0 +1,201 @@
+package tasks
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime/debug"
+
+ opentracing "github.com/opentracing/opentracing-go"
+ opentracing_ext "github.com/opentracing/opentracing-go/ext"
+ opentracing_log "github.com/opentracing/opentracing-go/log"
+
+ "github.com/RichardKnop/machinery/v1/log"
+)
+
+// ErrTaskPanicked ...
+var ErrTaskPanicked = errors.New("Invoking task caused a panic")
+
+// Task wraps a signature and methods used to reflect task arguments and
+// return values after invoking the task
+type Task struct {
+ TaskFunc reflect.Value
+ UseContext bool
+ Context context.Context
+ Args []reflect.Value
+}
+
+type signatureCtxType struct{}
+
+var signatureCtx signatureCtxType
+
+// SignatureFromContext gets the signature from the context
+func SignatureFromContext(ctx context.Context) *Signature {
+ if ctx == nil {
+ return nil
+ }
+
+ v := ctx.Value(signatureCtx)
+ if v == nil {
+ return nil
+ }
+
+ signature, _ := v.(*Signature)
+ return signature
+}
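+
+// A minimal usage sketch: a task function that takes context.Context as its
+// first argument can read its own signature back out of the context. The
+// "add" task below is a hypothetical example, not part of this package:
+//
+//    func add(ctx context.Context, a, b int64) (int64, error) {
+//        if sig := SignatureFromContext(ctx); sig != nil {
+//            log.INFO.Printf("running task %s (%s)", sig.Name, sig.UUID)
+//        }
+//        return a + b, nil
+//    }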
+
+// NewWithSignature is the same as New but injects the signature
+func NewWithSignature(taskFunc interface{}, signature *Signature) (*Task, error) {
+ args := signature.Args
+ ctx := context.Background()
+ ctx = context.WithValue(ctx, signatureCtx, signature)
+ task := &Task{
+ TaskFunc: reflect.ValueOf(taskFunc),
+ Context: ctx,
+ }
+
+ taskFuncType := reflect.TypeOf(taskFunc)
+ if taskFuncType.NumIn() > 0 {
+ arg0Type := taskFuncType.In(0)
+ if IsContextType(arg0Type) {
+ task.UseContext = true
+ }
+ }
+
+ if err := task.ReflectArgs(args); err != nil {
+ return nil, fmt.Errorf("Reflect task args error: %s", err)
+ }
+
+ return task, nil
+}
+
+// New tries to use reflection to convert the function and arguments
+// into a reflect.Value and prepare it for invocation
+func New(taskFunc interface{}, args []Arg) (*Task, error) {
+ task := &Task{
+ TaskFunc: reflect.ValueOf(taskFunc),
+ Context: context.Background(),
+ }
+
+ taskFuncType := reflect.TypeOf(taskFunc)
+ if taskFuncType.NumIn() > 0 {
+ arg0Type := taskFuncType.In(0)
+ if IsContextType(arg0Type) {
+ task.UseContext = true
+ }
+ }
+
+ if err := task.ReflectArgs(args); err != nil {
+ return nil, fmt.Errorf("Reflect task args error: %s", err)
+ }
+
+ return task, nil
+}
+
+// Call attempts to call the task with the supplied arguments.
+//
+// `err` is set in the return value in two cases:
+// 1. The reflected function invocation panics (e.g. due to a mismatched
+// argument list).
+// 2. The task func itself returns a non-nil error.
+func (t *Task) Call() (taskResults []*TaskResult, err error) {
+ // retrieve the span from the task's context and finish it as soon as this function returns
+ if span := opentracing.SpanFromContext(t.Context); span != nil {
+ defer span.Finish()
+ }
+
+ defer func() {
+ // Recover from panic and set err.
+ if e := recover(); e != nil {
+ switch e := e.(type) {
+ default:
+ err = ErrTaskPanicked
+ case error:
+ err = e
+ case string:
+ err = errors.New(e)
+ }
+
+ // mark the span as failed and dump the error and stack trace to the span
+ if span := opentracing.SpanFromContext(t.Context); span != nil {
+ opentracing_ext.Error.Set(span, true)
+ span.LogFields(
+ opentracing_log.Error(err),
+ opentracing_log.Object("stack", string(debug.Stack())),
+ )
+ }
+
+ // Print stack trace
+ log.ERROR.Printf("%s", debug.Stack())
+ }
+ }()
+
+ args := t.Args
+
+ if t.UseContext {
+ ctxValue := reflect.ValueOf(t.Context)
+ args = append([]reflect.Value{ctxValue}, args...)
+ }
+
+ // Invoke the task
+ results := t.TaskFunc.Call(args)
+
+ // Task must return at least one value
+ if len(results) == 0 {
+ return nil, ErrTaskReturnsNoValue
+ }
+
+ // Last returned value
+ lastResult := results[len(results)-1]
+
+ // If the last returned value is not nil, it has to be of error type; if
+ // that is not the case, return an error message. Otherwise propagate the
+ // task error to the caller
+ if !lastResult.IsNil() {
+ // If the error implements the Retriable interface, return it as an ErrRetryTaskLater
+ retriableErrorInterface := reflect.TypeOf((*Retriable)(nil)).Elem()
+ if lastResult.Type().Implements(retriableErrorInterface) {
+ return nil, lastResult.Interface().(ErrRetryTaskLater)
+ }
+
+ // Otherwise, check that the result implements the standard error interface,
+ // if not, return ErrLastReturnValueMustBeError error
+ errorInterface := reflect.TypeOf((*error)(nil)).Elem()
+ if !lastResult.Type().Implements(errorInterface) {
+ return nil, ErrLastReturnValueMustBeError
+ }
+
+ // Return the standard error
+ return nil, lastResult.Interface().(error)
+ }
+
+ // Convert reflect values to task results
+ taskResults = make([]*TaskResult, len(results)-1)
+ for i := 0; i < len(results)-1; i++ {
+ val := results[i].Interface()
+ typeStr := reflect.TypeOf(val).String()
+ taskResults[i] = &TaskResult{
+ Type: typeStr,
+ Value: val,
+ }
+ }
+
+ return taskResults, err
+}
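+
+// For illustration, a sketch of the return-value contract Call enforces. The
+// hypothetical task below returns two values plus a trailing error, so Call
+// yields two TaskResults on success and the error otherwise:
+//
+//    func divmod(a, b int64) (int64, int64, error) {
+//        if b == 0 {
+//            return 0, 0, errors.New("division by zero")
+//        }
+//        return a / b, a % b, nil
+//    }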
+
+// ReflectArgs converts []Arg to []reflect.Value
+func (t *Task) ReflectArgs(args []Arg) error {
+ argValues := make([]reflect.Value, len(args))
+
+ for i, arg := range args {
+ argValue, err := ReflectValue(arg.Type, arg.Value)
+ if err != nil {
+ return err
+ }
+ argValues[i] = argValue
+ }
+
+ t.Args = argValues
+ return nil
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/tasks/validate.go b/vendor/github.com/RichardKnop/machinery/v1/tasks/validate.go
new file mode 100644
index 000000000..32d11f871
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/tasks/validate.go
@@ -0,0 +1,42 @@
+package tasks
+
+import (
+ "errors"
+ "reflect"
+)
+
+var (
+ // ErrTaskMustBeFunc is returned when the registered task is not a function
+ ErrTaskMustBeFunc = errors.New("Task must be a func type")
+ // ErrTaskReturnsNoValue is returned when the task function does not return any values
+ ErrTaskReturnsNoValue = errors.New("Task must return at least a single value")
+ // ErrLastReturnValueMustBeError is returned when the last return value of the task function is not an error
+ ErrLastReturnValueMustBeError = errors.New("Last return value of a task must be error")
+)
+
+// ValidateTask validates a task function using reflection and makes sure
+// it has a proper signature. Functions used as tasks must return at least
+// one value and the last return type must be error
+func ValidateTask(task interface{}) error {
+ v := reflect.ValueOf(task)
+ t := v.Type()
+
+ // Task must be a function
+ if t.Kind() != reflect.Func {
+ return ErrTaskMustBeFunc
+ }
+
+ // Task must return at least a single value
+ if t.NumOut() < 1 {
+ return ErrTaskReturnsNoValue
+ }
+
+ // Last return value must be error
+ lastReturnType := t.Out(t.NumOut() - 1)
+ errorInterface := reflect.TypeOf((*error)(nil)).Elem()
+ if !lastReturnType.Implements(errorInterface) {
+ return ErrLastReturnValueMustBeError
+ }
+
+ return nil
+}
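+
+// A quick sketch of what passes and fails validation (all functions here are
+// hypothetical examples):
+//
+//    ValidateTask(func() error { return nil })        // nil
+//    ValidateTask(42)                                 // ErrTaskMustBeFunc
+//    ValidateTask(func() {})                          // ErrTaskReturnsNoValue
+//    ValidateTask(func() int64 { return 0 })          // ErrLastReturnValueMustBeError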
diff --git a/vendor/github.com/RichardKnop/machinery/v1/tasks/workflow.go b/vendor/github.com/RichardKnop/machinery/v1/tasks/workflow.go
new file mode 100644
index 000000000..38a786461
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/tasks/workflow.go
@@ -0,0 +1,95 @@
+package tasks
+
+import (
+ "fmt"
+
+ "github.com/google/uuid"
+)
+
+// Chain creates a chain of tasks to be executed one after another
+type Chain struct {
+ Tasks []*Signature
+}
+
+// Group creates a set of tasks to be executed in parallel
+type Group struct {
+ GroupUUID string
+ Tasks []*Signature
+}
+
+// Chord adds an optional callback to the group, to be executed
+// after all tasks in the group have finished
+type Chord struct {
+ Group *Group
+ Callback *Signature
+}
+
+// GetUUIDs returns a slice of the group's task UUIDs
+func (group *Group) GetUUIDs() []string {
+ taskUUIDs := make([]string, len(group.Tasks))
+ for i, signature := range group.Tasks {
+ taskUUIDs[i] = signature.UUID
+ }
+ return taskUUIDs
+}
+
+// NewChain creates a new chain of tasks to be processed one by one, passing
+// results unless task signatures are set to be immutable
+func NewChain(signatures ...*Signature) (*Chain, error) {
+ // Auto generate task UUIDs if needed
+ for _, signature := range signatures {
+ if signature.UUID == "" {
+ signatureID := uuid.New().String()
+ signature.UUID = fmt.Sprintf("task_%v", signatureID)
+ }
+ }
+
+ for i := len(signatures) - 1; i > 0; i-- {
+ signatures[i-1].OnSuccess = []*Signature{signatures[i]}
+ }
+
+ chain := &Chain{Tasks: signatures}
+
+ return chain, nil
+}
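+
+// A minimal chain sketch ("add" and "multiply" are hypothetical registered
+// task names):
+//
+//    chain, err := NewChain(
+//        &Signature{Name: "add", Args: []Arg{{Type: "int64", Value: 1}, {Type: "int64", Value: 2}}},
+//        &Signature{Name: "multiply", Args: []Arg{{Type: "int64", Value: 4}}},
+//    )
+//    // Unless the second signature is Immutable, the result of "add" is
+//    // appended to the arguments of "multiply".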
+
+// NewGroup creates a new group of tasks to be processed in parallel
+func NewGroup(signatures ...*Signature) (*Group, error) {
+ // Generate a group UUID
+ groupUUID := uuid.New().String()
+ groupID := fmt.Sprintf("group_%v", groupUUID)
+
+ // Auto generate task UUIDs if needed, group tasks by common group UUID
+ for _, signature := range signatures {
+ if signature.UUID == "" {
+ signatureID := uuid.New().String()
+ signature.UUID = fmt.Sprintf("task_%v", signatureID)
+ }
+ signature.GroupUUID = groupID
+ signature.GroupTaskCount = len(signatures)
+ }
+
+ return &Group{
+ GroupUUID: groupID,
+ Tasks: signatures,
+ }, nil
+}
+
+// NewChord creates a new chord (a group of tasks with a single callback
+// to be executed after all tasks in the group have completed)
+func NewChord(group *Group, callback *Signature) (*Chord, error) {
+ if callback.UUID == "" {
+ // Generate a UUID for the chord callback
+ callbackUUID := uuid.New().String()
+ callback.UUID = fmt.Sprintf("chord_%v", callbackUUID)
+ }
+
+ // Add a chord callback to all tasks
+ for _, signature := range group.Tasks {
+ signature.ChordCallback = callback
+ }
+
+ return &Chord{Group: group, Callback: callback}, nil
+}
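+
+// A group/chord sketch ("add" and "sum" are hypothetical registered task
+// names):
+//
+//    group, _ := NewGroup(
+//        &Signature{Name: "add", Args: []Arg{{Type: "int64", Value: 1}, {Type: "int64", Value: 2}}},
+//        &Signature{Name: "add", Args: []Arg{{Type: "int64", Value: 3}, {Type: "int64", Value: 4}}},
+//    )
+//    chord, _ := NewChord(group, &Signature{Name: "sum"})
+//    // "sum" runs once after both "add" tasks complete; unless it is
+//    // Immutable it receives the group results as extra arguments.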
diff --git a/vendor/github.com/RichardKnop/machinery/v1/tracing/tracing.go b/vendor/github.com/RichardKnop/machinery/v1/tracing/tracing.go
new file mode 100644
index 000000000..68d835925
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/tracing/tracing.go
@@ -0,0 +1,141 @@
+package tracing
+
+import (
+ "encoding/json"
+
+ "github.com/RichardKnop/machinery/v1/tasks"
+
+ opentracing "github.com/opentracing/opentracing-go"
+ opentracing_ext "github.com/opentracing/opentracing-go/ext"
+ opentracing_log "github.com/opentracing/opentracing-go/log"
+)
+
+// opentracing tags
+var (
+ MachineryTag = opentracing.Tag{Key: string(opentracing_ext.Component), Value: "machinery"}
+ WorkflowGroupTag = opentracing.Tag{Key: "machinery.workflow", Value: "group"}
+ WorkflowChordTag = opentracing.Tag{Key: "machinery.workflow", Value: "chord"}
+ WorkflowChainTag = opentracing.Tag{Key: "machinery.workflow", Value: "chain"}
+)
+
+// StartSpanFromHeaders will extract a span from the signature headers
+// and start a new span with the given operation name.
+func StartSpanFromHeaders(headers tasks.Headers, operationName string) opentracing.Span {
+ // Try to extract the span context from the carrier.
+ spanContext, err := opentracing.GlobalTracer().Extract(opentracing.TextMap, headers)
+
+ // Create a new span from the span context if found or start a new trace with the function name.
+ // For clarity add the machinery component tag.
+ span := opentracing.StartSpan(
+ operationName,
+ ConsumerOption(spanContext),
+ MachineryTag,
+ )
+
+ // Log any error but don't fail
+ if err != nil {
+ span.LogFields(opentracing_log.Error(err))
+ }
+
+ return span
+}
+
+// HeadersWithSpan will inject a span into the signature headers
+func HeadersWithSpan(headers tasks.Headers, span opentracing.Span) tasks.Headers {
+ // initialize the headers if they are nil
+ if headers == nil {
+ headers = make(tasks.Headers)
+ }
+
+ if err := opentracing.GlobalTracer().Inject(span.Context(), opentracing.TextMap, headers); err != nil {
+ span.LogFields(opentracing_log.Error(err))
+ }
+
+ return headers
+}
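+
+// Together these helpers propagate a trace across the broker. A sketch,
+// assuming a global tracer has been registered and signature is a
+// *tasks.Signature:
+//
+//    // Producer side: inject the current span into the signature headers.
+//    signature.Headers = HeadersWithSpan(signature.Headers, span)
+//
+//    // Consumer side: resume the trace from the received headers.
+//    span := StartSpanFromHeaders(signature.Headers, signature.Name)
+//    defer span.Finish()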
+
+type consumerOption struct {
+ producerContext opentracing.SpanContext
+}
+
+func (c consumerOption) Apply(o *opentracing.StartSpanOptions) {
+ if c.producerContext != nil {
+ opentracing.FollowsFrom(c.producerContext).Apply(o)
+ }
+ opentracing_ext.SpanKindConsumer.Apply(o)
+}
+
+// ConsumerOption returns a StartSpanOption that marks the span as a consumer, optionally following from the producer's span context
+func ConsumerOption(producer opentracing.SpanContext) opentracing.StartSpanOption {
+ return consumerOption{producer}
+}
+
+type producerOption struct{}
+
+func (p producerOption) Apply(o *opentracing.StartSpanOptions) {
+ opentracing_ext.SpanKindProducer.Apply(o)
+}
+
+// ProducerOption returns a StartSpanOption that marks the span as a producer
+func ProducerOption() opentracing.StartSpanOption {
+ return producerOption{}
+}
+
+// AnnotateSpanWithSignatureInfo tags the span with details about the task signature
+func AnnotateSpanWithSignatureInfo(span opentracing.Span, signature *tasks.Signature) {
+ // tag the span with some info about the signature
+ span.SetTag("signature.name", signature.Name)
+ span.SetTag("signature.uuid", signature.UUID)
+
+ if signature.GroupUUID != "" {
+ span.SetTag("signature.group.uuid", signature.UUID)
+ }
+
+ if signature.ChordCallback != nil {
+ span.SetTag("signature.chord.callback.uuid", signature.ChordCallback.UUID)
+ span.SetTag("signature.chord.callback.name", signature.ChordCallback.Name)
+ }
+}
+
+// AnnotateSpanWithChainInfo tags the span with chain details and injects it into each task's headers
+func AnnotateSpanWithChainInfo(span opentracing.Span, chain *tasks.Chain) {
+ // tag the span with some info about the chain
+ span.SetTag("chain.tasks.length", len(chain.Tasks))
+
+ // inject the tracing span into the tasks signature headers
+ for _, signature := range chain.Tasks {
+ signature.Headers = HeadersWithSpan(signature.Headers, span)
+ }
+}
+
+// AnnotateSpanWithGroupInfo tags the span with group details and injects it into each task's headers
+func AnnotateSpanWithGroupInfo(span opentracing.Span, group *tasks.Group, sendConcurrency int) {
+ // tag the span with some info about the group
+ span.SetTag("group.uuid", group.GroupUUID)
+ span.SetTag("group.tasks.length", len(group.Tasks))
+ span.SetTag("group.concurrency", sendConcurrency)
+
+ // encode the task UUIDs to JSON; if that fails, fall back to the raw slice
+ if taskUUIDs, err := json.Marshal(group.GetUUIDs()); err == nil {
+ span.SetTag("group.tasks", string(taskUUIDs))
+ } else {
+ span.SetTag("group.tasks", group.GetUUIDs())
+ }
+
+ // inject the tracing span into the tasks signature headers
+ for _, signature := range group.Tasks {
+ signature.Headers = HeadersWithSpan(signature.Headers, span)
+ }
+}
+
+// AnnotateSpanWithChordInfo tags the span with chord callback details and annotates the underlying group
+func AnnotateSpanWithChordInfo(span opentracing.Span, chord *tasks.Chord, sendConcurrency int) {
+ // tag the span with chord specific info
+ span.SetTag("chord.callback.uuid", chord.Callback.UUID)
+
+ // inject the tracing span into the callback signature
+ chord.Callback.Headers = HeadersWithSpan(chord.Callback.Headers, span)
+
+ // tag the span for the group part of the chord
+ AnnotateSpanWithGroupInfo(span, chord.Group, sendConcurrency)
+}
diff --git a/vendor/github.com/RichardKnop/machinery/v1/worker.go b/vendor/github.com/RichardKnop/machinery/v1/worker.go
new file mode 100644
index 000000000..9f0038d7d
--- /dev/null
+++ b/vendor/github.com/RichardKnop/machinery/v1/worker.go
@@ -0,0 +1,393 @@
+package machinery
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+ "time"
+
+ "github.com/RichardKnop/machinery/v1/backends/amqp"
+ "github.com/RichardKnop/machinery/v1/log"
+ "github.com/RichardKnop/machinery/v1/retry"
+ "github.com/RichardKnop/machinery/v1/tasks"
+ "github.com/RichardKnop/machinery/v1/tracing"
+ "github.com/opentracing/opentracing-go"
+)
+
+// Worker represents a single worker process
+type Worker struct {
+ server *Server
+ ConsumerTag string
+ Concurrency int
+ Queue string
+ errorHandler func(err error)
+ preTaskHandler func(*tasks.Signature)
+ postTaskHandler func(*tasks.Signature)
+}
+
+// Launch starts a new worker process. The worker subscribes
+// to the default queue and processes incoming registered tasks
+func (worker *Worker) Launch() error {
+ errorsChan := make(chan error)
+
+ worker.LaunchAsync(errorsChan)
+
+ return <-errorsChan
+}
+
+// LaunchAsync is a non-blocking version of Launch
+func (worker *Worker) LaunchAsync(errorsChan chan<- error) {
+ cnf := worker.server.GetConfig()
+ broker := worker.server.GetBroker()
+
+ // Log some useful information about worker configuration
+ log.INFO.Printf("Launching a worker with the following settings:")
+ log.INFO.Printf("- Broker: %s", cnf.Broker)
+ if worker.Queue == "" {
+ log.INFO.Printf("- DefaultQueue: %s", cnf.DefaultQueue)
+ } else {
+ log.INFO.Printf("- CustomQueue: %s", worker.Queue)
+ }
+ log.INFO.Printf("- ResultBackend: %s", cnf.ResultBackend)
+ if cnf.AMQP != nil {
+ log.INFO.Printf("- AMQP: %s", cnf.AMQP.Exchange)
+ log.INFO.Printf(" - Exchange: %s", cnf.AMQP.Exchange)
+ log.INFO.Printf(" - ExchangeType: %s", cnf.AMQP.ExchangeType)
+ log.INFO.Printf(" - BindingKey: %s", cnf.AMQP.BindingKey)
+ log.INFO.Printf(" - PrefetchCount: %d", cnf.AMQP.PrefetchCount)
+ }
+
+ // Goroutine to start broker consumption and handle retries when broker connection dies
+ go func() {
+ for {
+ retry, err := broker.StartConsuming(worker.ConsumerTag, worker.Concurrency, worker)
+
+ if retry {
+ if worker.errorHandler != nil {
+ worker.errorHandler(err)
+ } else {
+ log.WARNING.Printf("Broker failed with error: %s", err)
+ }
+ } else {
+ errorsChan <- err // stop the goroutine
+ return
+ }
+ }
+ }()
+ if !cnf.NoUnixSignals {
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
+ var signalsReceived uint
+
+ // Goroutine to handle SIGINT and SIGTERM signals
+ go func() {
+ for {
+ select {
+ case s := <-sig:
+ log.WARNING.Printf("Signal received: %v", s)
+ signalsReceived++
+
+ if signalsReceived < 2 {
+ // After first Ctrl+C start quitting the worker gracefully
+ log.WARNING.Print("Waiting for running tasks to finish before shutting down")
+ go func() {
+ worker.Quit()
+ errorsChan <- errors.New("Worker quit gracefully")
+ }()
+ } else {
+ // Abort the program when user hits Ctrl+C second time in a row
+ errorsChan <- errors.New("Worker quit abruptly")
+ }
+ }
+ }
+ }()
+ }
+}
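+
+// A minimal launch sketch, assuming a *Server configured elsewhere (e.g. via
+// NewServer) with its tasks already registered:
+//
+//    worker := server.NewWorker("worker_1", 10)
+//    if err := worker.Launch(); err != nil {
+//        log.FATAL.Fatal(err)
+//    }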
+
+// CustomQueue returns the custom queue of the running worker process
+func (worker *Worker) CustomQueue() string {
+ return worker.Queue
+}
+
+// Quit tears down the running worker process
+func (worker *Worker) Quit() {
+ worker.server.GetBroker().StopConsuming()
+}
+
+// Process handles received tasks and triggers success/error callbacks
+func (worker *Worker) Process(signature *tasks.Signature) error {
+ // If the task is not registered with this worker, do not continue;
+ // just return nil, as we do not want to restart the worker process
+ if !worker.server.IsTaskRegistered(signature.Name) {
+ return nil
+ }
+
+ taskFunc, err := worker.server.GetRegisteredTask(signature.Name)
+ if err != nil {
+ return nil
+ }
+
+ // Update task state to RECEIVED
+ if err = worker.server.GetBackend().SetStateReceived(signature); err != nil {
+ return fmt.Errorf("Set state to 'received' for task %s returned error: %s", signature.UUID, err)
+ }
+
+ // Prepare task for processing
+ task, err := tasks.NewWithSignature(taskFunc, signature)
+ // If this failed, the task is malformed and probably has an invalid
+ // signature, so go directly to taskFailed without checking whether to retry
+ if err != nil {
+ worker.taskFailed(signature, err)
+ return err
+ }
+
+ // try to extract trace span from headers and add it to the function context
+ // so it can be used inside the function if it has context.Context as the first
+ // argument. Start a new span if it isn't found.
+ taskSpan := tracing.StartSpanFromHeaders(signature.Headers, signature.Name)
+ tracing.AnnotateSpanWithSignatureInfo(taskSpan, signature)
+ task.Context = opentracing.ContextWithSpan(task.Context, taskSpan)
+
+ // Update task state to STARTED
+ if err = worker.server.GetBackend().SetStateStarted(signature); err != nil {
+ return fmt.Errorf("Set state to 'started' for task %s returned error: %s", signature.UUID, err)
+ }
+
+ // Run handler before the task is called
+ if worker.preTaskHandler != nil {
+ worker.preTaskHandler(signature)
+ }
+
+ // Defer the post-task handler to run at the end of the task
+ if worker.postTaskHandler != nil {
+ defer worker.postTaskHandler(signature)
+ }
+
+ // Call the task
+ results, err := task.Call()
+ if err != nil {
+ // If a tasks.ErrRetryTaskLater was returned from the task,
+ // retry the task after specified duration
+ retriableErr, ok := err.(tasks.ErrRetryTaskLater)
+ if ok {
+ return worker.retryTaskIn(signature, retriableErr.RetryIn())
+ }
+
+ // Otherwise, execute default retry logic based on signature.RetryCount
+ // and signature.RetryTimeout values
+ if signature.RetryCount > 0 {
+ return worker.taskRetry(signature)
+ }
+
+ return worker.taskFailed(signature, err)
+ }
+
+ return worker.taskSucceeded(signature, results)
+}
+
+// taskRetry decrements the RetryCount counter and republishes the task to the queue
+func (worker *Worker) taskRetry(signature *tasks.Signature) error {
+ // Update task state to RETRY
+ if err := worker.server.GetBackend().SetStateRetry(signature); err != nil {
+ return fmt.Errorf("Set state to 'retry' for task %s returned error: %s", signature.UUID, err)
+ }
+
+ // Decrement the retry counter; when it reaches 0, we won't retry again
+ signature.RetryCount--
+
+ // Increase retry timeout
+ signature.RetryTimeout = retry.FibonacciNext(signature.RetryTimeout)
+
+ // Delay task by signature.RetryTimeout seconds
+ eta := time.Now().UTC().Add(time.Second * time.Duration(signature.RetryTimeout))
+ signature.ETA = &eta
+
+ log.WARNING.Printf("Task %s failed. Going to retry in %d seconds.", signature.UUID, signature.RetryTimeout)
+
+ // Send the task back to the queue
+ _, err := worker.server.SendTask(signature)
+ return err
+}
+
+// retryTaskIn republishes the task to the queue with an ETA of now + retryIn
+func (worker *Worker) retryTaskIn(signature *tasks.Signature, retryIn time.Duration) error {
+ // Update task state to RETRY
+ if err := worker.server.GetBackend().SetStateRetry(signature); err != nil {
+ return fmt.Errorf("Set state to 'retry' for task %s returned error: %s", signature.UUID, err)
+ }
+
+ // Delay task by retryIn duration
+ eta := time.Now().UTC().Add(retryIn)
+ signature.ETA = &eta
+
+ log.WARNING.Printf("Task %s failed. Going to retry in %.0f seconds.", signature.UUID, retryIn.Seconds())
+
+ // Send the task back to the queue
+ _, err := worker.server.SendTask(signature)
+ return err
+}
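+
+// A task opts into this path by returning a tasks.ErrRetryTaskLater. A
+// sketch, assuming this version exposes tasks.NewErrRetryTaskLater and a
+// hypothetical checkUpstream helper:
+//
+//    func pollUpstream() (string, error) {
+//        ready, body := checkUpstream()
+//        if !ready {
+//            return "", tasks.NewErrRetryTaskLater("upstream not ready", 30*time.Second)
+//        }
+//        return body, nil
+//    }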
+
+// taskSucceeded updates the task state and triggers success callbacks or a
+// chord callback if this was the last task of a group with a chord callback
+func (worker *Worker) taskSucceeded(signature *tasks.Signature, taskResults []*tasks.TaskResult) error {
+ // Update task state to SUCCESS
+ if err := worker.server.GetBackend().SetStateSuccess(signature, taskResults); err != nil {
+ return fmt.Errorf("Set state to 'success' for task %s returned error: %s", signature.UUID, err)
+ }
+
+ // Log human-readable results of the processed task
+ var debugResults = "[]"
+ results, err := tasks.ReflectTaskResults(taskResults)
+ if err != nil {
+ log.WARNING.Print(err)
+ } else {
+ debugResults = tasks.HumanReadableResults(results)
+ }
+ log.DEBUG.Printf("Processed task %s. Results = %s", signature.UUID, debugResults)
+
+ // Trigger success callbacks
+ for _, successTask := range signature.OnSuccess {
+ if !signature.Immutable {
+ // Pass results of the task to success callbacks
+ for _, taskResult := range taskResults {
+ successTask.Args = append(successTask.Args, tasks.Arg{
+ Type: taskResult.Type,
+ Value: taskResult.Value,
+ })
+ }
+ }
+
+ worker.server.SendTask(successTask)
+ }
+
+ // If the task was not part of a group, just return
+ if signature.GroupUUID == "" {
+ return nil
+ }
+
+ // Check if all tasks in the group have completed
+ groupCompleted, err := worker.server.GetBackend().GroupCompleted(
+ signature.GroupUUID,
+ signature.GroupTaskCount,
+ )
+ if err != nil {
+ return fmt.Errorf("Completed check for group %s returned error: %s", signature.GroupUUID, err)
+ }
+
+ // If the group has not yet completed, just return
+ if !groupCompleted {
+ return nil
+ }
+
+ // Defer purging of group meta queue if we are using AMQP backend
+ if worker.hasAMQPBackend() {
+ defer worker.server.GetBackend().PurgeGroupMeta(signature.GroupUUID)
+ }
+
+ // There is no chord callback, just return
+ if signature.ChordCallback == nil {
+ return nil
+ }
+
+ // Trigger chord callback
+ shouldTrigger, err := worker.server.GetBackend().TriggerChord(signature.GroupUUID)
+ if err != nil {
+ return fmt.Errorf("Triggering chord for group %s returned error: %s", signature.GroupUUID, err)
+ }
+
+ // Chord has already been triggered
+ if !shouldTrigger {
+ return nil
+ }
+
+ // Get task states
+ taskStates, err := worker.server.GetBackend().GroupTaskStates(
+ signature.GroupUUID,
+ signature.GroupTaskCount,
+ )
+ if err != nil {
+ log.ERROR.Printf("Failed to get task states for group %s. Error = %v", signature.GroupUUID, err)
+ return nil
+ }
+
+ // Append group tasks' return values to chord task if it's not immutable
+ for _, taskState := range taskStates {
+ if !taskState.IsSuccess() {
+ return nil
+ }
+
+ if !signature.ChordCallback.Immutable {
+ // Pass results of the task to the chord callback
+ for _, taskResult := range taskState.Results {
+ signature.ChordCallback.Args = append(signature.ChordCallback.Args, tasks.Arg{
+ Type: taskResult.Type,
+ Value: taskResult.Value,
+ })
+ }
+ }
+ }
+
+ // Send the chord task
+ _, err = worker.server.SendTask(signature.ChordCallback)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// taskFailed updates the task state and triggers error callbacks
+func (worker *Worker) taskFailed(signature *tasks.Signature, taskErr error) error {
+ // Update task state to FAILURE
+ if err := worker.server.GetBackend().SetStateFailure(signature, taskErr.Error()); err != nil {
+ return fmt.Errorf("Set state to 'failure' for task %s returned error: %s", signature.UUID, err)
+ }
+
+ if worker.errorHandler != nil {
+ worker.errorHandler(taskErr)
+ } else {
+ log.ERROR.Printf("Failed processing task %s. Error = %v", signature.UUID, taskErr)
+ }
+
+ // Trigger error callbacks
+ for _, errorTask := range signature.OnError {
+ // Pass error as a first argument to error callbacks
+ args := append([]tasks.Arg{{
+ Type: "string",
+ Value: taskErr.Error(),
+ }}, errorTask.Args...)
+ errorTask.Args = args
+ worker.server.SendTask(errorTask)
+ }
+
+ return nil
+}
+
+// hasAMQPBackend returns true if the worker uses the AMQP backend
+func (worker *Worker) hasAMQPBackend() bool {
+ _, ok := worker.server.GetBackend().(*amqp.Backend)
+ return ok
+}
+
+// SetErrorHandler sets a custom error handler for task errors.
+// The default behavior is just to log the error after all retry attempts fail
+func (worker *Worker) SetErrorHandler(handler func(err error)) {
+ worker.errorHandler = handler
+}
+
+// SetPreTaskHandler sets a custom handler func to run before a job is started
+func (worker *Worker) SetPreTaskHandler(handler func(*tasks.Signature)) {
+ worker.preTaskHandler = handler
+}
+
+// SetPostTaskHandler sets a custom handler func to run at the end of a job
+func (worker *Worker) SetPostTaskHandler(handler func(*tasks.Signature)) {
+ worker.postTaskHandler = handler
+}
+
+// GetServer returns the worker's server
+func (worker *Worker) GetServer() *Server {
+ return worker.server
+}
diff --git a/vendor/github.com/RichardKnop/redsync/.gitlab-ci.yml b/vendor/github.com/RichardKnop/redsync/.gitlab-ci.yml
new file mode 100644
index 000000000..497d40b15
--- /dev/null
+++ b/vendor/github.com/RichardKnop/redsync/.gitlab-ci.yml
@@ -0,0 +1,24 @@
+before_script:
+ - mkdir -p $GOPATH/src/github.com/redsync
+ - ln -s $CI_PROJECT_DIR $GOPATH/src/github.com/redsync/redsync
+ - cd $GOPATH/src/github.com/redsync/redsync
+ - apt-get update
+ - apt-get -y install redis-server
+
+stages:
+ - build
+ - test
+
+build-go-1.5:
+ image: golang:1.5
+ stage: build
+ script:
+ - go get -v
+ - go build -v
+
+test-go-1.5:
+ image: golang:1.5
+ stage: test
+ script:
+ - go get -v -t
+ - go test -v
diff --git a/vendor/github.com/RichardKnop/redsync/LICENSE b/vendor/github.com/RichardKnop/redsync/LICENSE
new file mode 100644
index 000000000..8498c11a2
--- /dev/null
+++ b/vendor/github.com/RichardKnop/redsync/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2016, Mahmud Ridwan
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the Redsync nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/RichardKnop/redsync/README.md b/vendor/github.com/RichardKnop/redsync/README.md
new file mode 100644
index 000000000..69173d67f
--- /dev/null
+++ b/vendor/github.com/RichardKnop/redsync/README.md
@@ -0,0 +1,29 @@
+# Redsync
+
+[Build Status](https://drone.io/github.com/go-redsync/redsync/latest)
+
+Redsync provides a Redis-based distributed mutual exclusion lock implementation for Go as described in [this post](http://redis.io/topics/distlock). A reference library (by [antirez](https://github.com/antirez)) for Ruby is available at [github.com/antirez/redlock-rb](https://github.com/antirez/redlock-rb).
+
+## Installation
+
+Install Redsync using the go get command:
+
+ $ go get gopkg.in/redsync.v1
+
+The only dependencies are the Go distribution and [Redigo](https://github.com/gomodule/redigo).
+
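+## Usage
+
+A minimal sketch of acquiring and releasing a lock. It assumes a Redis server
+listening on localhost:6379 and uses Redigo for the connection pool:
+
+```go
+package main
+
+import (
+	"log"
+	"time"
+
+	"github.com/gomodule/redigo/redis"
+	redsync "gopkg.in/redsync.v1"
+)
+
+func main() {
+	pool := &redis.Pool{
+		MaxIdle: 3,
+		Dial: func() (redis.Conn, error) {
+			return redis.Dial("tcp", "localhost:6379")
+		},
+	}
+
+	rs := redsync.New([]redsync.Pool{pool})
+	mutex := rs.NewMutex("my-lock", redsync.SetExpiry(10*time.Second))
+
+	if err := mutex.Lock(); err != nil {
+		log.Fatal(err)
+	}
+	defer mutex.Unlock()
+
+	// Critical section: only one process holds "my-lock" at a time.
+}
+```
+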
+## Documentation
+
+- [Reference](https://godoc.org/gopkg.in/redsync.v1)
+
+## Contributing
+
+Contributions are welcome.
+
+## License
+
+Redsync is available under the [BSD (3-Clause) License](https://opensource.org/licenses/BSD-3-Clause).
+
+## Disclaimer
+
+This code implements an algorithm which is currently a proposal and has not been formally analyzed. Make sure you understand how it works before using it in production environments.
diff --git a/vendor/github.com/RichardKnop/redsync/VERSION b/vendor/github.com/RichardKnop/redsync/VERSION
new file mode 100644
index 000000000..626799f0f
--- /dev/null
+++ b/vendor/github.com/RichardKnop/redsync/VERSION
@@ -0,0 +1 @@
+v1
diff --git a/vendor/github.com/RichardKnop/redsync/doc.go b/vendor/github.com/RichardKnop/redsync/doc.go
new file mode 100644
index 000000000..b215b6ea7
--- /dev/null
+++ b/vendor/github.com/RichardKnop/redsync/doc.go
@@ -0,0 +1,4 @@
+// Package redsync provides a Redis-based distributed mutual exclusion lock implementation as described in the post http://redis.io/topics/distlock.
+//
+// Values containing the types defined in this package should not be copied.
+package redsync
diff --git a/vendor/github.com/RichardKnop/redsync/error.go b/vendor/github.com/RichardKnop/redsync/error.go
new file mode 100644
index 000000000..47cc26ac0
--- /dev/null
+++ b/vendor/github.com/RichardKnop/redsync/error.go
@@ -0,0 +1,5 @@
+package redsync
+
+import "errors"
+
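+// ErrFailed is returned by Mutex.Lock when the lock cannot be acquired on a
+// quorum of pools within the configured number of tries.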
+var ErrFailed = errors.New("redsync: failed to acquire lock")
diff --git a/vendor/github.com/RichardKnop/redsync/mutex.go b/vendor/github.com/RichardKnop/redsync/mutex.go
new file mode 100644
index 000000000..f0523ca95
--- /dev/null
+++ b/vendor/github.com/RichardKnop/redsync/mutex.go
@@ -0,0 +1,145 @@
+package redsync
+
+import (
+ "crypto/rand"
+ "encoding/base64"
+ "sync"
+ "time"
+
+ "github.com/gomodule/redigo/redis"
+)
+
+// A Mutex is a distributed mutual exclusion lock.
+type Mutex struct {
+ name string
+ expiry time.Duration
+
+ tries int
+ delay time.Duration
+
+ factor float64
+
+ quorum int
+
+ value string
+ until time.Time
+
+ nodem sync.Mutex
+
+ pools []Pool
+}
+
+// Lock locks m. If it returns an error, you may retry acquiring the lock by calling this method again.
+func (m *Mutex) Lock() error {
+ m.nodem.Lock()
+ defer m.nodem.Unlock()
+
+ value, err := m.genValue()
+ if err != nil {
+ return err
+ }
+
+ for i := 0; i < m.tries; i++ {
+ if i != 0 {
+ time.Sleep(m.delay)
+ }
+
+ start := time.Now()
+
+ n := 0
+ for _, pool := range m.pools {
+ ok := m.acquire(pool, value)
+ if ok {
+ n++
+ }
+ }
+
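+ // The lock is held only if a quorum acquired it and there is validity
+ // time remaining: the expiry minus the time spent acquiring, minus a
+ // clock drift allowance of factor*expiry plus 2ms, per the Redlock proposal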
+ until := time.Now().Add(m.expiry - time.Since(start) - time.Duration(int64(float64(m.expiry)*m.factor)) + 2*time.Millisecond)
+ if n >= m.quorum && time.Now().Before(until) {
+ m.value = value
+ m.until = until
+ return nil
+ }
+ for _, pool := range m.pools {
+ m.release(pool, value)
+ }
+ }
+
+ return ErrFailed
+}
+
+// Unlock unlocks m and returns the status of unlock.
+func (m *Mutex) Unlock() bool {
+ m.nodem.Lock()
+ defer m.nodem.Unlock()
+
+ n := 0
+ for _, pool := range m.pools {
+ ok := m.release(pool, m.value)
+ if ok {
+ n++
+ }
+ }
+ return n >= m.quorum
+}
+
+// Extend resets the mutex's expiry and returns the status of expiry extension.
+func (m *Mutex) Extend() bool {
+ m.nodem.Lock()
+ defer m.nodem.Unlock()
+
+ n := 0
+ for _, pool := range m.pools {
+ ok := m.touch(pool, m.value, int(m.expiry/time.Millisecond))
+ if ok {
+ n++
+ }
+ }
+ return n >= m.quorum
+}
+
+func (m *Mutex) genValue() (string, error) {
+ b := make([]byte, 32)
+ _, err := rand.Read(b)
+ if err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(b), nil
+}
+
+func (m *Mutex) acquire(pool Pool, value string) bool {
+ conn := pool.Get()
+ defer conn.Close()
+ reply, err := redis.String(conn.Do("SET", m.name, value, "NX", "PX", int(m.expiry/time.Millisecond)))
+ return err == nil && reply == "OK"
+}
+
+var deleteScript = redis.NewScript(1, `
+ if redis.call("GET", KEYS[1]) == ARGV[1] then
+ return redis.call("DEL", KEYS[1])
+ else
+ return 0
+ end
+`)
+
+func (m *Mutex) release(pool Pool, value string) bool {
+ conn := pool.Get()
+ defer conn.Close()
+ status, err := deleteScript.Do(conn, m.name, value)
+ return err == nil && status != 0
+}
+
+var touchScript = redis.NewScript(1, `
+ if redis.call("GET", KEYS[1]) == ARGV[1] then
+ return redis.call("SET", KEYS[1], ARGV[1], "XX", "PX", ARGV[2])
+ else
+ return "ERR"
+ end
+`)
+
+func (m *Mutex) touch(pool Pool, value string, expiry int) bool {
+ conn := pool.Get()
+ defer conn.Close()
+ status, err := redis.String(touchScript.Do(conn, m.name, value, expiry))
+ return err == nil && status != "ERR"
+}
diff --git a/vendor/github.com/RichardKnop/redsync/redis.go b/vendor/github.com/RichardKnop/redsync/redis.go
new file mode 100644
index 000000000..d0b4b7841
--- /dev/null
+++ b/vendor/github.com/RichardKnop/redsync/redis.go
@@ -0,0 +1,8 @@
+package redsync
+
+import "github.com/gomodule/redigo/redis"
+
+// A Pool maintains a pool of Redis connections.
+type Pool interface {
+ Get() redis.Conn
+}
diff --git a/vendor/github.com/RichardKnop/redsync/redsync.go b/vendor/github.com/RichardKnop/redsync/redsync.go
new file mode 100644
index 000000000..4baa53196
--- /dev/null
+++ b/vendor/github.com/RichardKnop/redsync/redsync.go
@@ -0,0 +1,73 @@
+package redsync
+
+import "time"
+
+// Redsync provides a simple method for creating distributed mutexes using multiple Redis connection pools.
+type Redsync struct {
+ pools []Pool
+}
+
+// New creates and returns a new Redsync instance from given Redis connection pools.
+func New(pools []Pool) *Redsync {
+ return &Redsync{
+ pools: pools,
+ }
+}
+
+// NewMutex returns a new distributed mutex with given name.
+func (r *Redsync) NewMutex(name string, options ...Option) *Mutex {
+ m := &Mutex{
+ name: name,
+ expiry: 8 * time.Second,
+ tries: 32,
+ delay: 500 * time.Millisecond,
+ factor: 0.01,
+ quorum: len(r.pools)/2 + 1,
+ pools: r.pools,
+ }
+ for _, o := range options {
+ o.Apply(m)
+ }
+ return m
+}
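+
+// The defaults above follow the Redlock proposal: a majority quorum (n/2+1
+// of the pools must accept the lock), an 8 second expiry, and up to 32
+// acquisition attempts spaced 500ms apart. With 5 pools, for example, at
+// least 3 must acquire the key for Lock to succeed.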
+
+// An Option configures a mutex.
+type Option interface {
+ Apply(*Mutex)
+}
+
+// OptionFunc is a function that configures a mutex.
+type OptionFunc func(*Mutex)
+
+// Apply calls f(mutex)
+func (f OptionFunc) Apply(mutex *Mutex) {
+ f(mutex)
+}
+
+// SetExpiry can be used to set the expiry of a mutex to the given value.
+func SetExpiry(expiry time.Duration) Option {
+ return OptionFunc(func(m *Mutex) {
+ m.expiry = expiry
+ })
+}
+
+// SetTries can be used to set the number of times lock acquire is attempted.
+func SetTries(tries int) Option {
+ return OptionFunc(func(m *Mutex) {
+ m.tries = tries
+ })
+}
+
+// SetRetryDelay can be used to set the amount of time to wait between retries.
+func SetRetryDelay(delay time.Duration) Option {
+ return OptionFunc(func(m *Mutex) {
+ m.delay = delay
+ })
+}
+
+// SetDriftFactor can be used to set the clock drift factor.
+func SetDriftFactor(factor float64) Option {
+ return OptionFunc(func(m *Mutex) {
+ m.factor = factor
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
new file mode 100644
index 000000000..899129ecc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
new file mode 100644
index 000000000..99849c0e1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -0,0 +1,164 @@
+// Package awserr represents API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Get error details
+// log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+// // Prints out full error message, including original error if there was one.
+// log.Println("Error:", awsErr.Error())
+//
+// // Get original error
+// if origErr := awsErr.OrigErr(); origErr != nil {
+// // operate on original error.
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type Error interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
+type BatchError interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+ // Satisfy the base Error interface.
+ Error
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr is not nil, it will be wrapped by the returned Error as its
+// original error.
+func New(code, message string, origErr error) Error {
+ var errs []error
+ if origErr != nil {
+ errs = append(errs, origErr)
+ }
+ return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns a BatchedErrors wrapping a collection of errors as an
+// array of errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+ return newBaseError(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// prior to reaching the service such as a connection error.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if reqerr, ok := err.(RequestFailure); ok {
+// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+// } else {
+// log.Println("Error:", err.Error())
+// }
+// }
+//
+// Combined with awserr.Error:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Generic AWS Error with Code, Message, and original error (if any)
+// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+// if reqErr, ok := err.(awserr.RequestFailure); ok {
+// // A service error occurred
+// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type RequestFailure interface {
+ Error
+
+ // The status code of the HTTP response.
+ StatusCode() int
+
+ // The request ID returned by the service for a request failure. This will
+ // be empty if no request ID is available such as the request failed due
+ // to a connection error.
+ RequestID() string
+}
+
+// NewRequestFailure returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all errors which involve service requests, even if
+// the request failed without a service response but had an HTTP status code
+// that may be meaningful.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+ return newRequestError(err, statusCode, reqID)
+}
+
+// UnmarshalError provides the interface for the SDK failing to unmarshal data.
+type UnmarshalError interface {
+ awsError
+ Bytes() []byte
+}
+
+// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding
+// the bytes that fail to unmarshal to the error.
+func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError {
+ return &unmarshalError{
+ awsError: New("UnmarshalError", msg, err),
+ bytes: bytes,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
new file mode 100644
index 000000000..9cf7eaf40
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
@@ -0,0 +1,221 @@
+package awserr
+
+import (
+ "encoding/hex"
+ "fmt"
+)
+
+// SprintError returns a string of the formatted error code.
+//
+// Both extra and origErr are optional. If they are included their lines
+// will be added, but if they are not included their lines will be ignored.
+func SprintError(code, message, extra string, origErr error) string {
+ msg := fmt.Sprintf("%s: %s", code, message)
+ if extra != "" {
+ msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+ }
+ if origErr != nil {
+ msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+ }
+ return msg
+}
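+
+// For example, the call below (illustrative values) produces a two-line
+// message:
+//
+//    SprintError("AccessDenied", "no credentials", "", errors.New("token expired"))
+//
+// yields:
+//
+//    AccessDenied: no credentials
+//    caused by: token expired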
+
+// A baseError wraps the code and message which defines an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error interface,
+// and for any error which does not fit into a specific error wrapper type.
+type baseError struct {
+ // Classification of error
+ code string
+
+ // Detailed information about error
+ message string
+
+ // Optional original error this error is based off of. Allows building
+ // chained errors.
+ errs []error
+}
+
+// newBaseError returns an error object for the code, message, and errors.
+//
+// code is a short, whitespace-free phrase depicting the classification of
+// the error that is being created.
+//
+// message is a free-form string containing detailed information about the
+// error.
+//
+// origErrs are the error objects which will be nested under the new error
+// to be returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
+ b := &baseError{
+ code: code,
+ message: message,
+ errs: origErrs,
+ }
+
+ return b
+}
+
+// Error returns the string representation of the error.
+//
+// See SprintError for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+ size := len(b.errs)
+ if size > 0 {
+ return SprintError(b.code, b.message, "", errorList(b.errs))
+ }
+
+ return SprintError(b.code, b.message, "", nil)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+ return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+ return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+ return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
+func (b baseError) OrigErr() error {
+ switch len(b.errs) {
+ case 0:
+ return nil
+ case 1:
+ return b.errs[0]
+ default:
+ if err, ok := b.errs[0].(Error); ok {
+ return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+ }
+ return NewBatchError("BatchedErrors",
+ "multiple errors occurred", b.errs)
+ }
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (b baseError) OrigErrs() []error {
+ return b.errs
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+ awsError
+ statusCode int
+ requestID string
+ bytes []byte
+}
+
+// newRequestError returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all errors which involve service requests, even if
+// the request failed without a service response but had an HTTP status code
+// that may be meaningful.
+//
+// Also wraps original errors via the baseError.
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+ return &requestError{
+ awsError: err,
+ statusCode: statusCode,
+ requestID: requestID,
+ }
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+ extra := fmt.Sprintf("status code: %d, request id: %s",
+ r.statusCode, r.requestID)
+ return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+ return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error
+func (r requestError) StatusCode() int {
+ return r.statusCode
+}
+
+// RequestID returns the wrapped requestID
+func (r requestError) RequestID() string {
+ return r.requestID
+}
+
+// OrigErrs returns the original errors if any were set. An empty slice is
+// returned if none were set.
+func (r requestError) OrigErrs() []error {
+ if b, ok := r.awsError.(BatchedErrors); ok {
+ return b.OrigErrs()
+ }
+ return []error{r.OrigErr()}
+}
+
+type unmarshalError struct {
+ awsError
+ bytes []byte
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (e unmarshalError) Error() string {
+ extra := hex.Dump(e.bytes)
+ return SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (e unmarshalError) String() string {
+ return e.Error()
+}
+
+// Bytes returns the bytes that failed to unmarshal.
+func (e unmarshalError) Bytes() []byte {
+ return e.bytes
+}
+
+// An error list that satisfies the golang error interface
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+ msg := ""
+ // An empty list yields an empty string.
+ if size := len(e); size > 0 {
+ for i := 0; i < size; i++ {
+ msg += e[i].Error()
+ // Append a newline after each error except the last; a trailing
+ // newline would change the output that callers depend on.
+ if i+1 < size {
+ msg += "\n"
+ }
+ }
+ }
+ return msg
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 000000000..1a3d106d5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,108 @@
+package awsutil
+
+import (
+ "io"
+ "reflect"
+ "time"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs are ignored.
+func Copy(dst, src interface{}) {
+ dstval := reflect.ValueOf(dst)
+ if !dstval.IsValid() {
+ panic("Copy dst cannot be nil")
+ }
+
+ rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+ dsti := reflect.New(reflect.TypeOf(src).Elem())
+ dst = dsti.Interface()
+ rcopy(dsti, reflect.ValueOf(src), true)
+ return
+}
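+
+// For example (using a hypothetical struct T): Copy allocates new memory for
+// pointer fields, so mutating the source afterwards leaves the copy intact:
+//
+// type T struct{ Name *string }
+// n := "a"
+// src := &T{Name: &n}
+// var dst T
+// Copy(&dst, src)
+// n = "b" // *dst.Name is still "a"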
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
+func rcopy(dst, src reflect.Value, root bool) {
+ if !src.IsValid() {
+ return
+ }
+
+ switch src.Kind() {
+ case reflect.Ptr:
+ if _, ok := src.Interface().(io.Reader); ok {
+ if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+ dst.Elem().Set(src)
+ } else if dst.CanSet() {
+ dst.Set(src)
+ }
+ } else {
+ e := src.Type().Elem()
+ if dst.CanSet() && !src.IsNil() {
+ if _, ok := src.Interface().(*time.Time); !ok {
+ dst.Set(reflect.New(e))
+ } else {
+ tempValue := reflect.New(e)
+ tempValue.Elem().Set(src.Elem())
+ // Sets time.Time's unexported values
+ dst.Set(tempValue)
+ }
+ }
+ if src.Elem().IsValid() {
+ // Keep the current root state since the depth hasn't changed
+ rcopy(dst.Elem(), src.Elem(), root)
+ }
+ }
+ case reflect.Struct:
+ t := dst.Type()
+ for i := 0; i < t.NumField(); i++ {
+ name := t.Field(i).Name
+ srcVal := src.FieldByName(name)
+ dstVal := dst.FieldByName(name)
+ if srcVal.IsValid() && dstVal.CanSet() {
+ rcopy(dstVal, srcVal, false)
+ }
+ }
+ case reflect.Slice:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ dst.Set(s)
+ for i := 0; i < src.Len(); i++ {
+ rcopy(dst.Index(i), src.Index(i), false)
+ }
+ case reflect.Map:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeMap(src.Type())
+ dst.Set(s)
+ for _, k := range src.MapKeys() {
+ v := src.MapIndex(k)
+ v2 := reflect.New(v.Type()).Elem()
+ rcopy(v2, v, false)
+ dst.SetMapIndex(k, v2)
+ }
+ default:
+ // Assign the value if possible. If its not assignable, the value would
+ // need to be converted and the impact of that may be unexpected, or is
+ // not compatible with the dst type.
+ if src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
new file mode 100644
index 000000000..142a7a01c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+ "reflect"
+)
+
+// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
+// In addition to this, this method will also dereference the input values if
+// possible so the DeepEqual performed will not fail if one parameter is a
+// pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+ ra := reflect.Indirect(reflect.ValueOf(a))
+ rb := reflect.Indirect(reflect.ValueOf(b))
+
+ if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+ // If the elements are both nil, and of the same type they are equal
+ // If they are of different types they are not equal
+ return reflect.TypeOf(a) == reflect.TypeOf(b)
+ } else if raValid != rbValid {
+ // Both values must be valid to be equal
+ return false
+ }
+
+ return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
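+
+// For example, DeepEqual dereferences top-level pointers before comparing, so
+// a value and a pointer to an equal value compare as equal:
+//
+// s := "abc"
+// DeepEqual(s, &s) // true
+// DeepEqual(s, "xyz") // false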
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100644
index 000000000..285e54d67
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,221 @@
+package awsutil
+
+import (
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
+ pathparts := strings.Split(path, "||")
+ if len(pathparts) > 1 {
+ for _, pathpart := range pathparts {
+ vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
+ if len(vals) > 0 {
+ return vals
+ }
+ }
+ return nil
+ }
+
+ values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
+ components := strings.Split(path, ".")
+ for len(values) > 0 && len(components) > 0 {
+ var index *int64
+ var indexStar bool
+ c := strings.TrimSpace(components[0])
+ if c == "" { // no actual component, illegal syntax
+ return nil
+ } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
+ // TODO normalize case for user
+ return nil // don't support unexported fields
+ }
+
+ // parse this component
+ if m := indexRe.FindStringSubmatch(c); m != nil {
+ c = m[1]
+ if m[2] == "" {
+ index = nil
+ indexStar = true
+ } else {
+ i, _ := strconv.ParseInt(m[2], 10, 32)
+ index = &i
+ indexStar = false
+ }
+ }
+
+ nextvals := []reflect.Value{}
+ for _, value := range values {
+ // pull component name out of struct member
+ if value.Kind() != reflect.Struct {
+ continue
+ }
+
+ if c == "*" { // pull all members
+ for i := 0; i < value.NumField(); i++ {
+ if f := reflect.Indirect(value.Field(i)); f.IsValid() {
+ nextvals = append(nextvals, f)
+ }
+ }
+ continue
+ }
+
+ value = value.FieldByNameFunc(func(name string) bool {
+ if c == name {
+ return true
+ } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+ return true
+ }
+ return false
+ })
+
+ if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
+ if !value.IsNil() {
+ value.Set(reflect.Zero(value.Type()))
+ }
+ return []reflect.Value{value}
+ }
+
+ if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
+ // TODO if the value is the terminus it should not be created
+ // if the value to be set to its position is nil.
+ value.Set(reflect.New(value.Type().Elem()))
+ value = value.Elem()
+ } else {
+ value = reflect.Indirect(value)
+ }
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+
+ if indexStar || index != nil {
+ nextvals = []reflect.Value{}
+ for _, valItem := range values {
+ value := reflect.Indirect(valItem)
+ if value.Kind() != reflect.Slice {
+ continue
+ }
+
+ if indexStar { // grab all indices
+ for i := 0; i < value.Len(); i++ {
+ idx := reflect.Indirect(value.Index(i))
+ if idx.IsValid() {
+ nextvals = append(nextvals, idx)
+ }
+ }
+ continue
+ }
+
+ // pull out index
+ i := int(*index)
+ if i >= value.Len() { // check out of bounds
+ if createPath {
+ // TODO resize slice
+ } else {
+ continue
+ }
+ } else if i < 0 { // support negative indexing
+ i = value.Len() + i
+ }
+ value = reflect.Indirect(value.Index(i))
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+ }
+
+ components = components[1:]
+ }
+ return values
+}
+
+// ValuesAtPath returns a list of values at the case insensitive lexical
+// path inside of a structure.
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
+ result, err := jmespath.Search(path, i)
+ if err != nil {
+ return nil, err
+ }
+
+ v := reflect.ValueOf(result)
+ if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
+ return nil, nil
+ }
+ if s, ok := result.([]interface{}); ok {
+ return s, err
+ }
+ if v.Kind() == reflect.Map && v.Len() == 0 {
+ return nil, nil
+ }
+ if v.Kind() == reflect.Slice {
+ out := make([]interface{}, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ out[i] = v.Index(i).Interface()
+ }
+ return out, nil
+ }
+
+ return []interface{}{result}, nil
+}
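+
+// For example (using a hypothetical output struct with an Items slice whose
+// elements have a Name field): the JMESPath result is normalized to a flat
+// []interface{}:
+//
+// vals, err := ValuesAtPath(out, "Items[].Name")
+// // vals == []interface{}{"a", "b"} for two items named "a" and "b"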
+
+// SetValueAtPath sets a value at the case insensitive lexical path inside
+// of a structure.
+func SetValueAtPath(i interface{}, path string, v interface{}) {
+ rvals := rValuesAtPath(i, path, true, false, v == nil)
+ for _, rval := range rvals {
+ if rval.Kind() == reflect.Ptr && rval.IsNil() {
+ continue
+ }
+ setValue(rval, v)
+ }
+}
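+
+// For example (using hypothetical nested structs): intermediate nil pointers
+// along the path are allocated before the terminal value is set:
+//
+// type Inner struct{ Name *string }
+// type Outer struct{ Inner *Inner }
+// o := Outer{}
+// SetValueAtPath(&o, "Inner.Name", "abc") // allocates o.Inner and o.Inner.Name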
+
+func setValue(dstVal reflect.Value, src interface{}) {
+ if dstVal.Kind() == reflect.Ptr {
+ dstVal = reflect.Indirect(dstVal)
+ }
+ srcVal := reflect.ValueOf(src)
+
+ if !srcVal.IsValid() { // src is literal nil
+ // Zero the destination so a literal nil src clears the value.
+ dstVal.Set(reflect.Zero(dstVal.Type()))
+
+ } else if srcVal.Kind() == reflect.Ptr {
+ if srcVal.IsNil() {
+ srcVal = reflect.Zero(dstVal.Type())
+ } else {
+ srcVal = reflect.ValueOf(src).Elem()
+ }
+ dstVal.Set(srcVal)
+ } else {
+ dstVal.Set(srcVal)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
new file mode 100644
index 000000000..710eb432f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
@@ -0,0 +1,113 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+)
+
+// Prettify returns the string representation of a value.
+func Prettify(i interface{}) string {
+ var buf bytes.Buffer
+ prettify(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
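+
+// For example (using a hypothetical struct): nil fields are elided and
+// strings are quoted in the output:
+//
+// n := "abc"
+// Prettify(struct {
+// Name *string
+// Count int
+// }{&n, 2})
+// // {
+// //   Name: "abc",
+// //   Count: 2
+// // }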
+
+// prettify will recursively walk value v to build a textual
+// representation of the value.
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ strtype := v.Type().String()
+ if strtype == "time.Time" {
+ fmt.Fprintf(buf, "%s", v.Interface())
+ break
+ } else if strings.HasPrefix(strtype, "io.") {
+ buf.WriteString("<buffer>")
+ break
+ }
+
+ buf.WriteString("{\n")
+
+ names := []string{}
+ for i := 0; i < v.Type().NumField(); i++ {
+ name := v.Type().Field(i).Name
+ f := v.Field(i)
+ if name[0:1] == strings.ToLower(name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+ continue // ignore unset fields
+ }
+ names = append(names, name)
+ }
+
+ for i, n := range names {
+ val := v.FieldByName(n)
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(n + ": ")
+ prettify(val, indent+2, buf)
+
+ if i < len(names)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ strtype := v.Type().String()
+ if strtype == "[]uint8" {
+ fmt.Fprintf(buf, "<binary> len %d", v.Len())
+ break
+ }
+
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ prettify(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ prettify(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ if !v.IsValid() {
+ fmt.Fprint(buf, "<invalid value>")
+ return
+ }
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ case io.ReadSeeker, io.Reader:
+ format = "buffer(%p)"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 000000000..645df2450
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,88 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+ var buf bytes.Buffer
+ stringValue(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ buf.WriteString("{\n")
+
+ for i := 0; i < v.Type().NumField(); i++ {
+ ft := v.Type().Field(i)
+ fv := v.Field(i)
+
+ if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+ continue // ignore unset fields
+ }
+
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(ft.Name + ": ")
+
+ if tag := ft.Tag.Get("sensitive"); tag == "true" {
+ buf.WriteString("<sensitive>")
+ } else {
+ stringValue(fv, indent+2, buf)
+ }
+
+ buf.WriteString(",\n")
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ stringValue(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ stringValue(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644
index 000000000..03334d692
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,97 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+ Config *aws.Config
+ Handlers request.Handlers
+ PartitionID string
+ Endpoint string
+ SigningRegion string
+ SigningName string
+
+ // States that the signing name did not come from a modeled source but
+ // was derived based on other data. Used by service client constructors
+ // to determine if the signing name can be overridden based on metadata the
+ // service has.
+ SigningNameDerived bool
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+ ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// ConfigNoResolveEndpointProvider is the same as ConfigProvider, except it
+// will not resolve the endpoint automatically. The service client's endpoint
+// must be provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+ ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+ request.Retryer
+ metadata.ClientInfo
+
+ Config aws.Config
+ Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+ svc := &Client{
+ Config: cfg,
+ ClientInfo: info,
+ Handlers: handlers.Copy(),
+ }
+
+ switch retryer, ok := cfg.Retryer.(request.Retryer); {
+ case ok:
+ svc.Retryer = retryer
+ case cfg.Retryer != nil && cfg.Logger != nil:
+ s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+ cfg.Logger.Log(s)
+ fallthrough
+ default:
+ maxRetries := aws.IntValue(cfg.MaxRetries)
+ if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+ maxRetries = DefaultRetryerMaxNumRetries
+ }
+ svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+ }
+
+ svc.AddDebugHandlers()
+
+ for _, option := range options {
+ option(svc)
+ }
+
+ return svc
+}
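+
+// For example (a sketch with hypothetical cfg, info, and handlers values):
+// functional options customize the client after its defaults are applied:
+//
+// c := New(cfg, info, handlers, func(c *Client) {
+// c.Retryer = NoOpRetryer{}
+// })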
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+ return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+ if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
+ return
+ }
+
+ c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
+ c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 000000000..9f6af19dd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,177 @@
+package client
+
+import (
+ "math"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkrand"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, implement the
+// request.Retryer interface.
+type DefaultRetryer struct {
+ // NumMaxRetries is the maximum number of retries that will be performed.
+ // By default, this is zero.
+ NumMaxRetries int
+
+ // MinRetryDelay is the minimum delay before a retry is performed.
+ // If not set, the value is 0ns.
+ MinRetryDelay time.Duration
+
+ // MinThrottleDelay is the minimum retry delay when the request is throttled.
+ // If not set, the value is 0ns.
+ MinThrottleDelay time.Duration
+
+ // MaxRetryDelay is the maximum delay before a retry is performed.
+ // If not set, the value is 0ns.
+ MaxRetryDelay time.Duration
+
+ // MaxThrottleDelay is the maximum retry delay when throttled.
+ // If not set, the value is 0ns.
+ MaxThrottleDelay time.Duration
+}
+
+const (
+ // DefaultRetryerMaxNumRetries sets maximum number of retries
+ DefaultRetryerMaxNumRetries = 3
+
+ // DefaultRetryerMinRetryDelay sets minimum retry delay
+ DefaultRetryerMinRetryDelay = 30 * time.Millisecond
+
+ // DefaultRetryerMinThrottleDelay sets minimum delay when throttled
+ DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
+
+ // DefaultRetryerMaxRetryDelay sets maximum retry delay
+ DefaultRetryerMaxRetryDelay = 300 * time.Second
+
+ // DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
+ DefaultRetryerMaxThrottleDelay = 300 * time.Second
+)
+
+// MaxRetries returns the maximum number of retries the service will use to
+// make an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+ return d.NumMaxRetries
+}
+
+// setRetryerDefaults sets the default values of the retryer if not set
+func (d *DefaultRetryer) setRetryerDefaults() {
+ if d.MinRetryDelay == 0 {
+ d.MinRetryDelay = DefaultRetryerMinRetryDelay
+ }
+ if d.MaxRetryDelay == 0 {
+ d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
+ }
+ if d.MinThrottleDelay == 0 {
+ d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
+ }
+ if d.MaxThrottleDelay == 0 {
+ d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
+ }
+}
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+
+ // if number of max retries is zero, no retries will be performed.
+ if d.NumMaxRetries == 0 {
+ return 0
+ }
+
+ // Sets default value for retryer members
+ d.setRetryerDefaults()
+
+ // minDelay is the minimum retryer delay
+ minDelay := d.MinRetryDelay
+
+ var initialDelay time.Duration
+
+ isThrottle := r.IsErrorThrottle()
+ if isThrottle {
+ if delay, ok := getRetryAfterDelay(r); ok {
+ initialDelay = delay
+ }
+ minDelay = d.MinThrottleDelay
+ }
+
+ retryCount := r.RetryCount
+
+ // maxDelay the maximum retryer delay
+ maxDelay := d.MaxRetryDelay
+
+ if isThrottle {
+ maxDelay = d.MaxThrottleDelay
+ }
+
+ var delay time.Duration
+
+ // Logic to cap the retry count based on the minDelay provided
+ actualRetryCount := int(math.Log2(float64(minDelay))) + 1
+ if actualRetryCount < 63-retryCount {
+ delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
+ if delay > maxDelay {
+ delay = getJitterDelay(maxDelay / 2)
+ }
+ } else {
+ delay = getJitterDelay(maxDelay / 2)
+ }
+ return delay + initialDelay
+}
+
+// getJitterDelay returns a jittered delay for retry
+func getJitterDelay(duration time.Duration) time.Duration {
+ return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
+}
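+
+// For example, getJitterDelay(d) draws uniformly from [d, 2d), so callers
+// pass maxDelay/2 to keep the jittered result below maxDelay:
+//
+// getJitterDelay(150 * time.Second) // some duration in [150s, 300s)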
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+
+ // ShouldRetry returns false if number of max retries is 0.
+ if d.NumMaxRetries == 0 {
+ return false
+ }
+
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable != nil {
+ return *r.Retryable
+ }
+ return r.IsErrorRetryable() || r.IsErrorThrottle()
+}
+
+// getRetryAfterDelay looks in the Retry-After header (RFC 7231) for how long
+// to wait before attempting another request.
+func getRetryAfterDelay(r *request.Request) (time.Duration, bool) {
+ if !canUseRetryAfterHeader(r) {
+ return 0, false
+ }
+
+ delayStr := r.HTTPResponse.Header.Get("Retry-After")
+ if len(delayStr) == 0 {
+ return 0, false
+ }
+
+ delay, err := strconv.Atoi(delayStr)
+ if err != nil {
+ return 0, false
+ }
+
+ return time.Duration(delay) * time.Second, true
+}
+
+// canUseRetryAfterHeader reports whether the Retry-After header pertains to
+// the response's status code.
+func canUseRetryAfterHeader(r *request.Request) bool {
+ switch r.HTTPResponse.StatusCode {
+ case 429:
+ case 503:
+ default:
+ return false
+ }
+
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
new file mode 100644
index 000000000..8958c32d4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
@@ -0,0 +1,194 @@
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http/httputil"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const logReqMsg = `DEBUG: Request %s/%s Details:
+---[ REQUEST POST-SIGN ]-----------------------------
+%s
+-----------------------------------------------------`
+
+const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
+---[ REQUEST DUMP ERROR ]-----------------------------
+%s
+------------------------------------------------------`
+
+type logWriter struct {
+ // Logger is what we will use to log the payload of a response.
+ Logger aws.Logger
+ // buf stores the contents of what has been read
+ buf *bytes.Buffer
+}
+
+func (logger *logWriter) Write(b []byte) (int, error) {
+ return logger.buf.Write(b)
+}
+
+type teeReaderCloser struct {
+ // io.Reader will be a tee reader that is used during logging.
+ // This structure will read from a body and write the contents to a logger.
+ io.Reader
+ // Source is used just to close when we are done reading.
+ Source io.ReadCloser
+}
+
+func (reader *teeReaderCloser) Close() error {
+ return reader.Source.Close()
+}
+
+// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent
+// to a service. Will include the HTTP request body if the LogLevel of the
+// request matches LogDebugWithHTTPBody.
+var LogHTTPRequestHandler = request.NamedHandler{
+ Name: "awssdk.client.LogRequest",
+ Fn: logRequest,
+}
+
+func logRequest(r *request.Request) {
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ bodySeekable := aws.IsReaderSeekable(r.Body)
+
+ b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, err))
+ return
+ }
+
+ if logBody {
+ if !bodySeekable {
+ r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
+ }
+ // Reset the request body because dumpRequest will re-wrap the
+ // r.HTTPRequest's Body as a NoOpCloser and will not be reset after
+ // read by the HTTP client reader.
+ if err := r.Error; err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, err))
+ return
+ }
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
+
+// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent
+// to a service. Will only log the HTTP request's headers. The request payload
+// will not be read.
+var LogHTTPRequestHeaderHandler = request.NamedHandler{
+ Name: "awssdk.client.LogRequestHeader",
+ Fn: logRequestHeader,
+}
+
+func logRequestHeader(r *request.Request) {
+ b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, err))
+ return
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
+---[ RESPONSE DUMP ERROR ]-----------------------------
+%s
+-----------------------------------------------------`
+
+// LogHTTPResponseHandler is a SDK request handler to log the HTTP response
+// received from a service. Will include the HTTP response body if the LogLevel
+// of the request matches LogDebugWithHTTPBody.
+var LogHTTPResponseHandler = request.NamedHandler{
+ Name: "awssdk.client.LogResponse",
+ Fn: logResponse,
+}
+
+func logResponse(r *request.Request) {
+ lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
+
+ if r.HTTPResponse == nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil"))
+ return
+ }
+
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ if logBody {
+ r.HTTPResponse.Body = &teeReaderCloser{
+ Reader: io.TeeReader(r.HTTPResponse.Body, lw),
+ Source: r.HTTPResponse.Body,
+ }
+ }
+
+ handlerFn := func(req *request.Request) {
+ b, err := httputil.DumpResponse(req.HTTPResponse, false)
+ if err != nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ req.ClientInfo.ServiceName, req.Operation.Name, err))
+ return
+ }
+
+ lw.Logger.Log(fmt.Sprintf(logRespMsg,
+ req.ClientInfo.ServiceName, req.Operation.Name, string(b)))
+
+ if logBody {
+ b, err := ioutil.ReadAll(lw.buf)
+ if err != nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ req.ClientInfo.ServiceName, req.Operation.Name, err))
+ return
+ }
+
+ lw.Logger.Log(string(b))
+ }
+ }
+
+ const handlerName = "awsdk.client.LogResponse.ResponseBody"
+
+ r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{
+ Name: handlerName, Fn: handlerFn,
+ })
+ r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{
+ Name: handlerName, Fn: handlerFn,
+ })
+}
+
+// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP
+// response received from a service. Will only log the HTTP response's headers.
+// The response payload will not be read.
+var LogHTTPResponseHeaderHandler = request.NamedHandler{
+ Name: "awssdk.client.LogResponseHeader",
+ Fn: logResponseHeader,
+}
+
+func logResponseHeader(r *request.Request) {
+ if r.Config.Logger == nil {
+ return
+ }
+
+ b, err := httputil.DumpResponse(r.HTTPResponse, false)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, err))
+ return
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100644
index 000000000..0c48f72e0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,14 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+ ServiceName string
+ ServiceID string
+ APIVersion string
+ PartitionID string
+ Endpoint string
+ SigningName string
+ SigningRegion string
+ JSONVersion string
+ TargetPrefix string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go
new file mode 100644
index 000000000..881d575f0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// NoOpRetryer provides a retryer that performs no retries.
+// It should be used when we do not want retries to be performed.
+type NoOpRetryer struct{}
+
+// MaxRetries returns the maximum number of retries the service will use to
+// make an individual API request; for NoOpRetryer this is always zero.
+func (d NoOpRetryer) MaxRetries() int {
+ return 0
+}
+
+// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
+func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool {
+ return false
+}
+
+// RetryRules returns the delay duration before retrying this request again;
+// since NoOpRetryer does not retry, RetryRules always returns 0.
+func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration {
+ return 0
+}
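+
+// For example, retries can be disabled for all clients built from a config
+// by supplying NoOpRetryer (a sketch; aws.Config.Retryer accepts any
+// request.Retryer implementation):
+//
+// cfg := aws.NewConfig()
+// cfg.Retryer = NoOpRetryer{}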
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644
index 000000000..8a7699b96
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,550 @@
+package aws
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own
+// default number of retries. This will be the default action if
+// Config.MaxRetries is nil also.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer
+// interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the defaults.DefaultConfig structure.
+//
+// // Create Session with MaxRetries configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(&aws.Config{
+// MaxRetries: aws.Int(3),
+// }))
+//
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, &aws.Config{
+// Region: aws.String("us-west-2"),
+// })
+type Config struct {
+ // Enables verbose error printing of all credential chain errors.
+ // Should be used when wanting to see all errors while attempting to
+ // retrieve credentials.
+ CredentialsChainVerboseErrors *bool
+
+ // The credentials object to use when signing requests. Defaults to a
+ // chain of credential providers to search for credentials in environment
+ // variables, shared credential file, and EC2 Instance Roles.
+ Credentials *credentials.Credentials
+
+ // An optional endpoint URL (hostname only or fully qualified URI)
+ // that overrides the default generated endpoint for a client. Set this
+ // to `""` to use the default generated endpoint.
+ //
+ // Note: You must still provide a `Region` value when specifying an
+ // endpoint for a client.
+ Endpoint *string
+
+ // The resolver to use for looking up endpoints for AWS service clients
+ // to use based on region.
+ EndpointResolver endpoints.Resolver
+
+ // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
+ // ShouldRetry regardless of whether request.Retryable is set. This will
+ // utilize the ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
+ // is not set, then ShouldRetry will only be called if request.Retryable is nil.
+ // Proper handling of the request.Retryable field is important when setting this field.
+ EnforceShouldRetryCheck *bool
+
+ // The region to send requests to. This parameter is required and must
+ // be configured globally or on a per-client basis unless otherwise
+ // noted. A full list of regions is found in the "Regions and Endpoints"
+ // document.
+ //
+ // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
+ // Regions and Endpoints.
+ Region *string
+
+ // Set this to `true` to disable SSL when sending requests. Defaults
+ // to `false`.
+ DisableSSL *bool
+
+ // The HTTP client to use when sending requests. Defaults to
+ // `http.DefaultClient`.
+ HTTPClient *http.Client
+
+ // An integer value representing the logging level. The default log level
+ // is zero (LogOff), which represents no logging. To enable logging set
+ // to a LogLevel Value.
+ LogLevel *LogLevelType
+
+ // The logger writer interface to write logging messages to. Defaults to
+ // standard out.
+ Logger Logger
+
+ // The maximum number of times that a request will be retried for failures.
+ // Defaults to -1, which defers the max retry setting to the service
+ // specific configuration.
+ MaxRetries *int
+
+ // Retryer guides how HTTP requests should be retried in case of
+ // recoverable failures.
+ //
+ // When nil or the value does not implement the request.Retryer interface,
+ // the client.DefaultRetryer will be used.
+ //
+ // When both Retryer and MaxRetries are non-nil, the former is used and
+ // the latter ignored.
+ //
+ // To set the Retryer field in a type-safe manner and with chaining, use
+ // the request.WithRetryer helper function:
+ //
+ // cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+ //
+ Retryer RequestRetryer
+
+ // Disables semantic parameter validation, which validates input for
+ // missing required fields and/or other semantic request input errors.
+ DisableParamValidation *bool
+
+ // Disables the computation of request and response checksums, e.g.,
+ // CRC32 checksums in Amazon DynamoDB.
+ DisableComputeChecksums *bool
+
+ // Set this to `true` to force the request to use path-style addressing,
+ // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
+ // will use virtual hosted bucket addressing when possible
+ // (`http://BUCKET.s3.amazonaws.com/KEY`).
+ //
+ // Note: This configuration option is specific to the Amazon S3 service.
+ //
+ // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+ // for Amazon S3: Virtual Hosting of Buckets
+ S3ForcePathStyle *bool
+
+ // Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+ // header to PUT requests over 2MB of content. 100-Continue instructs the
+ // HTTP client not to send the body until the service responds with a
+ // `continue` status. This is useful to prevent sending the request body
+ // until after the request is authenticated and validated.
+ //
+ // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+ //
+ // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+ // `ExpectContinueTimeout` for information on adjusting the continue wait
+ // timeout. https://golang.org/pkg/net/http/#Transport
+ //
+ // You should use this flag to disable 100-Continue if you experience issues
+ // with proxies or third party S3 compatible services.
+ S3Disable100Continue *bool
+
+ // Set this to `true` to enable the S3 Accelerate feature. All operations
+ // compatible with S3 Accelerate will use the accelerate endpoint for
+ // requests. Requests not compatible will fall back to normal S3 requests.
+ //
+ // The bucket must have accelerate enabled in order to be used with an S3
+ // client that has accelerate enabled. If the bucket is not enabled for
+ // accelerate, an error will be returned. The bucket name must also be DNS
+ // compatible to work with accelerate.
+ S3UseAccelerate *bool
+
+ // S3DisableContentMD5Validation config option is temporarily disabled,
+ // For S3 GetObject API calls, #1837.
+ //
+ // Set this to `true` to disable the S3 service client from automatically
+ // adding the ContentMD5 to S3 Object Put and Upload API calls. This option
+ // will also disable the SDK from performing object ContentMD5 validation
+ // on GetObject API calls.
+ S3DisableContentMD5Validation *bool
+
+ // Set this to `true` to disable the EC2Metadata client from overriding the
+ // default http.Client's Timeout. This is helpful if you do not want the
+ // EC2Metadata client to create a new http.Client. This option is only
+ // meaningful if you're not already using a custom HTTP client with the
+ // SDK. Enabled by default.
+ //
+ // Must be set and provided to the session.NewSession() in order to disable
+ // the EC2Metadata overriding the timeout for default credentials chain.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(aws.NewConfig()
+ // .WithEC2MetadataDisableTimeoutOverride(true)))
+ //
+ // svc := s3.New(sess)
+ //
+ EC2MetadataDisableTimeoutOverride *bool
+
+ // Instructs the endpoint generated for a service client to
+ // be the dual stack endpoint. The dual stack endpoint will support
+ // both IPv4 and IPv6 addressing.
+ //
+ // Setting this for a service which does not support dual stack will fail
+ // to make requests. It is not recommended to set this value on the session,
+ // as it will apply to all service clients created with the session, even
+ // services which don't support dual stack endpoints.
+ //
+ // If the Endpoint config value is also provided the UseDualStack flag
+ // will be ignored.
+ //
+ // Example:
+ //
+ // sess := session.Must(session.NewSession())
+ //
+ // svc := s3.New(sess, &aws.Config{
+ // UseDualStack: aws.Bool(true),
+ // })
+ UseDualStack *bool
+
+ // SleepDelay is an override for the func the SDK will call when sleeping
+ // during the lifecycle of a request. Specifically this will be used for
+ // request delays. This value should only be used for testing. To adjust
+ // the delay of a request see the aws/client.DefaultRetryer and
+ // aws/request.Retryer.
+ //
+ // SleepDelay will prevent any Context from being used for canceling retry
+ // delay of an API operation. It is recommended to not use SleepDelay at all
+ // and specify a Retryer instead.
+ SleepDelay func(time.Duration)
+
+ // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
+ // Will default to false. This would only be used for empty directory names in s3 requests.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(&aws.Config{
+ // DisableRestProtocolURICleaning: aws.Bool(true),
+ // }))
+ //
+ // svc := s3.New(sess)
+ // out, err := svc.GetObject(&s3.GetObjectInput {
+ // Bucket: aws.String("bucketname"),
+ // Key: aws.String("//foo//bar//moo"),
+ // })
+ DisableRestProtocolURICleaning *bool
+
+ // EnableEndpointDiscovery will allow for endpoint discovery on operations
+ // that have it defined in their model. By default, endpoint discovery is off.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(&aws.Config{
+ // EnableEndpointDiscovery: aws.Bool(true),
+ // }))
+ //
+ // svc := s3.New(sess)
+ // out, err := svc.GetObject(&s3.GetObjectInput {
+ // Bucket: aws.String("bucketname"),
+ // Key: aws.String("/foo/bar/moo"),
+ // })
+ EnableEndpointDiscovery *bool
+
+ // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
+ // request endpoint hosts with modeled information.
+ //
+ // Disabling this feature is useful when you want to use local endpoints
+ // for testing that do not support the modeled host prefix pattern.
+ DisableEndpointHostPrefix *bool
+
+ // STSRegionalEndpoint will enable regional or legacy endpoint resolving
+ STSRegionalEndpoint endpoints.STSRegionalEndpoint
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+//
+// // Create Session with MaxRetries configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(aws.NewConfig().
+// WithMaxRetries(3),
+// ))
+//
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, aws.NewConfig().
+// WithRegion("us-west-2"),
+// )
+func NewConfig() *Config {
+ return &Config{}
+}
+
+// WithCredentialsChainVerboseErrors sets a config CredentialsChainVerboseErrors
+// value returning a Config pointer for chaining.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
+ c.CredentialsChainVerboseErrors = &verboseErrs
+ return c
+}
+
+// WithCredentials sets a config Credentials value returning a Config pointer
+// for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+ c.Credentials = creds
+ return c
+}
+
+// WithEndpoint sets a config Endpoint value returning a Config pointer for
+// chaining.
+func (c *Config) WithEndpoint(endpoint string) *Config {
+ c.Endpoint = &endpoint
+ return c
+}
+
+// WithEndpointResolver sets a config EndpointResolver value returning a
+// Config pointer for chaining.
+func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
+ c.EndpointResolver = resolver
+ return c
+}
+
+// WithRegion sets a config Region value returning a Config pointer for
+// chaining.
+func (c *Config) WithRegion(region string) *Config {
+ c.Region = &region
+ return c
+}
+
+// WithDisableSSL sets a config DisableSSL value returning a Config pointer
+// for chaining.
+func (c *Config) WithDisableSSL(disable bool) *Config {
+ c.DisableSSL = &disable
+ return c
+}
+
+// WithHTTPClient sets a config HTTPClient value returning a Config pointer
+// for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config {
+ c.HTTPClient = client
+ return c
+}
+
+// WithMaxRetries sets a config MaxRetries value returning a Config pointer
+// for chaining.
+func (c *Config) WithMaxRetries(max int) *Config {
+ c.MaxRetries = &max
+ return c
+}
+
+// WithDisableParamValidation sets a config DisableParamValidation value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableParamValidation(disable bool) *Config {
+ c.DisableParamValidation = &disable
+ return c
+}
+
+// WithDisableComputeChecksums sets a config DisableComputeChecksums value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
+ c.DisableComputeChecksums = &disable
+ return c
+}
+
+// WithLogLevel sets a config LogLevel value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogLevel(level LogLevelType) *Config {
+ c.LogLevel = &level
+ return c
+}
+
+// WithLogger sets a config Logger value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogger(logger Logger) *Config {
+ c.Logger = logger
+ return c
+}
+
+// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3ForcePathStyle(force bool) *Config {
+ c.S3ForcePathStyle = &force
+ return c
+}
+
+// WithS3Disable100Continue sets a config S3Disable100Continue value returning
+// a Config pointer for chaining.
+func (c *Config) WithS3Disable100Continue(disable bool) *Config {
+ c.S3Disable100Continue = &disable
+ return c
+}
+
+// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3UseAccelerate(enable bool) *Config {
+ c.S3UseAccelerate = &enable
+ return c
+}
+
+// WithS3DisableContentMD5Validation sets a config
+// S3DisableContentMD5Validation value returning a Config pointer for chaining.
+func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
+ c.S3DisableContentMD5Validation = &enable
+ return c
+}
+
+// WithUseDualStack sets a config UseDualStack value returning a Config
+// pointer for chaining.
+func (c *Config) WithUseDualStack(enable bool) *Config {
+ c.UseDualStack = &enable
+ return c
+}
+
+// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
+ c.EC2MetadataDisableTimeoutOverride = &enable
+ return c
+}
+
+// WithSleepDelay overrides the function used to sleep while waiting for the
+// next retry. Defaults to time.Sleep.
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
+ c.SleepDelay = fn
+ return c
+}
+
+// WithEndpointDiscovery will set whether or not to use endpoint discovery.
+func (c *Config) WithEndpointDiscovery(t bool) *Config {
+ c.EnableEndpointDiscovery = &t
+ return c
+}
+
+// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
+// when making requests.
+func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
+ c.DisableEndpointHostPrefix = &t
+ return c
+}
+
+// MergeIn merges the passed in configs into the existing config object.
+func (c *Config) MergeIn(cfgs ...*Config) {
+ for _, other := range cfgs {
+ mergeInConfig(c, other)
+ }
+}
+
+// WithSTSRegionalEndpoint will set whether or not to use the regional endpoint
+// flag when resolving the endpoint for a service.
+func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config {
+ c.STSRegionalEndpoint = sre
+ return c
+}
+
+func mergeInConfig(dst *Config, other *Config) {
+ if other == nil {
+ return
+ }
+
+ if other.CredentialsChainVerboseErrors != nil {
+ dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
+ }
+
+ if other.Credentials != nil {
+ dst.Credentials = other.Credentials
+ }
+
+ if other.Endpoint != nil {
+ dst.Endpoint = other.Endpoint
+ }
+
+ if other.EndpointResolver != nil {
+ dst.EndpointResolver = other.EndpointResolver
+ }
+
+ if other.Region != nil {
+ dst.Region = other.Region
+ }
+
+ if other.DisableSSL != nil {
+ dst.DisableSSL = other.DisableSSL
+ }
+
+ if other.HTTPClient != nil {
+ dst.HTTPClient = other.HTTPClient
+ }
+
+ if other.LogLevel != nil {
+ dst.LogLevel = other.LogLevel
+ }
+
+ if other.Logger != nil {
+ dst.Logger = other.Logger
+ }
+
+ if other.MaxRetries != nil {
+ dst.MaxRetries = other.MaxRetries
+ }
+
+ if other.Retryer != nil {
+ dst.Retryer = other.Retryer
+ }
+
+ if other.DisableParamValidation != nil {
+ dst.DisableParamValidation = other.DisableParamValidation
+ }
+
+ if other.DisableComputeChecksums != nil {
+ dst.DisableComputeChecksums = other.DisableComputeChecksums
+ }
+
+ if other.S3ForcePathStyle != nil {
+ dst.S3ForcePathStyle = other.S3ForcePathStyle
+ }
+
+ if other.S3Disable100Continue != nil {
+ dst.S3Disable100Continue = other.S3Disable100Continue
+ }
+
+ if other.S3UseAccelerate != nil {
+ dst.S3UseAccelerate = other.S3UseAccelerate
+ }
+
+ if other.S3DisableContentMD5Validation != nil {
+ dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
+ }
+
+ if other.UseDualStack != nil {
+ dst.UseDualStack = other.UseDualStack
+ }
+
+ if other.EC2MetadataDisableTimeoutOverride != nil {
+ dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
+ }
+
+ if other.SleepDelay != nil {
+ dst.SleepDelay = other.SleepDelay
+ }
+
+ if other.DisableRestProtocolURICleaning != nil {
+ dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
+ }
+
+ if other.EnforceShouldRetryCheck != nil {
+ dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
+ }
+
+ if other.EnableEndpointDiscovery != nil {
+ dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
+ }
+
+ if other.DisableEndpointHostPrefix != nil {
+ dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
+ }
+
+ if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint {
+ dst.STSRegionalEndpoint = other.STSRegionalEndpoint
+ }
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+ dst := &Config{}
+ dst.MergeIn(c)
+
+ for _, cfg := range cfgs {
+ dst.MergeIn(cfg)
+ }
+
+ return dst
+}
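+
+// For example, merging is field by field, with later configs taking
+// precedence only for fields they actually set:
+//
+// base := NewConfig().WithRegion("us-west-2").WithMaxRetries(3)
+// merged := base.Copy(NewConfig().WithRegion("us-east-1"))
+// // merged: Region "us-east-1", MaxRetries 3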
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
new file mode 100644
index 000000000..2866f9a7f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
@@ -0,0 +1,37 @@
+// +build !go1.9
+
+package aws
+
+import "time"
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key returns the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ Value(key interface{}) interface{}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
new file mode 100644
index 000000000..3718b26e1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
@@ -0,0 +1,11 @@
+// +build go1.9
+
+package aws
+
+import "context"
+
+// Context is an alias of the Go stdlib's context.Context interface.
+// It can be used within the SDK's API operation "WithContext" methods.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context = context.Context
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
new file mode 100644
index 000000000..66c5945db
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
@@ -0,0 +1,56 @@
+// +build !go1.7
+
+package aws
+
+import "time"
+
+// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
+// provide a 1.6 and 1.5 safe version of context that is compatible with Go
+// 1.7's Context.
+//
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case backgroundCtx:
+ return "aws.BackgroundContext"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ backgroundCtx = new(emptyCtx)
+)
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+ return backgroundCtx
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
new file mode 100644
index 000000000..9c29f29af
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
@@ -0,0 +1,20 @@
+// +build go1.7
+
+package aws
+
+import "context"
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+ return context.Background()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
new file mode 100644
index 000000000..304fd1561
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
@@ -0,0 +1,24 @@
+package aws
+
+import (
+ "time"
+)
+
+// SleepWithContext will wait for the timer duration to expire, or for the
+// context to be canceled, whichever happens first. If the context is canceled,
+// the Context's error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
+func SleepWithContext(ctx Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ break
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ return nil
+}
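+
+// Example (editor's illustrative sketch, not part of the upstream source) of
+// using SleepWithContext as a cancelable delay. The names ctx and cancel are
+// hypothetical, and context.WithTimeout assumes Go 1.7+.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+//	defer cancel()
+//
+//	// Returns ctx.Err() because the 1s deadline elapses before the 5s
+//	// timer fires.
+//	if err := aws.SleepWithContext(ctx, 5*time.Second); err != nil {
+//		fmt.Println("sleep interrupted:", err)
+//	}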
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
new file mode 100644
index 000000000..4e076c183
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -0,0 +1,918 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+ return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+ if v != nil {
+ return *v
+ }
+ return ""
+}
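+
+// Example (editor's illustrative sketch, not part of the upstream source):
+// these pointer helpers are typically used to fill SDK input structs whose
+// fields are pointers. Input and Name are hypothetical names.
+//
+//	input := &Input{
+//		Name: aws.String("example"), // *string from a literal
+//	}
+//	fmt.Println(aws.StringValue(input.Name)) // "example"
+//	fmt.Println(aws.StringValue(nil))        // "" (nil pointers are safe)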
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+ dst := make([]*string, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+ dst := make([]string, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+ dst := make(map[string]*string)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+ dst := make(map[string]string)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+ if v != nil {
+ return *v
+ }
+ return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+ dst := make([]*bool, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+ dst := make([]bool, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+ dst := make(map[string]*bool)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+ dst := make(map[string]bool)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+ return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+ dst := make([]*int, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+ dst := make([]int, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+ dst := make(map[string]*int)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+ dst := make(map[string]int)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint returns a pointer to the uint value passed in.
+func Uint(v uint) *uint {
+ return &v
+}
+
+// UintValue returns the value of the uint pointer passed in or
+// 0 if the pointer is nil.
+func UintValue(v *uint) uint {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// UintSlice converts a slice of uint values into a slice of
+// uint pointers
+func UintSlice(src []uint) []*uint {
+ dst := make([]*uint, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// UintValueSlice converts a slice of uint pointers into a slice of
+// uint values
+func UintValueSlice(src []*uint) []uint {
+ dst := make([]uint, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// UintMap converts a string map of uint values into a string
+// map of uint pointers
+func UintMap(src map[string]uint) map[string]*uint {
+ dst := make(map[string]*uint)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// UintValueMap converts a string map of uint pointers into a string
+// map of uint values
+func UintValueMap(src map[string]*uint) map[string]uint {
+ dst := make(map[string]uint)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int8 returns a pointer to the int8 value passed in.
+func Int8(v int8) *int8 {
+ return &v
+}
+
+// Int8Value returns the value of the int8 pointer passed in or
+// 0 if the pointer is nil.
+func Int8Value(v *int8) int8 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int8Slice converts a slice of int8 values into a slice of
+// int8 pointers
+func Int8Slice(src []int8) []*int8 {
+ dst := make([]*int8, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int8ValueSlice converts a slice of int8 pointers into a slice of
+// int8 values
+func Int8ValueSlice(src []*int8) []int8 {
+ dst := make([]int8, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int8Map converts a string map of int8 values into a string
+// map of int8 pointers
+func Int8Map(src map[string]int8) map[string]*int8 {
+ dst := make(map[string]*int8)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int8ValueMap converts a string map of int8 pointers into a string
+// map of int8 values
+func Int8ValueMap(src map[string]*int8) map[string]int8 {
+ dst := make(map[string]int8)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int16 returns a pointer to the int16 value passed in.
+func Int16(v int16) *int16 {
+ return &v
+}
+
+// Int16Value returns the value of the int16 pointer passed in or
+// 0 if the pointer is nil.
+func Int16Value(v *int16) int16 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int16Slice converts a slice of int16 values into a slice of
+// int16 pointers
+func Int16Slice(src []int16) []*int16 {
+ dst := make([]*int16, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int16ValueSlice converts a slice of int16 pointers into a slice of
+// int16 values
+func Int16ValueSlice(src []*int16) []int16 {
+ dst := make([]int16, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int16Map converts a string map of int16 values into a string
+// map of int16 pointers
+func Int16Map(src map[string]int16) map[string]*int16 {
+ dst := make(map[string]*int16)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int16ValueMap converts a string map of int16 pointers into a string
+// map of int16 values
+func Int16ValueMap(src map[string]*int16) map[string]int16 {
+ dst := make(map[string]int16)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int32 returns a pointer to the int32 value passed in.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int32Value returns the value of the int32 pointer passed in or
+// 0 if the pointer is nil.
+func Int32Value(v *int32) int32 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int32Slice converts a slice of int32 values into a slice of
+// int32 pointers
+func Int32Slice(src []int32) []*int32 {
+ dst := make([]*int32, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int32ValueSlice converts a slice of int32 pointers into a slice of
+// int32 values
+func Int32ValueSlice(src []*int32) []int32 {
+ dst := make([]int32, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int32Map converts a string map of int32 values into a string
+// map of int32 pointers
+func Int32Map(src map[string]int32) map[string]*int32 {
+ dst := make(map[string]*int32)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int32ValueMap converts a string map of int32 pointers into a string
+// map of int32 values
+func Int32ValueMap(src map[string]*int32) map[string]int32 {
+ dst := make(map[string]int32)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int64 returns a pointer to the int64 value passed in.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+ dst := make([]*int64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+ dst := make([]int64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+ dst := make(map[string]*int64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+ dst := make(map[string]int64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint8 returns a pointer to the uint8 value passed in.
+func Uint8(v uint8) *uint8 {
+ return &v
+}
+
+// Uint8Value returns the value of the uint8 pointer passed in or
+// 0 if the pointer is nil.
+func Uint8Value(v *uint8) uint8 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Uint8Slice converts a slice of uint8 values into a slice of
+// uint8 pointers
+func Uint8Slice(src []uint8) []*uint8 {
+ dst := make([]*uint8, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Uint8ValueSlice converts a slice of uint8 pointers into a slice of
+// uint8 values
+func Uint8ValueSlice(src []*uint8) []uint8 {
+ dst := make([]uint8, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Uint8Map converts a string map of uint8 values into a string
+// map of uint8 pointers
+func Uint8Map(src map[string]uint8) map[string]*uint8 {
+ dst := make(map[string]*uint8)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Uint8ValueMap converts a string map of uint8 pointers into a string
+// map of uint8 values
+func Uint8ValueMap(src map[string]*uint8) map[string]uint8 {
+ dst := make(map[string]uint8)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint16 returns a pointer to the uint16 value passed in.
+func Uint16(v uint16) *uint16 {
+ return &v
+}
+
+// Uint16Value returns the value of the uint16 pointer passed in or
+// 0 if the pointer is nil.
+func Uint16Value(v *uint16) uint16 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Uint16Slice converts a slice of uint16 values into a slice of
+// uint16 pointers
+func Uint16Slice(src []uint16) []*uint16 {
+ dst := make([]*uint16, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Uint16ValueSlice converts a slice of uint16 pointers into a slice of
+// uint16 values
+func Uint16ValueSlice(src []*uint16) []uint16 {
+ dst := make([]uint16, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Uint16Map converts a string map of uint16 values into a string
+// map of uint16 pointers
+func Uint16Map(src map[string]uint16) map[string]*uint16 {
+ dst := make(map[string]*uint16)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Uint16ValueMap converts a string map of uint16 pointers into a string
+// map of uint16 values
+func Uint16ValueMap(src map[string]*uint16) map[string]uint16 {
+ dst := make(map[string]uint16)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint32 returns a pointer to the uint32 value passed in.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint32Value returns the value of the uint32 pointer passed in or
+// 0 if the pointer is nil.
+func Uint32Value(v *uint32) uint32 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Uint32Slice converts a slice of uint32 values into a slice of
+// uint32 pointers
+func Uint32Slice(src []uint32) []*uint32 {
+ dst := make([]*uint32, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Uint32ValueSlice converts a slice of uint32 pointers into a slice of
+// uint32 values
+func Uint32ValueSlice(src []*uint32) []uint32 {
+ dst := make([]uint32, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Uint32Map converts a string map of uint32 values into a string
+// map of uint32 pointers
+func Uint32Map(src map[string]uint32) map[string]*uint32 {
+ dst := make(map[string]*uint32)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Uint32ValueMap converts a string map of uint32 pointers into a string
+// map of uint32 values
+func Uint32ValueMap(src map[string]*uint32) map[string]uint32 {
+ dst := make(map[string]uint32)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Uint64 returns a pointer to the uint64 value passed in.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// Uint64Value returns the value of the uint64 pointer passed in or
+// 0 if the pointer is nil.
+func Uint64Value(v *uint64) uint64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Uint64Slice converts a slice of uint64 values into a slice of
+// uint64 pointers
+func Uint64Slice(src []uint64) []*uint64 {
+ dst := make([]*uint64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Uint64ValueSlice converts a slice of uint64 pointers into a slice of
+// uint64 values
+func Uint64ValueSlice(src []*uint64) []uint64 {
+ dst := make([]uint64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Uint64Map converts a string map of uint64 values into a string
+// map of uint64 pointers
+func Uint64Map(src map[string]uint64) map[string]*uint64 {
+ dst := make(map[string]*uint64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Uint64ValueMap converts a string map of uint64 pointers into a string
+// map of uint64 values
+func Uint64ValueMap(src map[string]*uint64) map[string]uint64 {
+ dst := make(map[string]uint64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Float32 returns a pointer to the float32 value passed in.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float32Value returns the value of the float32 pointer passed in or
+// 0 if the pointer is nil.
+func Float32Value(v *float32) float32 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Float32Slice converts a slice of float32 values into a slice of
+// float32 pointers
+func Float32Slice(src []float32) []*float32 {
+ dst := make([]*float32, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Float32ValueSlice converts a slice of float32 pointers into a slice of
+// float32 values
+func Float32ValueSlice(src []*float32) []float32 {
+ dst := make([]float32, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Float32Map converts a string map of float32 values into a string
+// map of float32 pointers
+func Float32Map(src map[string]float32) map[string]*float32 {
+ dst := make(map[string]*float32)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Float32ValueMap converts a string map of float32 pointers into a string
+// map of float32 values
+func Float32ValueMap(src map[string]*float32) map[string]float32 {
+ dst := make(map[string]float32)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+ dst := make([]*float64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+ dst := make([]float64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+ dst := make(map[string]*float64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+ dst := make(map[string]float64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+ if v != nil {
+ return *v
+ }
+ return time.Time{}
+}
+
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+func SecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		// The value is seconds since Epoch; no scaling is needed.
+		return time.Unix(*v, 0)
+	}
+	return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+ if v != nil {
+ return time.Unix(0, (*v * 1000000))
+ }
+ return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64,
+// which includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs such as CloudWatch Logs which require
+// their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
+func TimeUnixMilli(t time.Time) int64 {
+ return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
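+
+// Editor's illustrative check of the conversion above: one second after the
+// epoch is 1000 milliseconds.
+//
+//	fmt.Println(aws.TimeUnixMilli(time.Unix(1, 0))) // 1000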
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+ dst := make([]*time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+ dst := make([]time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+ dst := make(map[string]*time.Time)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+ dst := make(map[string]time.Time)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
new file mode 100644
index 000000000..0c60e612e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,230 @@
+package corehandlers
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// lener is an interface for matching types which have a Len method.
+type lener interface {
+ Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If it is unable
+// to determine the request body's length and no "Content-Length" was specified, an
+// error will be set on the request.
+//
+// The Content-Length header will only be added to the request if the length of
+// the body is greater than 0. If the body is empty or the resulting length
+// is <= 0, the header will be stripped.
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
+ var length int64
+
+ if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+ length, _ = strconv.ParseInt(slength, 10, 64)
+ } else {
+ if r.Body != nil {
+ var err error
+ length, err = aws.SeekerLen(r.Body)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
+ return
+ }
+ }
+ }
+
+ if length > 0 {
+ r.HTTPRequest.ContentLength = length
+ r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+ } else {
+ r.HTTPRequest.ContentLength = 0
+ r.HTTPRequest.Header.Del("Content-Length")
+ }
+}}
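+
+// Example (editor's illustrative sketch, not part of the upstream source):
+// named handlers such as the one above are registered on a request.Handlers
+// list, assuming HandlerList's PushBackNamed method. The handlers variable
+// is hypothetical.
+//
+//	var handlers request.Handlers
+//	handlers.Build.PushBackNamed(corehandlers.BuildContentLengthHandler)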
+
+var reStatusCode = regexp.MustCompile(`^(\d{3})`)
+
+// ValidateReqSigHandler is a request handler to ensure that the request's
+// signature doesn't expire before it is sent. This can happen when a request
+// is built and signed significantly before it is sent, or when significant
+// delays occur while retrying requests, causing the signature to expire.
+var ValidateReqSigHandler = request.NamedHandler{
+ Name: "core.ValidateReqSigHandler",
+ Fn: func(r *request.Request) {
+		// Requests using anonymous credentials are never signed; skip them.
+ if r.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ signedTime := r.Time
+ if !r.LastSignedAt.IsZero() {
+ signedTime = r.LastSignedAt
+ }
+
+ // 5 minutes to allow for some clock skew/delays in transmission.
+ // Would be improved with aws/aws-sdk-go#423
+ if signedTime.Add(5 * time.Minute).After(time.Now()) {
+ return
+ }
+
+ fmt.Println("request expired, resigning")
+ r.Sign()
+ },
+}
+
+// SendHandler is a request handler to send service request using HTTP client.
+var SendHandler = request.NamedHandler{
+ Name: "core.SendHandler",
+ Fn: func(r *request.Request) {
+ sender := sendFollowRedirects
+ if r.DisableFollowRedirects {
+ sender = sendWithoutFollowRedirects
+ }
+
+ if request.NoBody == r.HTTPRequest.Body {
+			// Strip off the request body if the NoBody reader was used as a
+			// placeholder for a request body. This prevents the SDK from
+			// making requests with a request body when it would be invalid
+			// to do so.
+			//
+			// Use a shallow copy of the http.Request to ensure the race
+			// condition on the transport's access to Body is not triggered.
+ reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
+ reqCopy.Body = nil
+ r.HTTPRequest = &reqCopy
+ defer func() {
+ r.HTTPRequest = reqOrig
+ }()
+ }
+
+ var err error
+ r.HTTPResponse, err = sender(r)
+ if err != nil {
+ handleSendError(r, err)
+ }
+ },
+}
+
+func sendFollowRedirects(r *request.Request) (*http.Response, error) {
+ return r.Config.HTTPClient.Do(r.HTTPRequest)
+}
+
+func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
+ transport := r.Config.HTTPClient.Transport
+ if transport == nil {
+ transport = http.DefaultTransport
+ }
+
+ return transport.RoundTrip(r.HTTPRequest)
+}
+
+func handleSendError(r *request.Request, err error) {
+ // Prevent leaking if an HTTPResponse was returned. Clean up
+ // the body.
+ if r.HTTPResponse != nil {
+ r.HTTPResponse.Body.Close()
+ }
+	// Capture the case where a url.Error is returned while processing the
+	// error response. e.g. a 301 without a Location header comes back as a
+	// string error and r.HTTPResponse is nil. Other URL redirect errors
+	// will come back in a similar manner.
+ if e, ok := err.(*url.Error); ok && e.Err != nil {
+ if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
+ code, _ := strconv.ParseInt(s[1], 10, 64)
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(code),
+ Status: http.StatusText(int(code)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ return
+ }
+ }
+ if r.HTTPResponse == nil {
+		// Add a placeholder response object to ensure the HTTPResponse
+		// value is always consistent.
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(0),
+ Status: http.StatusText(int(0)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ }
+ // Catch all request errors, and let the default retrier determine
+ // if the error is retryable.
+ r.Error = awserr.New("RequestError", "send request failed", err)
+
+	// Override the error with a context canceled error, if the context was canceled.
+ ctx := r.Context()
+ select {
+ case <-ctx.Done():
+ r.Error = awserr.New(request.CanceledErrorCode,
+ "request context canceled", ctx.Err())
+ r.Retryable = aws.Bool(false)
+ default:
+ }
+}
+
+// ValidateResponseHandler is a request handler to validate service response.
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
+ if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
+ // this may be replaced by an UnmarshalError handler
+ r.Error = awserr.New("UnknownError", "unknown error", nil)
+ }
+}}
+
+// AfterRetryHandler performs final checks to determine if the request should
+// be retried and how long to delay.
+var AfterRetryHandler = request.NamedHandler{
+ Name: "core.AfterRetryHandler",
+ Fn: func(r *request.Request) {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
+ r.Retryable = aws.Bool(r.ShouldRetry(r))
+ }
+
+ if r.WillRetry() {
+ r.RetryDelay = r.RetryRules(r)
+
+ if sleepFn := r.Config.SleepDelay; sleepFn != nil {
+ // Support SleepDelay for backwards compatibility and testing
+ sleepFn(r.RetryDelay)
+ } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
+ r.Error = awserr.New(request.CanceledErrorCode,
+ "request context canceled", err)
+ r.Retryable = aws.Bool(false)
+ return
+ }
+
+ // when the expired token exception occurs the credentials
+ // need to be expired locally so that the next request to
+ // get credentials will trigger a credentials refresh.
+ if r.IsErrorExpired() {
+ r.Config.Credentials.Expire()
+ }
+
+ r.RetryCount++
+ r.Error = nil
+ }
+ }}
+
+// ValidateEndpointHandler is a request handler to validate a request had the
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
+// region is not valid.
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
+ if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
+ r.Error = aws.ErrMissingRegion
+ } else if r.ClientInfo.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
new file mode 100644
index 000000000..7d50b1557
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
@@ -0,0 +1,17 @@
+package corehandlers
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+// ValidateParametersHandler is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
+ if !r.ParamsFilled() {
+ return
+ }
+
+ if v, ok := r.Params.(request.Validator); ok {
+ if err := v.Validate(); err != nil {
+ r.Error = err
+ }
+ }
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
new file mode 100644
index 000000000..ab69c7a6f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
@@ -0,0 +1,37 @@
+package corehandlers
+
+import (
+ "os"
+ "runtime"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
+// to the user agent.
+var SDKVersionUserAgentHandler = request.NamedHandler{
+ Name: "core.SDKVersionUserAgentHandler",
+ Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+ runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
+
+const execEnvVar = `AWS_EXECUTION_ENV`
+const execEnvUAKey = `exec-env`
+
+// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
+// execution environment to the user agent.
+//
+// If the environment variable AWS_EXECUTION_ENV is set, its value will be
+// appended to the user agent string.
+var AddHostExecEnvUserAgentHander = request.NamedHandler{
+ Name: "core.AddHostExecEnvUserAgentHander",
+ Fn: func(r *request.Request) {
+ v := os.Getenv(execEnvVar)
+ if len(v) == 0 {
+ return
+ }
+
+ request.AddToUserAgent(r, execEnvUAKey+"/"+v)
+ },
+}
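+
+// Example (editor's illustrative sketch, not part of the upstream source);
+// the environment value below is only a placeholder.
+//
+//	os.Setenv("AWS_EXECUTION_ENV", "AWS_Lambda_go1.x")
+//	// Requests sent afterwards carry "exec-env/AWS_Lambda_go1.x" in
+//	// their User-Agent header.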
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 000000000..3ad1e798d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,100 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+	// ErrNoValidProvidersFoundInChain is returned when there are no valid
+	// providers in the ChainProvider.
+ //
+ // This has been deprecated. For verbose error messaging set
+ // aws.Config.CredentialsChainVerboseErrors to true.
+ ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+ `no valid providers in chain. Deprecated.
+ For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+ nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain
+//
+// creds := credentials.NewChainCredentials(
+// []credentials.Provider{
+// &credentials.EnvProvider{},
+// &ec2rolecreds.EC2RoleProvider{
+// Client: ec2metadata.New(sess),
+// },
+// })
+//
+// // Usage of ChainCredentials with aws.Config
+// svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+// Credentials: creds,
+// })))
+//
+type ChainProvider struct {
+ Providers []Provider
+ curr Provider
+ VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+ return NewCredentials(&ChainProvider{
+ Providers: append([]Provider{}, providers...),
+ })
+}
+
+// Retrieve returns the credentials value, or an error if no provider
+// returned without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+ var errs []error
+ for _, p := range c.Providers {
+ creds, err := p.Retrieve()
+ if err == nil {
+ c.curr = p
+ return creds, nil
+ }
+ errs = append(errs, err)
+ }
+ c.curr = nil
+
+ var err error
+ err = ErrNoValidProvidersFoundInChain
+ if c.VerboseErrors {
+ err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+ }
+ return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+ if c.curr != nil {
+ return c.curr.IsExpired()
+ }
+
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 000000000..4af592158
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,299 @@
+// Package credentials provides credential retrieval and management
+//
+// The Credentials type is the primary method of getting access to and managing
+// credential Values. Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call the Provider's Retrieve() to get a new
+// credential Value.
+//
+// The Provider is responsible for determining when the credentials Value has expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+// creds := credentials.NewEnvCredentials()
+//
+// // Retrieve the credentials value
+// credValue, err := creds.Get()
+// if err != nil {
+// // handle error
+// }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer set up with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+// creds := credentials.NewCredentials(&MyProvider{})
+// credValue, err := creds.Get()
+//
+package credentials
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+// svc := s3.New(session.Must(session.NewSession(&aws.Config{
+// Credentials: credentials.AnonymousCredentials,
+// })))
+// // Access public S3 buckets.
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+
+ // Provider used to get credentials
+ ProviderName string
+}
+
+// HasKeys returns if the credentials Value has both AccessKeyID and
+// SecretAccessKey value set.
+func (v Value) HasKeys() bool {
+ return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+ // Retrieve returns nil if it successfully retrieved the value.
+	// An error is returned if the value was not obtainable, or is empty.
+ Retrieve() (Value, error)
+
+ // IsExpired returns if the credentials are no longer valid, and need
+ // to be retrieved.
+ IsExpired() bool
+}
+
+// An Expirer is an interface that Providers can implement to expose the expiration
+// time, if known. If the Provider cannot accurately provide this info,
+// it should not implement this interface.
+type Expirer interface {
+ // The time at which the credentials are no longer valid
+ ExpiresAt() time.Time
+}
+
+// An ErrorProvider is a stub credentials provider that always returns an error.
+// It is used by the SDK when constructing a known provider is not possible
+// due to an error.
+type ErrorProvider struct {
+ // The error to be returned from Retrieve
+ Err error
+
+	// The provider name to set on the Value returned from Retrieve.
+ ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+ return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return false; an ErrorProvider never expires.
+func (p ErrorProvider) IsExpired() bool {
+ return false
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+// type EC2RoleProvider struct {
+// Expiry
+// ...
+// }
+type Expiry struct {
+ // The date/time when to expire on
+ expiration time.Time
+
+ // If set will be used by IsExpired to determine the current time.
+ // Defaults to time.Now if CurrentTime is not set. Available for testing
+ // to be able to mock out the current time.
+ CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+ e.expiration = expiration
+ if window > 0 {
+ e.expiration = e.expiration.Add(-window)
+ }
+}
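+
+// Example (editor's illustrative sketch, not part of the upstream source):
+// expire credentials 10 seconds before their real expiration so requests
+// are not signed with nearly-expired tokens. The variable e is hypothetical.
+//
+//	var e credentials.Expiry
+//	e.SetExpiration(time.Now().Add(15*time.Minute), 10*time.Second)
+//	// IsExpired() begins returning true 10s before the 15 minute mark.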
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+ curTime := e.CurrentTime
+ if curTime == nil {
+ curTime = time.Now
+ }
+ return e.expiration.Before(curTime())
+}
+
+// ExpiresAt returns the expiration time of the credential
+func (e *Expiry) ExpiresAt() time.Time {
+ return e.expiration
+}
+
+// A Credentials provides concurrency safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+ creds Value
+ forceRefresh bool
+
+ m sync.RWMutex
+
+ provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+ return &Credentials{
+ provider: provider,
+ forceRefresh: true,
+ }
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ // Check the cached credentials first with just the read lock.
+ c.m.RLock()
+ if !c.isExpired() {
+ creds := c.creds
+ c.m.RUnlock()
+ return creds, nil
+ }
+ c.m.RUnlock()
+
+	// Credentials are expired. Retrieve new credentials while holding the
+	// full lock.
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if c.isExpired() {
+ creds, err := c.provider.Retrieve()
+ if err != nil {
+ return Value{}, err
+ }
+ c.creds = creds
+ c.forceRefresh = false
+ }
+
+ return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be retrieved.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+ return c.forceRefresh || c.provider.IsExpired()
+}
+
+// ExpiresAt provides access to the functionality of the Expirer interface of
+// the underlying Provider, if it supports that interface. Otherwise, it returns
+// an error.
+func (c *Credentials) ExpiresAt() (time.Time, error) {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ expirer, ok := c.provider.(Expirer)
+ if !ok {
+ return time.Time{}, awserr.New("ProviderNotExpirer",
+ fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName),
+ nil)
+ }
+ if c.forceRefresh {
+ // set expiration time to the distant past
+ return time.Time{}, nil
+ }
+ return expirer.ExpiresAt(), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
new file mode 100644
index 000000000..43d4ed386
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -0,0 +1,180 @@
+package ec2rolecreds
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkuri"
+)
+
+// ProviderName provides a name of EC2Role provider
+const ProviderName = "EC2RoleProvider"
+
+// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps
+// track of whether those credentials are expired.
+//
+// Example of how to configure the EC2RoleProvider with a custom HTTP client,
+// Endpoint, or ExpiryWindow:
+//
+// p := &ec2rolecreds.EC2RoleProvider{
+// // Pass in a custom timeout to be used when requesting
+// // IAM EC2 Role credentials.
+// Client: ec2metadata.New(sess, aws.Config{
+// HTTPClient: &http.Client{Timeout: 10 * time.Second},
+// }),
+//
+//         // Do not use early expiry of credentials. If a non-zero value is
+//         // specified, the credentials will be expired early.
+// ExpiryWindow: 0,
+// }
+type EC2RoleProvider struct {
+ credentials.Expiry
+
+	// Required EC2Metadata client to use when connecting to the EC2 metadata service.
+ Client *ec2metadata.EC2Metadata
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create an EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: ec2metadata.New(c),
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to the
+// EC2 metadata service.
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: client,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or if the desired
+// credentials cannot be extracted.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+ credsList, err := requestCredList(m.Client)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ if len(credsList) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+ }
+ credsName := credsList[0]
+
+ roleCreds, err := requestCred(m.Client, credsName)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+ return credentials.Value{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ ProviderName: ProviderName,
+ }, nil
+}
+
+// A ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+ // Success State
+ Expiration time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+
+ // Error state
+ Code string
+ Message string
+}
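+
+// Editor's illustrative sketch of a successful response body that
+// ec2RoleCredRespBody is expected to decode; every value is a placeholder:
+//
+//	{
+//	  "Code" : "Success",
+//	  "Expiration" : "2016-02-25T06:03:31Z",
+//	  "AccessKeyId" : "AKID...",
+//	  "SecretAccessKey" : "SECRET...",
+//	  "Token" : "TOKEN..."
+//	}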
+
+const iamSecurityCredsPath = "iam/security-credentials/"
+
+// requestCredList requests a list of credentials from the EC2 service.
+// If there are no credentials, or there is an error making or receiving the
+// request, an error will be returned.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+ resp, err := client.GetMetadata(iamSecurityCredsPath)
+ if err != nil {
+ return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+ }
+
+ credsList := []string{}
+ s := bufio.NewScanner(strings.NewReader(resp))
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, awserr.New(request.ErrCodeSerialization,
+ "failed to read EC2 instance role from metadata service", err)
+ }
+
+ return credsList, nil
+}
+
+// requestCred requests the credentials for a specific role from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+ resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
+ if err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New("EC2RoleRequestError",
+ fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ respCreds := ec2RoleCredRespBody{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New(request.ErrCodeSerialization,
+ fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ if respCreds.Code != "Success" {
+ // If an error code was returned something failed requesting the role.
+ return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+ }
+
+ return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
new file mode 100644
index 000000000..1a7af53a4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
@@ -0,0 +1,203 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// }
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response. The format of the refreshable credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// "Token" : "AQoDY....=",
+// "Expiration" : "2016-02-25T06:03:31Z"
+// }
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+// {
+// "code": "ErrorCode",
+// "message": "Helpful error message."
+// }
+package endpointcreds
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+// Provider satisfies the credentials.Provider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+ staticCreds bool
+ credentials.Expiry
+
+	// Requires an AWS Client to make HTTP requests to the endpoint with.
+	// The endpoint the request will be made to is provided by the aws.Config's
+	// Endpoint value.
+ Client *client.Client
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+	// Optional authorization token value. If set, it will be used as the value
+	// of the Authorization header of the endpoint credential request.
+ AuthorizationToken string
+}
+
+// NewProviderClient returns a credentials Provider for retrieving AWS credentials
+// from an arbitrary endpoint.
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
+ p := &Provider{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "CredentialsEndpoint",
+ Endpoint: endpoint,
+ },
+ handlers,
+ ),
+ }
+
+ p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
+ p.Client.Handlers.Validate.Clear()
+ p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return p
+}
+
+// NewCredentialsClient returns a pointer to a new Credentials object
+// wrapping the endpoint credentials Provider.
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
+ return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *Provider) IsExpired() bool {
+ if p.staticCreds {
+ return false
+ }
+ return p.Expiry.IsExpired()
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+ resp, err := p.getCredentials()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName},
+ awserr.New("CredentialsEndpointError", "failed to load credentials", err)
+ }
+
+ if resp.Expiration != nil {
+ p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+ } else {
+ p.staticCreds = true
+ }
+
+ return credentials.Value{
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.Token,
+ ProviderName: ProviderName,
+ }, nil
+}
+
+type getCredentialsOutput struct {
+ Expiration *time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+}
+
+type errorOutput struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+}
+
+func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
+ op := &request.Operation{
+ Name: "GetCredentials",
+ HTTPMethod: "GET",
+ }
+
+ out := &getCredentialsOutput{}
+ req := p.Client.NewRequest(op, nil, out)
+ req.HTTPRequest.Header.Set("Accept", "application/json")
+ if authToken := p.AuthorizationToken; len(authToken) != 0 {
+ req.HTTPRequest.Header.Set("Authorization", authToken)
+ }
+
+ return out, req.Send()
+}
+
+func validateEndpointHandler(r *request.Request) {
+ if len(r.ClientInfo.Endpoint) == 0 {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
+
+func unmarshalHandler(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ out := r.Data.(*getCredentialsOutput)
+ if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization,
+ "failed to decode endpoint credentials",
+ err,
+ )
+ }
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ var errOut errorOutput
+ err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to decode error message", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error
+ r.Error = awserr.New(errOut.Code, errOut.Message, nil)
+}
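+
+// Example (a minimal illustrative sketch; assumes this package is imported as
+// endpointcreds, and the local endpoint URL shown is hypothetical):
+//
+// sess := session.Must(session.NewSession())
+// creds := endpointcreds.NewCredentialsClient(
+// 	*sess.Config, sess.Handlers, "http://127.0.0.1:8080/creds",
+// 	func(p *endpointcreds.Provider) {
+// 		p.ExpiryWindow = 10 * time.Second
+// 	})
+// svc := s3.New(sess, &aws.Config{Credentials: creds})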
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 000000000..54c5cf733
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,74 @@
+package credentials
+
+import (
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// EnvProviderName provides the name of the Env provider
+const EnvProviderName = "EnvProvider"
+
+var (
+ // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+ // found in the process's environment.
+ ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
+
+ // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
+ // can't be found in the process's environment.
+ ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
+)
+
+// An EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+//
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
+type EnvProvider struct {
+ retrieved bool
+}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+ return NewCredentials(&EnvProvider{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("AWS_ACCESS_KEY_ID")
+ if id == "" {
+ id = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secret == "" {
+ secret = os.Getenv("AWS_SECRET_KEY")
+ }
+
+ if id == "" {
+ return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
+ }
+
+ if secret == "" {
+ return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+ ProviderName: EnvProviderName,
+ }, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
+func (e *EnvProvider) IsExpired() bool {
+ return !e.retrieved
+}
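+
+// Example (a minimal illustrative sketch; the key values are placeholders):
+//
+// os.Setenv("AWS_ACCESS_KEY_ID", "AKID")
+// os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET")
+//
+// creds := credentials.NewEnvCredentials()
+// v, err := creds.Get() // v.ProviderName == EnvProviderName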
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
new file mode 100644
index 000000000..7fc91d9d2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
@@ -0,0 +1,12 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
new file mode 100644
index 000000000..1980c8c14
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
@@ -0,0 +1,425 @@
+/*
+Package processcreds is a credential Provider to retrieve `credential_process`
+credentials.
+
+WARNING: The following describes a method of sourcing credentials from an external
+process. This can potentially be dangerous, so proceed with caution. Other
+credential providers should be preferred if at all possible. If using this
+option, you should make sure that the config file is as locked down as possible
+using security best practices for your operating system.
+
+You can use credentials from a `credential_process` in a variety of ways.
+
+One way is to set up your shared config file, located in the default
+location, with the `credential_process` key and the command you want to be
+called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
+(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
+
+ [default]
+ credential_process = /command/to/call
+
+Creating a new session will use the credential process to retrieve credentials.
+NOTE: If there are credentials in the profile you are using, the credential
+process will not be used.
+
+ // Initialize a session to load credentials.
+ sess, _ := session.NewSession(&aws.Config{
+ Region: aws.String("us-east-1")},
+ )
+
+ // Create S3 service client to use the credentials.
+ svc := s3.New(sess)
+
+Another way to use the `credential_process` method is by using
+`processcreds.NewCredentials()` and providing a command to be executed to
+retrieve credentials:
+
+ // Create credentials using the ProcessProvider.
+ creds := processcreds.NewCredentials("/path/to/command")
+
+ // Create service client value configured for credentials.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+You can set a non-default timeout for the `credential_process` with another
+constructor, `processcreds.NewCredentialsTimeout()`, providing the timeout. To
+set a 500 millisecond timeout:
+
+ // Create credentials using the ProcessProvider.
+ creds := processcreds.NewCredentialsTimeout(
+ "/path/to/command",
+ time.Duration(500) * time.Millisecond)
+
+If you need more control, you can set any configurable options in the
+credentials using one or more option functions. For example, you can set a two
+minute timeout, a credential duration of 60 minutes, and a maximum stdout
+buffer size of 2k.
+
+ creds := processcreds.NewCredentials(
+ "/path/to/command",
+ func(opt *ProcessProvider) {
+ opt.Timeout = time.Duration(2) * time.Minute
+ opt.Duration = time.Duration(60) * time.Minute
+ opt.MaxBufSize = 2048
+ })
+
+You can also use your own `exec.Cmd`:
+
+ // Create an exec.Cmd
+ myCommand := exec.Command("/path/to/command")
+
+ // Create credentials using your exec.Cmd and custom timeout
+ creds := processcreds.NewCredentialsCommand(
+ myCommand,
+ func(opt *processcreds.ProcessProvider) {
+ opt.Timeout = time.Duration(1) * time.Second
+ })
+*/
+package processcreds
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+const (
+ // ProviderName is the name this credentials provider will label any
+ // returned credentials Value with.
+ ProviderName = `ProcessProvider`
+
+ // ErrCodeProcessProviderParse error parsing process output
+ ErrCodeProcessProviderParse = "ProcessProviderParseError"
+
+ // ErrCodeProcessProviderVersion version error in output
+ ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
+
+ // ErrCodeProcessProviderRequired required attribute missing in output
+ ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
+
+ // ErrCodeProcessProviderExecution execution of command failed
+ ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
+
+ // errMsgProcessProviderTimeout process took longer than allowed
+ errMsgProcessProviderTimeout = "credential process timed out"
+
+ // errMsgProcessProviderProcess process error
+ errMsgProcessProviderProcess = "error in credential_process"
+
+ // errMsgProcessProviderParse problem parsing output
+ errMsgProcessProviderParse = "parse failed of credential_process output"
+
+ // errMsgProcessProviderVersion version error in output
+ errMsgProcessProviderVersion = "wrong version in process output (not 1)"
+
+ // errMsgProcessProviderMissKey missing access key id in output
+ errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
+
+ // errMsgProcessProviderMissSecret missing secret access key in output
+ errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
+
+ // errMsgProcessProviderPrepareCmd prepare of command failed
+ errMsgProcessProviderPrepareCmd = "failed to prepare command"
+
+ // errMsgProcessProviderEmptyCmd command must not be empty
+ errMsgProcessProviderEmptyCmd = "command must not be empty"
+
+ // errMsgProcessProviderPipe failed to initialize pipe
+ errMsgProcessProviderPipe = "failed to initialize pipe"
+
+ // DefaultDuration is the default amount of time that the
+ // credentials will be valid for.
+ DefaultDuration = time.Duration(15) * time.Minute
+
+ // DefaultBufSize limits the buffer size, preventing it from growing to an
+ // enormous amount due to a faulty process.
+ DefaultBufSize = 1024
+
+ // DefaultTimeout default limit on time a process can run.
+ DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProcessProvider satisfies the credentials.Provider interface, and is a
+// client to retrieve credentials from a process.
+type ProcessProvider struct {
+ staticCreds bool
+ credentials.Expiry
+ originalCommand []string
+
+ // Expiry duration of the credentials. Defaults to 15 minutes if not set.
+ Duration time.Duration
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+ // The command to be executed as an os/exec.Cmd, which should write a JSON
+ // document with credential information to stdout.
+ command *exec.Cmd
+
+ // MaxBufSize limits memory usage, preventing it from growing to an
+ // enormous amount due to a faulty process.
+ MaxBufSize int
+
+ // Timeout limits the time a process can run.
+ Timeout time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// ProcessProvider. The credentials will expire every 15 minutes by default.
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
+ p := &ProcessProvider{
+ command: exec.Command(command),
+ Duration: DefaultDuration,
+ Timeout: DefaultTimeout,
+ MaxBufSize: DefaultBufSize,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsTimeout returns a pointer to a new Credentials object with
+// the specified command and timeout, and default duration and max buffer size.
+func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
+ p := NewCredentials(command, func(opt *ProcessProvider) {
+ opt.Timeout = timeout
+ })
+
+ return p
+}
+
+// NewCredentialsCommand returns a pointer to a new Credentials object with
+// the specified command, and default timeout, duration and max buffer size.
+func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
+ p := &ProcessProvider{
+ command: command,
+ Duration: DefaultDuration,
+ Timeout: DefaultTimeout,
+ MaxBufSize: DefaultBufSize,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+type credentialProcessResponse struct {
+ Version int
+ AccessKeyID string `json:"AccessKeyId"`
+ SecretAccessKey string
+ SessionToken string
+ Expiration *time.Time
+}
+
+// Retrieve executes the 'credential_process' and returns the credentials.
+func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
+ out, err := p.executeCredentialProcess()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ // Serialize and validate response
+ resp := &credentialProcessResponse{}
+ if err = json.Unmarshal(out, resp); err != nil {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderParse,
+ fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)),
+ err)
+ }
+
+ if resp.Version != 1 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderVersion,
+ errMsgProcessProviderVersion,
+ nil)
+ }
+
+ if len(resp.AccessKeyID) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderRequired,
+ errMsgProcessProviderMissKey,
+ nil)
+ }
+
+ if len(resp.SecretAccessKey) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderRequired,
+ errMsgProcessProviderMissSecret,
+ nil)
+ }
+
+ // Handle expiration
+ p.staticCreds = resp.Expiration == nil
+ if resp.Expiration != nil {
+ p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+ }
+
+ return credentials.Value{
+ ProviderName: ProviderName,
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.SessionToken,
+ }, nil
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *ProcessProvider) IsExpired() bool {
+ if p.staticCreds {
+ return false
+ }
+ return p.Expiry.IsExpired()
+}
+
+// prepareCommand prepares the command to be executed.
+func (p *ProcessProvider) prepareCommand() error {
+
+ var cmdArgs []string
+ if runtime.GOOS == "windows" {
+ cmdArgs = []string{"cmd.exe", "/C"}
+ } else {
+ cmdArgs = []string{"sh", "-c"}
+ }
+
+ if len(p.originalCommand) == 0 {
+ p.originalCommand = make([]string, len(p.command.Args))
+ copy(p.originalCommand, p.command.Args)
+
+ // check for an empty command because it would otherwise silently succeed
+ if len(strings.TrimSpace(p.originalCommand[0])) < 1 {
+ return awserr.New(
+ ErrCodeProcessProviderExecution,
+ fmt.Sprintf(
+ "%s: %s",
+ errMsgProcessProviderPrepareCmd,
+ errMsgProcessProviderEmptyCmd),
+ nil)
+ }
+ }
+
+ cmdArgs = append(cmdArgs, p.originalCommand...)
+ p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...)
+ p.command.Env = os.Environ()
+
+ return nil
+}
+
+// executeCredentialProcess starts the credential process on the OS and
+// returns the results or an error.
+func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) {
+
+ if err := p.prepareCommand(); err != nil {
+ return nil, err
+ }
+
+ // Set up the pipes
+ outReadPipe, outWritePipe, err := os.Pipe()
+ if err != nil {
+ return nil, awserr.New(
+ ErrCodeProcessProviderExecution,
+ errMsgProcessProviderPipe,
+ err)
+ }
+
+ p.command.Stderr = os.Stderr // display stderr on console for MFA
+ p.command.Stdout = outWritePipe // get creds json on process's stdout
+ p.command.Stdin = os.Stdin // enable stdin for MFA
+
+ output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize))
+
+ stdoutCh := make(chan error, 1)
+ go readInput(
+ io.LimitReader(outReadPipe, int64(p.MaxBufSize)),
+ output,
+ stdoutCh)
+
+ execCh := make(chan error, 1)
+ go executeCommand(*p.command, execCh)
+
+ finished := false
+ var errors []error
+ for !finished {
+ select {
+ case readError := <-stdoutCh:
+ errors = appendError(errors, readError)
+ finished = true
+ case execError := <-execCh:
+ err := outWritePipe.Close()
+ errors = appendError(errors, err)
+ errors = appendError(errors, execError)
+ if errors != nil {
+ return output.Bytes(), awserr.NewBatchError(
+ ErrCodeProcessProviderExecution,
+ errMsgProcessProviderProcess,
+ errors)
+ }
+ case <-time.After(p.Timeout):
+ finished = true
+ return output.Bytes(), awserr.NewBatchError(
+ ErrCodeProcessProviderExecution,
+ errMsgProcessProviderTimeout,
+ errors) // errors can be nil
+ }
+ }
+
+ out := output.Bytes()
+
+ if runtime.GOOS == "windows" {
+ // windows adds slashes to quotes
+ out = []byte(strings.Replace(string(out), `\"`, `"`, -1))
+ }
+
+ return out, nil
+}
+
+// appendError conveniently checks for nil before appending to the slice
+func appendError(errors []error, err error) []error {
+ if err != nil {
+ return append(errors, err)
+ }
+ return errors
+}
+
+func executeCommand(cmd exec.Cmd, exec chan error) {
+ // Start the command
+ err := cmd.Start()
+ if err == nil {
+ err = cmd.Wait()
+ }
+
+ exec <- err
+}
+
+func readInput(r io.Reader, w io.Writer, read chan error) {
+ tee := io.TeeReader(r, w)
+
+ _, err := ioutil.ReadAll(tee)
+
+ if err == io.EOF {
+ err = nil
+ }
+
+ read <- err // will only arrive here when write end of pipe is closed
+}
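+
+// For reference, a sketch of the JSON document a credential_process command
+// is expected to write to stdout (values are placeholders). Version must be
+// 1, AccessKeyId and SecretAccessKey are required, and omitting Expiration
+// marks the credentials as static:
+//
+// {
+// 	"Version": 1,
+// 	"AccessKeyId": "AKID",
+// 	"SecretAccessKey": "SECRET",
+// 	"SessionToken": "TOKEN",
+// 	"Expiration": "2019-05-29T00:21:43Z"
+// }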
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100644
index 000000000..e15514958
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -0,0 +1,150 @@
+package credentials
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/internal/ini"
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredsProviderName provides the name of the SharedCreds provider
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
+var (
+ // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
+ ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
+)
+
+// A SharedCredentialsProvider retrieves credentials from the current user's home
+// directory, and keeps track of whether those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type SharedCredentialsProvider struct {
+ // Path to the shared credentials file.
+ //
+ // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+ // env value is empty will default to current user's home directory.
+ // Linux/OSX: "$HOME/.aws/credentials"
+ // Windows: "%USERPROFILE%\.aws\credentials"
+ Filename string
+
+ // AWS Profile to extract credentials from the shared credentials file. If empty
+ // will default to environment variable "AWS_PROFILE" or "default" if
+ // environment variable is also not set.
+ Profile string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+ return NewCredentials(&SharedCredentialsProvider{
+ Filename: filename,
+ Profile: profile,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+ p.retrieved = false
+
+ filename, err := p.filename()
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ creds, err := loadProfile(filename, p.profile())
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ p.retrieved = true
+ return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+ return !p.retrieved
+}
+
+// loadProfile loads from the file pointed to by shared credentials filename for profile.
+// The credentials retrieved from the profile will be returned, or an error if it
+// fails to read from the file or the data is invalid.
+func loadProfile(filename, profile string) (Value, error) {
+ config, err := ini.OpenFile(filename)
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+ }
+
+ iniProfile, ok := config.GetSection(profile)
+ if !ok {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
+ }
+
+ id := iniProfile.String("aws_access_key_id")
+ if len(id) == 0 {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+ nil)
+ }
+
+ secret := iniProfile.String("aws_secret_access_key")
+ if len(secret) == 0 {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+ nil)
+ }
+
+ // Default to empty string if not found
+ token := iniProfile.String("aws_session_token")
+
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ ProviderName: SharedCredsProviderName,
+ }, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+ if len(p.Filename) != 0 {
+ return p.Filename, nil
+ }
+
+ if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
+ return p.Filename, nil
+ }
+
+ if home := shareddefaults.UserHomeDir(); len(home) == 0 {
+ // Backwards compatibility of the home directory not found error being
+ // returned. This error is too verbose; a failure when opening the file
+ // would have been a better error to return.
+ return "", ErrSharedCredentialsHomeNotFound
+ }
+
+ p.Filename = shareddefaults.SharedCredentialsFilename()
+
+ return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+ if p.Profile == "" {
+ p.Profile = os.Getenv("AWS_PROFILE")
+ }
+ if p.Profile == "" {
+ p.Profile = "default"
+ }
+
+ return p.Profile
+}
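+
+// Example (a minimal illustrative sketch; the path and profile name are
+// placeholders, and empty strings fall back to the env variables and
+// defaults described above):
+//
+// creds := credentials.NewSharedCredentials("/home/user/.aws/credentials", "default")
+// v, err := creds.Get()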
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 000000000..531139e39
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,55 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName provides the name of the Static provider
+const StaticProviderName = "StaticProvider"
+
+var (
+ // ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+ ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+ Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+ return NewCredentials(&StaticProvider{Value: Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ }})
+}
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provider. Same as NewStaticCredentials
+// but takes the creds Value instead of individual fields.
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+ return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+ if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+ return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+ }
+
+ if len(s.Value.ProviderName) == 0 {
+ s.Value.ProviderName = StaticProviderName
+ }
+ return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+ return false
+}
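+
+// Example (a minimal illustrative sketch; the key values are placeholders):
+//
+// creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
+// v, err := creds.Get() // IsExpired will always report false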
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 000000000..2e528d130
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,312 @@
+/*
+Package stscreds are credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronized usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+ // Initial credentials loaded from SDK's default credential chain. Such as
+ // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+ // Role. These credentials will be used to make the STS Assume Role API call.
+ sess := session.Must(session.NewSession())
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN.
+ creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with static MFA Token
+
+To assume an IAM role with a MFA token you can either specify a MFA token code
+directly or provide a function to prompt the user each time the credentials
+need to refresh the role's credentials. Specifying the TokenCode should be used
+for short lived operations that will not need to be refreshed, and when you do
+not want to have direct control over how the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN using the MFA token code provided.
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+ p.SerialNumber = aws.String("myTokenSerialNumber")
+ p.TokenCode = aws.String("00000000")
+ })
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for a new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+ p.SerialNumber = aws.String("myTokenSerialNumber")
+ p.TokenProvider = stscreds.StdinTokenProvider
+ })
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+*/
+package stscreds
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/internal/sdkrand"
+ "github.com/aws/aws-sdk-go/service/sts"
+)
+
+// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on stdin.
+func StdinTokenProvider() (string, error) {
+ var v string
+ fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
+ _, err := fmt.Scanln(&v)
+
+ return v, err
+}
+
+// ProviderName provides the name of the AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+ AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+ credentials.Expiry
+
+ // STS client to make assume role request with.
+ Client AssumeRoler
+
+ // Role to be assumed.
+ RoleARN string
+
+ // Session name, if you wish to reuse the credentials elsewhere.
+ RoleSessionName string
+
+ // Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+ Duration time.Duration
+
+ // Optional ExternalID to pass along, defaults to nil if not set.
+ ExternalID *string
+
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy
+ // of the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as GAHT12345678)
+ // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ SerialNumber *string
+
+ // The value provided by the MFA device, if the trust policy of the role being
+ // assumed requires MFA (that is, if the policy includes a condition that tests
+ // for MFA). If the role being assumed requires MFA and if the TokenCode value
+ // is missing or expired, the AssumeRole call returns an "access denied" error.
+ //
+ // If SerialNumber is set and neither TokenCode nor TokenProvider are also
+ // set an error will be returned.
+ TokenCode *string
+
+ // Async method of providing MFA token code for assuming an IAM role with MFA.
+ // The value returned by the function will be used as the TokenCode in the Retrieve
+ // call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+ //
+ // This token provider will be called whenever the assumed role's
+ // credentials need to be refreshed when SerialNumber is also set and
+ // TokenCode is not set.
+ //
+ // If both TokenCode and TokenProvider are set, TokenProvider will be used
+ // and TokenCode is ignored.
+ TokenProvider func() (string, error)
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+ // MaxJitterFrac reduces the effective Duration of each credential requested
+ // by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
+ // have a value between 0 and 1. Any other value may lead to unexpected behavior.
+ // With a MaxJitterFrac value of 0 (the default), no jitter will be used.
+ //
+ // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
+ // AssumeRole call will be made with an arbitrary Duration between 27m and
+ // 30m.
+ //
+ // MaxJitterFrac should not be negative.
+ MaxJitterFrac float64
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+ p := &AssumeRoleProvider{
+ Client: sts.New(c),
+ RoleARN: roleARN,
+ Duration: DefaultDuration,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes an AssumeRoler which can be satisfied by the STS client.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+ p := &AssumeRoleProvider{
+ Client: svc,
+ RoleARN: roleARN,
+ Duration: DefaultDuration,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
+ // Apply defaults where parameters are not set.
+ if p.RoleSessionName == "" {
+ // Try to work out a role name that will hopefully end up unique.
+ p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
+ }
+ if p.Duration == 0 {
+ // Expire as often as AWS permits.
+ p.Duration = DefaultDuration
+ }
+ jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration))
+ input := &sts.AssumeRoleInput{
+ DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)),
+ RoleArn: aws.String(p.RoleARN),
+ RoleSessionName: aws.String(p.RoleSessionName),
+ ExternalId: p.ExternalID,
+ }
+ if p.Policy != nil {
+ input.Policy = p.Policy
+ }
+ if p.SerialNumber != nil {
+ if p.TokenCode != nil {
+ input.SerialNumber = p.SerialNumber
+ input.TokenCode = p.TokenCode
+ } else if p.TokenProvider != nil {
+ input.SerialNumber = p.SerialNumber
+ code, err := p.TokenProvider()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+ input.TokenCode = aws.String(code)
+ } else {
+ return credentials.Value{ProviderName: ProviderName},
+ awserr.New("AssumeRoleTokenNotAvailable",
+ "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
+ }
+ }
+
+ roleOutput, err := p.Client.AssumeRole(input)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ // We will proactively generate new credentials before they expire.
+ p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
+
+ return credentials.Value{
+ AccessKeyID: *roleOutput.Credentials.AccessKeyId,
+ SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
+ SessionToken: *roleOutput.Credentials.SessionToken,
+ ProviderName: ProviderName,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
new file mode 100644
index 000000000..b20b63394
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
@@ -0,0 +1,100 @@
+package stscreds
+
+import (
+ "fmt"
+ "io/ioutil"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/service/sts"
+ "github.com/aws/aws-sdk-go/service/sts/stsiface"
+)
+
+const (
+ // ErrCodeWebIdentity will be used as an error code when constructing
+ // a new error to be returned during session creation or retrieval.
+ ErrCodeWebIdentity = "WebIdentityErr"
+
+ // WebIdentityProviderName is the web identity provider name
+ WebIdentityProviderName = "WebIdentityCredentials"
+)
+
+// now is used to return a time.Time object representing
+// the current time. This can be used to easily test and
+// compare values.
+var now = time.Now
+
+// WebIdentityRoleProvider is used to retrieve credentials using
+// an OIDC token.
+type WebIdentityRoleProvider struct {
+ credentials.Expiry
+
+ client stsiface.STSAPI
+ ExpiryWindow time.Duration
+
+ tokenFilePath string
+ roleARN string
+ roleSessionName string
+}
+
+// NewWebIdentityCredentials will return a new set of credentials with a given
+// configuration, role arn, and token file path.
+func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
+ svc := sts.New(c)
+ p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
+ return credentials.NewCredentials(p)
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI.
+func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
+ return &WebIdentityRoleProvider{
+ client: svc,
+ tokenFilePath: path,
+ roleARN: roleARN,
+ roleSessionName: roleSessionName,
+ }
+}
+
+// Retrieve will attempt to assume a role using the token located at the
+// provider's token file path. An error will be returned if reading the token
+// or assuming the role fails.
+func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
+ b, err := ioutil.ReadFile(p.tokenFilePath)
+ if err != nil {
+ errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath)
+ return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err)
+ }
+
+ sessionName := p.roleSessionName
+ if len(sessionName) == 0 {
+ // session name is used to uniquely identify a session. This simply
+ // uses unix time in nanoseconds to uniquely identify sessions.
+ sessionName = strconv.FormatInt(now().UnixNano(), 10)
+ }
+ req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
+ RoleArn: &p.roleARN,
+ RoleSessionName: &sessionName,
+ WebIdentityToken: aws.String(string(b)),
+ })
+ // InvalidIdentityToken error is a temporary error that can occur
+ // when assuming a role with a JWT web identity token.
+ req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
+ if err := req.Send(); err != nil {
+ return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
+ }
+
+ p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)
+
+ value := credentials.Value{
+ AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId),
+ SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
+ SessionToken: aws.StringValue(resp.Credentials.SessionToken),
+ ProviderName: WebIdentityProviderName,
+ }
+ return value, nil
+}
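+
+// Example (a minimal illustrative sketch; the role ARN, session name, and
+// token path are placeholders):
+//
+// sess := session.Must(session.NewSession())
+// creds := stscreds.NewWebIdentityCredentials(sess,
+// 	"arn:aws:iam::123456789012:role/myRole", "mySessionName",
+// 	"/path/to/oidc/token")
+// svc := s3.New(sess, &aws.Config{Credentials: creds})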
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go
new file mode 100644
index 000000000..a00ab6c67
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go
@@ -0,0 +1,119 @@
+package crr
+
+import (
+ "sync/atomic"
+)
+
+// EndpointCache is a bounded cache that holds a series of endpoints
+// based on some key. The data structure makes use of a concurrency-safe
+// map to enable safe concurrent use.
+type EndpointCache struct {
+ endpoints syncMap
+ endpointLimit int64
+ // size is used to count the number of elements in the cache.
+ // The atomic package is used to ensure this size is accurate when
+ // using multiple goroutines.
+ size int64
+}
+
+// NewEndpointCache will return a newly initialized cache with a limit
+// of endpointLimit entries.
+func NewEndpointCache(endpointLimit int64) *EndpointCache {
+ return &EndpointCache{
+ endpointLimit: endpointLimit,
+ endpoints: newSyncMap(),
+ }
+}
+
+// get is a concurrent safe get operation that will retrieve an endpoint
+// based on endpointKey. A boolean will also be returned to indicate whether
+// or not the endpoint had been found.
+func (c *EndpointCache) get(endpointKey string) (Endpoint, bool) {
+ endpoint, ok := c.endpoints.Load(endpointKey)
+ if !ok {
+ return Endpoint{}, false
+ }
+
+ c.endpoints.Store(endpointKey, endpoint)
+ return endpoint.(Endpoint), true
+}
+
+// Has returns whether the endpoint cache contains a valid entry for the endpoint key
+// provided.
+func (c *EndpointCache) Has(endpointKey string) bool {
+ endpoint, ok := c.get(endpointKey)
+ _, found := endpoint.GetValidAddress()
+
+ return ok && found
+}
+
+// Get will retrieve a weighted address based off of the endpoint key. If an
+// endpoint needs to be retrieved, because it does not exist or the current
+// endpoint has expired, the Discoverer object that was passed in will attempt
+// to discover a new endpoint and add that to the cache.
+func (c *EndpointCache) Get(d Discoverer, endpointKey string, required bool) (WeightedAddress, error) {
+ var err error
+ endpoint, ok := c.get(endpointKey)
+ weighted, found := endpoint.GetValidAddress()
+ shouldGet := !ok || !found
+
+ if required && shouldGet {
+ if endpoint, err = c.discover(d, endpointKey); err != nil {
+ return WeightedAddress{}, err
+ }
+
+ weighted, _ = endpoint.GetValidAddress()
+ } else if shouldGet {
+ go c.discover(d, endpointKey)
+ }
+
+ return weighted, nil
+}
+
+// Add is a concurrent safe operation that will allow new endpoints to be added
+// to the cache. If the cache is full, i.e. the number of endpoints equals
+// endpointLimit, then this will remove an arbitrary entry before adding the
+// new endpoint.
+func (c *EndpointCache) Add(endpoint Endpoint) {
+ // de-dups multiple adds of an endpoint with a pre-existing key
+ if iface, ok := c.endpoints.Load(endpoint.Key); ok {
+ e := iface.(Endpoint)
+ if e.Len() > 0 {
+ return
+ }
+ }
+ c.endpoints.Store(endpoint.Key, endpoint)
+
+ size := atomic.AddInt64(&c.size, 1)
+ if size > 0 && size > c.endpointLimit {
+ c.deleteRandomKey()
+ }
+}
+
+// deleteRandomKey will delete a random key from the cache. If
+// no key was deleted, false will be returned.
+func (c *EndpointCache) deleteRandomKey() bool {
+ atomic.AddInt64(&c.size, -1)
+ found := false
+
+ c.endpoints.Range(func(key, value interface{}) bool {
+ found = true
+ c.endpoints.Delete(key)
+
+ return false
+ })
+
+ return found
+}
+
+// discover will get and store an endpoint using the Discoverer.
+func (c *EndpointCache) discover(d Discoverer, endpointKey string) (Endpoint, error) {
+ endpoint, err := d.Discover()
+ if err != nil {
+ return Endpoint{}, err
+ }
+
+ endpoint.Key = endpointKey
+ c.Add(endpoint)
+
+ return endpoint, nil
+}
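+
+// Example (a minimal illustrative sketch; myDiscoverer stands in for any
+// type implementing the Discoverer interface from endpoint.go):
+//
+// cache := crr.NewEndpointCache(10)
+// addr, err := cache.Get(myDiscoverer, "endpointKey", true)
+// if err == nil && addr.URL != nil {
+// 	// route the request to the discovered endpoint
+// }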
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go
new file mode 100644
index 000000000..d5599188e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go
@@ -0,0 +1,99 @@
+package crr
+
+import (
+ "net/url"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+// Endpoint represents an endpoint used in endpoint discovery.
+type Endpoint struct {
+ Key string
+ Addresses WeightedAddresses
+}
+
+// WeightedAddresses represents a list of WeightedAddress.
+type WeightedAddresses []WeightedAddress
+
+// WeightedAddress represents an address with a given weight.
+type WeightedAddress struct {
+ URL *url.URL
+ Expired time.Time
+}
+
+// HasExpired will return whether or not the address has expired.
+func (e WeightedAddress) HasExpired() bool {
+ return e.Expired.Before(time.Now())
+}
+
+// Add will add a given WeightedAddress to the address list of Endpoint.
+func (e *Endpoint) Add(addr WeightedAddress) {
+ e.Addresses = append(e.Addresses, addr)
+}
+
+// Len returns the number of valid endpoints where valid means the endpoint
+// has not expired.
+func (e *Endpoint) Len() int {
+ validEndpoints := 0
+ for _, endpoint := range e.Addresses {
+ if endpoint.HasExpired() {
+ continue
+ }
+
+ validEndpoints++
+ }
+ return validEndpoints
+}
+
+// GetValidAddress will return a non-expired weighted address
+func (e *Endpoint) GetValidAddress() (WeightedAddress, bool) {
+ for i := 0; i < len(e.Addresses); i++ {
+ we := e.Addresses[i]
+
+ if we.HasExpired() {
+ e.Addresses = append(e.Addresses[:i], e.Addresses[i+1:]...)
+ i--
+ continue
+ }
+
+ return we, true
+ }
+
+ return WeightedAddress{}, false
+}
+
+// Discoverer is an interface used to discover which endpoint to hit. This
+// allows for specifics about what parameters need to be used to be contained
+// in the Discoverer implementor.
+type Discoverer interface {
+ Discover() (Endpoint, error)
+}
+
+// BuildEndpointKey will sort the keys in alphabetical order and then retrieve
+// the values in that order. Those values are then concatenated together to form
+// the endpoint key.
+func BuildEndpointKey(params map[string]*string) string {
+ keys := make([]string, len(params))
+ i := 0
+
+ for k := range params {
+ keys[i] = k
+ i++
+ }
+ sort.Strings(keys)
+
+ values := make([]string, len(params))
+ for i, k := range keys {
+ if params[k] == nil {
+ continue
+ }
+
+ values[i] = aws.StringValue(params[k])
+ }
+
+ return strings.Join(values, ".")
+}
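+
+// For example (a worked sketch): given params with keys "Operation" and
+// "TableName" mapped to "GetItem" and "myTable", the keys sort to
+// [Operation, TableName] and the resulting key is "GetItem.myTable". A nil
+// value contributes an empty segment to the joined key.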
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go
new file mode 100644
index 000000000..e414eaace
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go
@@ -0,0 +1,29 @@
+// +build go1.9
+
+package crr
+
+import (
+ "sync"
+)
+
+type syncMap sync.Map
+
+func newSyncMap() syncMap {
+ return syncMap{}
+}
+
+func (m *syncMap) Load(key interface{}) (interface{}, bool) {
+ return (*sync.Map)(m).Load(key)
+}
+
+func (m *syncMap) Store(key interface{}, value interface{}) {
+ (*sync.Map)(m).Store(key, value)
+}
+
+func (m *syncMap) Delete(key interface{}) {
+ (*sync.Map)(m).Delete(key)
+}
+
+func (m *syncMap) Range(f func(interface{}, interface{}) bool) {
+ (*sync.Map)(m).Range(f)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go
new file mode 100644
index 000000000..e0b122008
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go
@@ -0,0 +1,48 @@
+// +build !go1.9
+
+package crr
+
+import (
+ "sync"
+)
+
+type syncMap struct {
+ container map[interface{}]interface{}
+ lock sync.RWMutex
+}
+
+func newSyncMap() syncMap {
+ return syncMap{
+ container: map[interface{}]interface{}{},
+ }
+}
+
+func (m *syncMap) Load(key interface{}) (interface{}, bool) {
+ m.lock.RLock()
+ defer m.lock.RUnlock()
+
+ v, ok := m.container[key]
+ return v, ok
+}
+
+func (m *syncMap) Store(key interface{}, value interface{}) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+
+ m.container[key] = value
+}
+
+func (m *syncMap) Delete(key interface{}) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+
+ delete(m.container, key)
+}
+
+func (m *syncMap) Range(f func(interface{}, interface{}) bool) {
+ for k, v := range m.container {
+ if !f(k, v) {
+ return
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
new file mode 100644
index 000000000..25a66d1dd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -0,0 +1,69 @@
+// Package csm provides the Client Side Monitoring (CSM) client which enables
+// sending metrics via UDP connection to the CSM agent. This package provides
+// control options, and configuration for the CSM client. The client can be
+// controlled manually, or automatically via the SDK's Session configuration.
+//
+// Enabling CSM client via SDK's Session configuration
+//
+// The CSM client can be enabled automatically via SDK's Session configuration.
+// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
+// environment variable is set to a non-empty value.
+//
+// The configuration options for the CSM client via the SDK's session
+// configuration are:
+//
+// * AWS_CSM_PORT=
+// The port number the CSM agent will receive metrics on.
+//
+// * AWS_CSM_HOST=
+// The hostname, or IP address the CSM agent will receive metrics on,
+// excluding the port number.
+//
+// Manually enabling the CSM client
+//
+// The CSM client can be started, paused, and resumed manually. The Start
+// function will enable the CSM client to publish metrics to the CSM agent. It
+// is safe to call Start concurrently, but if Start is called additional times
+// with different ClientID or address it will panic.
+//
+// r, err := csm.Start("clientID", ":31000")
+// if err != nil {
+// panic(fmt.Errorf("failed starting CSM: %v", err))
+// }
+//
+// When controlling the CSM client manually, you must also inject its request
+// handlers into the SDK's Session configuration for the SDK's API clients to
+// publish metrics.
+//
+// sess, err := session.NewSession(&aws.Config{})
+// if err != nil {
+// panic(fmt.Errorf("failed loading session: %v", err))
+// }
+//
+// // Add CSM client's metric publishing request handlers to the SDK's
+// // Session Configuration.
+// r.InjectHandlers(&sess.Handlers)
+//
+// Controlling CSM client
+//
+// Once the CSM client has been enabled the Get function will return a Reporter
+// value that you can use to pause and resume the metrics published to the CSM
+// agent. If the Get function is called before the reporter is enabled with the
+// Start function or via the SDK's Session configuration, nil will be returned.
+//
+// The Pause method can be called to stop the CSM client publishing metrics to
+// the CSM agent. The Continue method will resume metric publishing.
+//
+// // Get the CSM client Reporter.
+// r := csm.Get()
+//
+// // Will pause monitoring
+// r.Pause()
+// resp, err = client.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String("bucket"),
+// Key: aws.String("key"),
+// })
+//
+// // Resume monitoring
+// r.Continue()
+package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
new file mode 100644
index 000000000..4b19e2800
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -0,0 +1,89 @@
+package csm
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+)
+
+var (
+ lock sync.Mutex
+)
+
+const (
+ // DefaultPort is used when no port is specified.
+ DefaultPort = "31000"
+
+ // DefaultHost is the host that will be used when none is specified.
+ DefaultHost = "127.0.0.1"
+)
+
+// AddressWithDefaults returns a CSM address built from the host and port
+// values. If the host or port is not set, default values will be used
+// instead. If host is "localhost" it will be replaced with "127.0.0.1".
+func AddressWithDefaults(host, port string) string {
+ if len(host) == 0 || strings.EqualFold(host, "localhost") {
+ host = DefaultHost
+ }
+
+ if len(port) == 0 {
+ port = DefaultPort
+ }
+
+ // Only an IPv6 host can contain a colon
+ if strings.Contains(host, ":") {
+ return "[" + host + "]:" + port
+ }
+
+ return host + ":" + port
+}
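+
+// Worked sketches of the rules above:
+//
+// AddressWithDefaults("", "") // "127.0.0.1:31000"
+// AddressWithDefaults("localhost", "4000") // "127.0.0.1:4000"
+// AddressWithDefaults("::1", "") // "[::1]:31000"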
+
+// Start will start a long running goroutine to capture
+// client side metrics. Calling Start multiple times will only
+// start the metric listener once and will panic if a different
+// client ID or port is passed in.
+//
+// r, err := csm.Start("clientID", "127.0.0.1:31000")
+// if err != nil {
+// panic(fmt.Errorf("expected no error, but received %v", err))
+// }
+// sess := session.NewSession()
+// r.InjectHandlers(sess.Handlers)
+//
+// svc := s3.New(sess)
+// out, err := svc.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String("bucket"),
+// Key: aws.String("key"),
+// })
+func Start(clientID string, url string) (*Reporter, error) {
+ lock.Lock()
+ defer lock.Unlock()
+
+ if sender == nil {
+ sender = newReporter(clientID, url)
+ } else {
+ if sender.clientID != clientID {
+ panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
+ }
+
+ if sender.url != url {
+ panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
+ }
+ }
+
+ if err := connect(url); err != nil {
+ sender = nil
+ return nil, err
+ }
+
+ return sender, nil
+}
+
+// Get will return a reporter if one exists; if one does not exist, nil will
+// be returned.
+func Get() *Reporter {
+ lock.Lock()
+ defer lock.Unlock()
+
+ return sender
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
new file mode 100644
index 000000000..5bacc791a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
@@ -0,0 +1,109 @@
+package csm
+
+import (
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+type metricTime time.Time
+
+func (t metricTime) MarshalJSON() ([]byte, error) {
+ ns := time.Duration(time.Time(t).UnixNano())
+ return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil
+}
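+
+// For example (a worked sketch): a metricTime of 2021-01-01T00:00:00Z
+// marshals to the millisecond Unix epoch value 1609459200000.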
+
+type metric struct {
+ ClientID *string `json:"ClientId,omitempty"`
+ API *string `json:"Api,omitempty"`
+ Service *string `json:"Service,omitempty"`
+ Timestamp *metricTime `json:"Timestamp,omitempty"`
+ Type *string `json:"Type,omitempty"`
+ Version *int `json:"Version,omitempty"`
+
+ AttemptCount *int `json:"AttemptCount,omitempty"`
+ Latency *int `json:"Latency,omitempty"`
+
+ Fqdn *string `json:"Fqdn,omitempty"`
+ UserAgent *string `json:"UserAgent,omitempty"`
+ AttemptLatency *int `json:"AttemptLatency,omitempty"`
+
+ SessionToken *string `json:"SessionToken,omitempty"`
+ Region *string `json:"Region,omitempty"`
+ AccessKey *string `json:"AccessKey,omitempty"`
+ HTTPStatusCode *int `json:"HttpStatusCode,omitempty"`
+ XAmzID2 *string `json:"XAmzId2,omitempty"`
+ XAmzRequestID *string `json:"XAmznRequestId,omitempty"`
+
+ AWSException *string `json:"AwsException,omitempty"`
+ AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"`
+ SDKException *string `json:"SdkException,omitempty"`
+ SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`
+
+ FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"`
+ FinalAWSException *string `json:"FinalAwsException,omitempty"`
+ FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"`
+ FinalSDKException *string `json:"FinalSdkException,omitempty"`
+ FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"`
+
+ DestinationIP *string `json:"DestinationIp,omitempty"`
+ ConnectionReused *int `json:"ConnectionReused,omitempty"`
+
+ AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"`
+ ConnectLatency *int `json:"ConnectLatency,omitempty"`
+ RequestLatency *int `json:"RequestLatency,omitempty"`
+ DNSLatency *int `json:"DnsLatency,omitempty"`
+ TCPLatency *int `json:"TcpLatency,omitempty"`
+ SSLLatency *int `json:"SslLatency,omitempty"`
+
+ MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
+}
+
+func (m *metric) TruncateFields() {
+ m.ClientID = truncateString(m.ClientID, 255)
+ m.UserAgent = truncateString(m.UserAgent, 256)
+
+ m.AWSException = truncateString(m.AWSException, 128)
+ m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512)
+
+ m.SDKException = truncateString(m.SDKException, 128)
+ m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512)
+
+ m.FinalAWSException = truncateString(m.FinalAWSException, 128)
+ m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512)
+
+ m.FinalSDKException = truncateString(m.FinalSDKException, 128)
+ m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512)
+}
+
+func truncateString(v *string, l int) *string {
+ if v != nil && len(*v) > l {
+ nv := (*v)[:l]
+ return &nv
+ }
+
+ return v
+}
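+
+// Example (illustrative sketch): values longer than the limit are cut to
+// their first l bytes; shorter values and nil pointers pass through
+// unchanged.
+//
+// msg := aws.String(strings.Repeat("x", 600))
+// msg = truncateString(msg, 512) // len(*msg) == 512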
+
+func (m *metric) SetException(e metricException) {
+ switch te := e.(type) {
+ case awsException:
+ m.AWSException = aws.String(te.exception)
+ m.AWSExceptionMessage = aws.String(te.message)
+ case sdkException:
+ m.SDKException = aws.String(te.exception)
+ m.SDKExceptionMessage = aws.String(te.message)
+ }
+}
+
+func (m *metric) SetFinalException(e metricException) {
+ switch te := e.(type) {
+ case awsException:
+ m.FinalAWSException = aws.String(te.exception)
+ m.FinalAWSExceptionMessage = aws.String(te.message)
+ case sdkException:
+ m.FinalSDKException = aws.String(te.exception)
+ m.FinalSDKExceptionMessage = aws.String(te.message)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
new file mode 100644
index 000000000..82a3e345e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
@@ -0,0 +1,55 @@
+package csm
+
+import (
+ "sync/atomic"
+)
+
+const (
+ runningEnum = iota
+ pausedEnum
+)
+
+var (
+ // MetricsChannelSize is the maximum number of metrics the channel will buffer
+ MetricsChannelSize = 100
+)
+
+type metricChan struct {
+ ch chan metric
+ paused *int64
+}
+
+func newMetricChan(size int) metricChan {
+ return metricChan{
+ ch: make(chan metric, size),
+ paused: new(int64),
+ }
+}
+
+func (ch *metricChan) Pause() {
+ atomic.StoreInt64(ch.paused, pausedEnum)
+}
+
+func (ch *metricChan) Continue() {
+ atomic.StoreInt64(ch.paused, runningEnum)
+}
+
+func (ch *metricChan) IsPaused() bool {
+ v := atomic.LoadInt64(ch.paused)
+ return v == pausedEnum
+}
+
+// Push will push the metric to the metric channel if the channel
+// is not paused. It reports whether the metric was accepted.
+func (ch *metricChan) Push(m metric) bool {
+ if ch.IsPaused() {
+ return false
+ }
+
+ select {
+ case ch.ch <- m:
+ return true
+ default:
+ return false
+ }
+}
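+
+// Example (illustrative sketch, not part of the upstream source): Push never
+// blocks; a full or paused channel drops the metric instead.
+//
+// ch := newMetricChan(1)
+// ch.Push(metric{}) // true: buffered
+// ch.Push(metric{}) // false: channel full, metric dropped
+// ch.Pause()
+// ch.Push(metric{}) // false: channel paused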
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
new file mode 100644
index 000000000..54a99280c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
@@ -0,0 +1,26 @@
+package csm
+
+type metricException interface {
+ Exception() string
+ Message() string
+}
+
+type requestException struct {
+ exception string
+ message string
+}
+
+func (e requestException) Exception() string {
+ return e.exception
+}
+func (e requestException) Message() string {
+ return e.message
+}
+
+type awsException struct {
+ requestException
+}
+
+type sdkException struct {
+ requestException
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
new file mode 100644
index 000000000..9186587fc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
@@ -0,0 +1,264 @@
+package csm
+
+import (
+ "encoding/json"
+ "net"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// Reporter will gather metrics of API requests made and
+// send those metrics to the CSM endpoint.
+type Reporter struct {
+ clientID string
+ url string
+ conn net.Conn
+ metricsCh metricChan
+ done chan struct{}
+}
+
+var (
+ sender *Reporter
+)
+
+func connect(url string) error {
+ const network = "udp"
+ if err := sender.connect(network, url); err != nil {
+ return err
+ }
+
+ if sender.done == nil {
+ sender.done = make(chan struct{})
+ go sender.start()
+ }
+
+ return nil
+}
+
+func newReporter(clientID, url string) *Reporter {
+ return &Reporter{
+ clientID: clientID,
+ url: url,
+ metricsCh: newMetricChan(MetricsChannelSize),
+ }
+}
+
+func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
+ if rep == nil {
+ return
+ }
+
+ now := time.Now()
+ creds, _ := r.Config.Credentials.Get()
+
+ m := metric{
+ ClientID: aws.String(rep.clientID),
+ API: aws.String(r.Operation.Name),
+ Service: aws.String(r.ClientInfo.ServiceID),
+ Timestamp: (*metricTime)(&now),
+ UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
+ Region: r.Config.Region,
+ Type: aws.String("ApiCallAttempt"),
+ Version: aws.Int(1),
+
+ XAmzRequestID: aws.String(r.RequestID),
+
+ AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
+ AccessKey: aws.String(creds.AccessKeyID),
+ }
+
+ if r.HTTPResponse != nil {
+ m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
+ }
+
+ if r.Error != nil {
+ if awserr, ok := r.Error.(awserr.Error); ok {
+ m.SetException(getMetricException(awserr))
+ }
+ }
+
+ m.TruncateFields()
+ rep.metricsCh.Push(m)
+}
+
+func getMetricException(err awserr.Error) metricException {
+ msg := err.Error()
+ code := err.Code()
+
+ switch code {
+ case "RequestError",
+ request.ErrCodeSerialization,
+ request.CanceledErrorCode:
+ return sdkException{
+ requestException{exception: code, message: msg},
+ }
+ default:
+ return awsException{
+ requestException{exception: code, message: msg},
+ }
+ }
+}
+
+func (rep *Reporter) sendAPICallMetric(r *request.Request) {
+ if rep == nil {
+ return
+ }
+
+ now := time.Now()
+ m := metric{
+ ClientID: aws.String(rep.clientID),
+ API: aws.String(r.Operation.Name),
+ Service: aws.String(r.ClientInfo.ServiceID),
+ Timestamp: (*metricTime)(&now),
+ UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
+ Type: aws.String("ApiCall"),
+ AttemptCount: aws.Int(r.RetryCount + 1),
+ Region: r.Config.Region,
+ Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)),
+ XAmzRequestID: aws.String(r.RequestID),
+ MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
+ }
+
+ if r.HTTPResponse != nil {
+ m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
+ }
+
+ if r.Error != nil {
+ if awserr, ok := r.Error.(awserr.Error); ok {
+ m.SetFinalException(getMetricException(awserr))
+ }
+ }
+
+ m.TruncateFields()
+
+ // TODO: Probably want to figure something out for logging dropped
+ // metrics
+ rep.metricsCh.Push(m)
+}
+
+func (rep *Reporter) connect(network, url string) error {
+ if rep.conn != nil {
+ rep.conn.Close()
+ }
+
+ conn, err := net.Dial(network, url)
+ if err != nil {
+ return awserr.New("UDPError", "Could not connect", err)
+ }
+
+ rep.conn = conn
+
+ return nil
+}
+
+func (rep *Reporter) close() {
+ if rep.done != nil {
+ close(rep.done)
+ }
+
+ rep.metricsCh.Pause()
+}
+
+func (rep *Reporter) start() {
+ defer func() {
+ rep.metricsCh.Pause()
+ }()
+
+ for {
+ select {
+ case <-rep.done:
+ rep.done = nil
+ return
+ case m := <-rep.metricsCh.ch:
+ // TODO: What to do with this error? Probably should just log
+ b, err := json.Marshal(m)
+ if err != nil {
+ continue
+ }
+
+ rep.conn.Write(b)
+ }
+ }
+}
+
+// Pause will pause the metric channel preventing any new metrics from being
+// added. It is safe to call concurrently with other calls to Pause, but
+// calling it concurrently with Continue can lead to unexpected state.
+func (rep *Reporter) Pause() {
+ lock.Lock()
+ defer lock.Unlock()
+
+ if rep == nil {
+ return
+ }
+
+ rep.close()
+}
+
+// Continue will reopen the metric channel and allow for monitoring to be
+// resumed. It is safe to call concurrently with other calls to Continue, but
+// calling it concurrently with Pause can lead to unexpected state.
+func (rep *Reporter) Continue() {
+ lock.Lock()
+ defer lock.Unlock()
+ if rep == nil {
+ return
+ }
+
+ if !rep.metricsCh.IsPaused() {
+ return
+ }
+
+ rep.metricsCh.Continue()
+}
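+
+// Example (illustrative sketch): temporarily suspending metric collection.
+// Pause and Continue are safe on a nil *Reporter, but must not race each
+// other.
+//
+// r := csm.Get()
+// r.Pause()    // metrics pushed while paused are dropped
+// // ... work that should not be measured ...
+// r.Continue() // resume collection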
+
+// Client side metric handler names
+const (
+ APICallMetricHandlerName = "awscsm.SendAPICallMetric"
+ APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
+// InjectHandlers will enable client side metrics and inject the proper
+// handlers used to send the metrics.
+//
+// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
+// multiple times may lead to unexpected behavior, (e.g. duplicate metrics).
+//
+// // Start must be called in order to inject the correct handlers
+// r, err := csm.Start("clientID", "127.0.0.1:8094")
+// if err != nil {
+// panic(fmt.Errorf("expected no error, but received %v", err))
+// }
+//
+// sess := session.Must(session.NewSession())
+// r.InjectHandlers(&sess.Handlers)
+//
+// // create a new service client with our client side metric session
+// svc := s3.New(sess)
+func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
+ if rep == nil {
+ return
+ }
+
+ handlers.Complete.PushFrontNamed(request.NamedHandler{
+ Name: APICallMetricHandlerName,
+ Fn: rep.sendAPICallMetric,
+ })
+
+ handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
+ Name: APICallAttemptMetricHandlerName,
+ Fn: rep.sendAPICallAttemptMetric,
+ })
+}
+
+// boolIntValue return 1 for true and 0 for false.
+func boolIntValue(b bool) int {
+ if b {
+ return 1
+ }
+
+ return 0
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 000000000..23bb639e0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,207 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly; use session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+ cfg := Config()
+ handlers := Handlers()
+ cfg.Credentials = CredChain(cfg, handlers)
+
+ return Defaults{
+ Config: cfg,
+ Handlers: handlers,
+ }
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+ return aws.NewConfig().
+ WithCredentials(credentials.AnonymousCredentials).
+ WithRegion(os.Getenv("AWS_REGION")).
+ WithHTTPClient(http.DefaultClient).
+ WithMaxRetries(aws.UseServiceDefaultRetries).
+ WithLogger(aws.NewDefaultLogger()).
+ WithLogLevel(aws.LogOff).
+ WithEndpointResolver(endpoints.DefaultResolver())
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+ var handlers request.Handlers
+
+ handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+ handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+ handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+ handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
+ handlers.Build.AfterEachFn = request.HandlerListStopOnError
+ handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+ handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+ handlers.Send.PushBackNamed(corehandlers.SendHandler)
+ handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+ handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+ return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+ return credentials.NewCredentials(&credentials.ChainProvider{
+ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+ Providers: CredProviders(cfg, handlers),
+ })
+}
+
+// CredProviders returns the slice of providers used in
+// the default credential chain.
+//
+// This is useful for applications that need to use some other provider (for
+// example, different environment variables for legacy reasons) but still want
+// to fall back on the default chain of providers, so that updates to the
+// default chain are picked up automatically.
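+//
+// For example (illustrative sketch; myLegacyEnvProvider stands in for a
+// hypothetical custom credentials.Provider):
+//
+// providers := defaults.CredProviders(cfg, handlers)
+// providers = append([]credentials.Provider{myLegacyEnvProvider}, providers...)
+// creds := credentials.NewCredentials(&credentials.ChainProvider{
+//     Providers: providers,
+// })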
+func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
+ return []credentials.Provider{
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+ RemoteCredProvider(*cfg, handlers),
+ }
+}
+
+const (
+ httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+ httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+)
+
+// RemoteCredProvider returns a credentials provider for the default remote
+// endpoints such as EC2 or ECS Roles.
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+ if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
+ return localHTTPCredProvider(cfg, handlers, u)
+ }
+
+ if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
+ u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
+ return httpCredProvider(cfg, handlers, u)
+ }
+
+ return ec2RoleProvider(cfg, handlers)
+}
+
+var lookupHostFn = net.LookupHost
+
+func isLoopbackHost(host string) (bool, error) {
+ ip := net.ParseIP(host)
+ if ip != nil {
+ return ip.IsLoopback(), nil
+ }
+
+ // Host is not an ip, perform lookup
+ addrs, err := lookupHostFn(host)
+ if err != nil {
+ return false, err
+ }
+ for _, addr := range addrs {
+ if !net.ParseIP(addr).IsLoopback() {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
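+
+// For example (illustrative): "127.0.0.1" and a name resolving only to
+// loopback addresses are accepted; a name resolving to any non-loopback
+// address (say 127.0.0.1 and 10.0.0.5) is rejected, since every resolved
+// address must be loopback.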
+
+func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+ var errMsg string
+
+ parsed, err := url.Parse(u)
+ if err != nil {
+ errMsg = fmt.Sprintf("invalid URL, %v", err)
+ } else {
+ host := aws.URLHostname(parsed)
+ if len(host) == 0 {
+ errMsg = "unable to parse host from local HTTP cred provider URL"
+ } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
+ errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
+ } else if !isLoopback {
+ errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
+ }
+ }
+
+ if len(errMsg) > 0 {
+ if cfg.Logger != nil {
+ cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
+ }
+ return credentials.ErrorProvider{
+ Err: awserr.New("CredentialsEndpointError", errMsg, err),
+ ProviderName: endpointcreds.ProviderName,
+ }
+ }
+
+ return httpCredProvider(cfg, handlers, u)
+}
+
+func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+ return endpointcreds.NewProviderClient(cfg, handlers, u,
+ func(p *endpointcreds.Provider) {
+ p.ExpiryWindow = 5 * time.Minute
+ p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
+ },
+ )
+}
+
+func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+ resolver := cfg.EndpointResolver
+ if resolver == nil {
+ resolver = endpoints.DefaultResolver()
+ }
+
+ e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
+ return &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
+ ExpiryWindow: 5 * time.Minute,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
new file mode 100644
index 000000000..ca0ee1dcc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
@@ -0,0 +1,27 @@
+package defaults
+
+import (
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+ return shareddefaults.SharedCredentialsFilename()
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return shareddefaults.SharedConfigFilename()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100644
index 000000000..4fcb61618
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operation parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// APIs use. These utilities make it easier to get a pointer to a scalar value
+// and to safely dereference a pointer.
+//
+// Each conversion utility comes in two forms: value to pointer and pointer to
+// value. The pointer-to-value form will safely dereference the pointer and
+// return its value. If the pointer was nil, the scalar's zero value will be
+// returned.
+//
+// The value-to-pointer functions are named after the scalar type. So to get a
+// *string from a string value, use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of
+// a literal requires assigning the value to a variable first.
+//
+// var strPtr *string
+//
+// // Without the SDK's conversion functions
+// str := "my string"
+// strPtr = &str
+//
+// // With the SDK's conversion functions
+// strPtr = aws.String("my string")
+//
+// // Convert *string to string value
+// str = aws.StringValue(strPtr)
+//
+// In addition to scalars, the aws package also includes conversion utilities
+// for maps and slices of commonly used types in API parameters. The map and
+// slice conversion functions use a naming pattern similar to the scalar
+// conversion functions.
+//
+// var strPtrs []*string
+// var strs []string = []string{"Go", "Gophers", "Go"}
+//
+// // Convert []string to []*string
+// strPtrs = aws.StringSlice(strs)
+//
+// // Convert []*string to []string
+// strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
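+//
+// A minimal sketch (illustrative only):
+//
+// httpClient := &http.Client{Timeout: 30 * time.Second}
+// cfg := aws.NewConfig().WithHTTPClient(httpClient)
+// sess := session.Must(session.NewSession(cfg))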
+package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 000000000..d126764ce
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,170 @@
+package ec2metadata
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkuri"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or an
+// error if the request failed.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+ op := &request.Operation{
+ Name: "GetMetadata",
+ HTTPMethod: "GET",
+ HTTPPath: sdkuri.PathJoin("/meta-data", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+ err := req.Send()
+
+ return output.Content, err
+}
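+
+// For example (illustrative sketch; svc is a client created elsewhere with
+// ec2metadata.New(sess)), requesting this instance's ID:
+//
+// id, err := svc.GetMetadata("instance-id")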
+
+// GetUserData returns the userdata that was configured for the EC2 instance.
+// If there is no user-data set up for the EC2 instance, a "NotFoundError"
+// error code will be returned.
+func (c *EC2Metadata) GetUserData() (string, error) {
+ op := &request.Operation{
+ Name: "GetUserData",
+ HTTPMethod: "GET",
+ HTTPPath: "/user-data",
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+ req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
+ if r.HTTPResponse.StatusCode == http.StatusNotFound {
+ r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
+ }
+ })
+ err := req.Send()
+
+ return output.Content, err
+}
+
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or error if the request failed.
+func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+ op := &request.Operation{
+ Name: "GetDynamicData",
+ HTTPMethod: "GET",
+ HTTPPath: sdkuri.PathJoin("/dynamic", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+ err := req.Send()
+
+ return output.Content, err
+}
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. An error is returned if the request fails or the response cannot
+// be parsed.
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
+ resp, err := c.GetDynamicData("instance-identity/document")
+ if err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 instance identity document", err)
+ }
+
+ doc := EC2InstanceIdentityDocument{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New(request.ErrCodeSerialization,
+ "failed to decode EC2 instance identity document", err)
+ }
+
+ return doc, nil
+}
+
+// IAMInfo retrieves IAM info from the metadata API
+func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
+ resp, err := c.GetMetadata("iam/info")
+ if err != nil {
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 IAM info", err)
+ }
+
+ info := EC2IAMInfo{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
+ return EC2IAMInfo{},
+ awserr.New(request.ErrCodeSerialization,
+ "failed to decode EC2 IAM info", err)
+ }
+
+ if info.Code != "Success" {
+ errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataError", errMsg, nil)
+ }
+
+ return info, nil
+}
+
+// Region returns the region the instance is running in.
+func (c *EC2Metadata) Region() (string, error) {
+ resp, err := c.GetMetadata("placement/availability-zone")
+ if err != nil {
+ return "", err
+ }
+
+ if len(resp) == 0 {
+ return "", awserr.New("EC2MetadataError", "invalid Region response", nil)
+ }
+
+ // returns region without the suffix. Eg: us-west-2a becomes us-west-2
+ return resp[:len(resp)-1], nil
+}
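+
+// For example (illustrative): an instance in availability zone "us-west-2a"
+// reports region "us-west-2".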
+
+// Available reports whether the application has access to the EC2 Metadata
+// service. It can be used to determine if the application is running within
+// an EC2 Instance and the metadata service is available.
+func (c *EC2Metadata) Available() bool {
+ if _, err := c.GetMetadata("instance-id"); err != nil {
+ return false
+ }
+
+ return true
+}
+
+// An EC2IAMInfo provides the shape for unmarshaling
+// IAM info from the metadata API
+type EC2IAMInfo struct {
+ Code string
+ LastUpdated time.Time
+ InstanceProfileArn string
+ InstanceProfileID string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document
+type EC2InstanceIdentityDocument struct {
+ DevpayProductCodes []string `json:"devpayProductCodes"`
+ MarketplaceProductCodes []string `json:"marketplaceProductCodes"`
+ AvailabilityZone string `json:"availabilityZone"`
+ PrivateIP string `json:"privateIp"`
+ Version string `json:"version"`
+ Region string `json:"region"`
+ InstanceID string `json:"instanceId"`
+ BillingProducts []string `json:"billingProducts"`
+ InstanceType string `json:"instanceType"`
+ AccountID string `json:"accountId"`
+ PendingTime time.Time `json:"pendingTime"`
+ ImageID string `json:"imageId"`
+ KernelID string `json:"kernelId"`
+ RamdiskID string `json:"ramdiskId"`
+ Architecture string `json:"architecture"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644
index 000000000..4c5636e35
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,152 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+//
+// This package's client can be disabled completely by setting the environment
+// variable "AWS_EC2_METADATA_DISABLED=true". Setting this variable to true
+// (case insensitive) instructs the SDK to disable the EC2 Metadata client,
+// and the client cannot be used while it is set.
+package ec2metadata
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
+
+// A EC2Metadata is an EC2 Metadata service Client.
+type EC2Metadata struct {
+ *client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+//
+// Example:
+// // Create a EC2Metadata client from just a session.
+// svc := ec2metadata.New(mySession)
+//
+// // Create a EC2Metadata client with additional configuration
+// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If an unmodified HTTP client from the stdlib default is provided, or no
+// client at all, the EC2Metadata HTTP client's timeout will be shortened.
+// To disable this override set Config.EC2MetadataDisableTimeoutOverride to
+// true; the override is enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+ if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+ // If the http client is unmodified and this feature is not disabled
+ // set custom timeouts for EC2Metadata requests.
+ cfg.HTTPClient = &http.Client{
+ // use a shorter timeout than default because the metadata
+ // service is local if it is running, and to fail faster
+ // if not running on an ec2 instance.
+ Timeout: 5 * time.Second,
+ }
+ }
+
+ svc := &EC2Metadata{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceName,
+ Endpoint: endpoint,
+ APIVersion: "latest",
+ },
+ handlers,
+ ),
+ }
+
+ svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+ svc.Handlers.Validate.Clear()
+ svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ // Disable the EC2 Metadata service if the environment variable is set.
+ // This short-circuits the service's functionality to always fail to send
+ // requests.
+ if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
+ svc.Handlers.Send.SwapNamed(request.NamedHandler{
+ Name: corehandlers.SendHandler.Name,
+ Fn: func(r *request.Request) {
+ r.HTTPResponse = &http.Response{
+ Header: http.Header{},
+ }
+ r.Error = awserr.New(
+ request.CanceledErrorCode,
+ "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
+ nil)
+ },
+ })
+ }
+
+ // Add additional options to the service config
+ for _, option := range opts {
+ option(svc.Client)
+ }
+
+ return svc
+}
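+
+// For example (illustrative): with AWS_EC2_METADATA_DISABLED=true set in the
+// environment, every request made by a client returned from NewClient fails
+// with a request.CanceledErrorCode error before any network call is made.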
+
+func httpClientZero(c *http.Client) bool {
+ return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+ Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err)
+ return
+ }
+
+ if data, ok := r.Data.(*metadataOutput); ok {
+ data.Content = b.String()
+ }
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err)
+ return
+ }
+
+ // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error
+ r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+func validateEndpointHandler(r *request.Request) {
+ if r.ClientInfo.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
new file mode 100644
index 000000000..87b9ff3ff
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -0,0 +1,188 @@
+package endpoints
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// A DecodeModelOptions provides the options for how the endpoints model
+// definition is decoded.
+type DecodeModelOptions struct {
+ SkipCustomizations bool
+}
+
+// Set combines all of the option functions together.
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// DecodeModel unmarshals a Regions and Endpoint model definition file into
+// an endpoint Resolver. If the file format is not supported, or an error
+// occurs when unmarshaling the model, an error will be returned.
+//
+// Casting the return value of this func to an EnumPartitions will
+// allow you to get a list of the partitions in the order the endpoints
+// will be resolved in.
+//
+// resolver, err := endpoints.DecodeModel(reader)
+//
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
+ var opts DecodeModelOptions
+ opts.Set(optFns...)
+
+ // Get the version of the partition file to determine what
+ // unmarshaling model to use.
+ modelDef := modelDefinition{}
+ if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ var version string
+ if b, ok := modelDef["version"]; ok {
+ version = string(b)
+ } else {
+ return nil, newDecodeModelError("endpoints version not found in model", nil)
+ }
+
+ if version == "3" {
+ return decodeV3Endpoints(modelDef, opts)
+ }
+
+ return nil, newDecodeModelError(
+ fmt.Sprintf("endpoints version %s, not supported", version), nil)
+}
+
+func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
+ b, ok := modelDef["partitions"]
+ if !ok {
+ return nil, newDecodeModelError("endpoints model missing partitions", nil)
+ }
+
+ ps := partitions{}
+ if err := json.Unmarshal(b, &ps); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ if opts.SkipCustomizations {
+ return ps, nil
+ }
+
+ // Customization
+ for i := 0; i < len(ps); i++ {
+ p := &ps[i]
+ custAddEC2Metadata(p)
+ custAddS3DualStack(p)
+ custRmIotDataService(p)
+ custFixAppAutoscalingChina(p)
+ custFixAppAutoscalingUsGov(p)
+ }
+
+ return ps, nil
+}
+
+func custAddS3DualStack(p *partition) {
+ if p.ID != "aws" {
+ return
+ }
+
+ custAddDualstack(p, "s3")
+ custAddDualstack(p, "s3-control")
+}
+
+func custAddDualstack(p *partition, svcName string) {
+ s, ok := p.Services[svcName]
+ if !ok {
+ return
+ }
+
+ s.Defaults.HasDualStack = boxedTrue
+ s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"
+
+ p.Services[svcName] = s
+}
+
+func custAddEC2Metadata(p *partition) {
+ p.Services["ec2metadata"] = service{
+ IsRegionalized: boxedFalse,
+ PartitionEndpoint: "aws-global",
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ }
+}
+
+func custRmIotDataService(p *partition) {
+ delete(p.Services, "data.iot")
+}
+
+func custFixAppAutoscalingChina(p *partition) {
+ if p.ID != "aws-cn" {
+ return
+ }
+
+ const serviceName = "application-autoscaling"
+ s, ok := p.Services[serviceName]
+ if !ok {
+ return
+ }
+
+ const expectHostname = `autoscaling.{region}.amazonaws.com`
+ if e, a := expectHostname, s.Defaults.Hostname; e != a {
+ fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a)
+ return
+ }
+
+ s.Defaults.Hostname = expectHostname + ".cn"
+ p.Services[serviceName] = s
+}
+
+func custFixAppAutoscalingUsGov(p *partition) {
+ if p.ID != "aws-us-gov" {
+ return
+ }
+
+ const serviceName = "application-autoscaling"
+ s, ok := p.Services[serviceName]
+ if !ok {
+ return
+ }
+
+ if a := s.Defaults.CredentialScope.Service; a != "" {
+ fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a)
+ return
+ }
+
+ if a := s.Defaults.Hostname; a != "" {
+ fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a)
+ return
+ }
+
+ s.Defaults.CredentialScope.Service = "application-autoscaling"
+ s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com"
+
+ p.Services[serviceName] = s
+}
+
+type decodeModelError struct {
+ awsError
+}
+
+func newDecodeModelError(msg string, err error) decodeModelError {
+ return decodeModelError{
+ awsError: awserr.New("DecodeEndpointsModelError", msg, err),
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
new file mode 100644
index 000000000..1d04f3dee
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -0,0 +1,5898 @@
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+// Partition identifiers
+const (
+ AwsPartitionID = "aws" // AWS Standard partition.
+ AwsCnPartitionID = "aws-cn" // AWS China partition.
+ AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
+ AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition.
+ AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition.
+)
+
+// AWS Standard partition's regions.
+const (
+ ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong).
+ ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
+ ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
+ ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai).
+ ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
+ ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
+ CaCentral1RegionID = "ca-central-1" // Canada (Central).
+ EuCentral1RegionID = "eu-central-1" // EU (Frankfurt).
+ EuNorth1RegionID = "eu-north-1" // EU (Stockholm).
+ EuWest1RegionID = "eu-west-1" // EU (Ireland).
+ EuWest2RegionID = "eu-west-2" // EU (London).
+ EuWest3RegionID = "eu-west-3" // EU (Paris).
+ MeSouth1RegionID = "me-south-1" // Middle East (Bahrain).
+ SaEast1RegionID = "sa-east-1" // South America (Sao Paulo).
+ UsEast1RegionID = "us-east-1" // US East (N. Virginia).
+ UsEast2RegionID = "us-east-2" // US East (Ohio).
+ UsWest1RegionID = "us-west-1" // US West (N. California).
+ UsWest2RegionID = "us-west-2" // US West (Oregon).
+)
+
+// AWS China partition's regions.
+const (
+ CnNorth1RegionID = "cn-north-1" // China (Beijing).
+ CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia).
+)
+
+// AWS GovCloud (US) partition's regions.
+const (
+ UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East).
+ UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US).
+)
+
+// AWS ISO (US) partition's regions.
+const (
+ UsIsoEast1RegionID = "us-iso-east-1" // US ISO East.
+)
+
+// AWS ISOB (US) partition's regions.
+const (
+ UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio).
+)
+
+// DefaultResolver returns an Endpoint resolver that will be able
+// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US).
+//
+// Use DefaultPartitions() to get the list of the default partitions.
+func DefaultResolver() Resolver {
+ return defaultPartitions
+}
+
+// DefaultPartitions returns a list of the partitions the SDK is bundled
+// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US).
+//
+// partitions := endpoints.DefaultPartitions()
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DefaultPartitions() []Partition {
+ return defaultPartitions.Partitions()
+}
+
+var defaultPartitions = partitions{
+ awsPartition,
+ awscnPartition,
+ awsusgovPartition,
+ awsisoPartition,
+ awsisobPartition,
+}
+
+// AwsPartition returns the Resolver for AWS Standard.
+func AwsPartition() Partition {
+ return awsPartition.Partition()
+}
+
+var awsPartition = partition{
+ ID: "aws",
+ Name: "AWS Standard",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "ap-east-1": region{
+ Description: "Asia Pacific (Hong Kong)",
+ },
+ "ap-northeast-1": region{
+ Description: "Asia Pacific (Tokyo)",
+ },
+ "ap-northeast-2": region{
+ Description: "Asia Pacific (Seoul)",
+ },
+ "ap-south-1": region{
+ Description: "Asia Pacific (Mumbai)",
+ },
+ "ap-southeast-1": region{
+ Description: "Asia Pacific (Singapore)",
+ },
+ "ap-southeast-2": region{
+ Description: "Asia Pacific (Sydney)",
+ },
+ "ca-central-1": region{
+ Description: "Canada (Central)",
+ },
+ "eu-central-1": region{
+ Description: "EU (Frankfurt)",
+ },
+ "eu-north-1": region{
+ Description: "EU (Stockholm)",
+ },
+ "eu-west-1": region{
+ Description: "EU (Ireland)",
+ },
+ "eu-west-2": region{
+ Description: "EU (London)",
+ },
+ "eu-west-3": region{
+ Description: "EU (Paris)",
+ },
+ "me-south-1": region{
+ Description: "Middle East (Bahrain)",
+ },
+ "sa-east-1": region{
+ Description: "South America (Sao Paulo)",
+ },
+ "us-east-1": region{
+ Description: "US East (N. Virginia)",
+ },
+ "us-east-2": region{
+ Description: "US East (Ohio)",
+ },
+ "us-west-1": region{
+ Description: "US West (N. California)",
+ },
+ "us-west-2": region{
+ Description: "US West (Oregon)",
+ },
+ },
+ Services: services{
+ "a4b": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "acm": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "acm-pca": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "api.ecr": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{
+ Hostname: "api.ecr.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ "ap-northeast-1": endpoint{
+ Hostname: "api.ecr.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "api.ecr.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "api.ecr.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "api.ecr.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "api.ecr.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "ca-central-1": endpoint{
+ Hostname: "api.ecr.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "api.ecr.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-north-1": endpoint{
+ Hostname: "api.ecr.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "api.ecr.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "api.ecr.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "eu-west-3": endpoint{
+ Hostname: "api.ecr.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ "me-south-1": endpoint{
+ Hostname: "api.ecr.me-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-south-1",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "api.ecr.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "api.ecr.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "api.ecr.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{
+ Hostname: "api.ecr.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "api.ecr.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "api.mediatailor": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "api.pricing": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "pricing",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-south-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "api.sagemaker": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "appmesh": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "appstream2": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Service: "appstream",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "fips": endpoint{
+ Hostname: "appstream2-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "appsync": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "athena": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "autoscaling-plans": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "autoscaling-plans",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "backup": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "batch": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "budgets": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "budgets.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "ce": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "ce.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "chime": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Defaults: endpoint{
+ SSLCommonName: "service.chime.aws.amazon.com",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "service.chime.aws.amazon.com",
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cloud9": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "clouddirectory": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudfront": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "cloudfront.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cloudhsm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudhsmv2": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "cloudhsm",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudsearch": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codebuild": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "codebuild-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "codebuild-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "codebuild-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "codebuild-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "codecommit": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips": endpoint{
+ Hostname: "codecommit-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "codedeploy-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "codedeploy-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "codepipeline": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codestar": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-identity": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-idp": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-sync": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "comprehend": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "comprehendmedical": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "connect": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cur": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "data.mediastore": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "datapipeline": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "datasync": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "datasync-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "datasync-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "datasync-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "datasync-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "dax": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "devicefarm": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "discovery": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "docdb": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "rds.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "rds.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "rds.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "rds.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "rds.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "rds.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "rds.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "rds.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "eu-west-3": endpoint{
+ Hostname: "rds.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "rds.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "rds.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "rds.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "ca-central-1-fips": endpoint{
+ Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "local": endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "ec2": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips": endpoint{
+ Hostname: "elasticache-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticfilesystem": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.{service}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elastictranscoder": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "email": service{
+
+ Endpoints: endpoints{
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "entitlement.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips": endpoint{
+ Hostname: "es-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "fms": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "forecast": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "forecastquery": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "fsx": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "gamelift": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "glue": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "greengrass": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "groundstation": service{
+
+ Endpoints: endpoints{
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "guardduty-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "guardduty-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "guardduty-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "guardduty-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "health": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "iam.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "importexport": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "importexport.amazonaws.com",
+ SignatureVersions: []string{"v2", "v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ Service: "IngestionService",
+ },
+ },
+ },
+ },
+ "inspector": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "iotanalytics": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "iotevents": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ioteventsdata": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "data.iotevents.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "data.iotevents.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "data.iotevents.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "data.iotevents.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "data.iotevents.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "data.iotevents.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "data.iotevents.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "data.iotevents.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "data.iotevents.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "data.iotevents.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "iotthingsgraph": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "iotthingsgraph",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kafka": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesisanalytics": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesisvideo": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lakeformation": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "license-manager": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lightsail": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "machinelearning": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "marketplacecommerceanalytics": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "mediaconnect": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mediaconvert": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "medialive": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mediapackage": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mediastore": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "metering.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mgh": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "mobileanalytics": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "models.lex": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "monitoring": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mq": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "mq-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "mq-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "mq-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "mq-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mturk-requester": service{
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
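+				// "sandbox" is a pseudo-region entry pointing at the MTurk
+				// requester sandbox environment in us-east-1.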
+ "sandbox": endpoint{
+ Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com",
+ },
+ "us-east-1": endpoint{},
+ },
+ },
+ "neptune": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "rds.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "rds.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "rds.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "rds.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "rds.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "rds.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-north-1": endpoint{
+ Hostname: "rds.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "rds.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "rds.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "rds.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "rds.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "rds.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "opsworks": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "opsworks-cm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "organizations": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "organizations.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "pinpoint": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "mobiletargeting",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "projects.iot1click": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "qldb": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ram": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "{service}.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "rekognition": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "resource-groups": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "resource-groups-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "resource-groups-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "resource-groups-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "resource-groups-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "robomaker": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "route53": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "route53.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "route53domains": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "route53resolver": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "runtime.lex": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "runtime.sagemaker": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "s3": service{
+ PartitionEndpoint: "us-east-1",
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+
+ HasDualStack: boxedTrue,
+ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{
+ Hostname: "s3.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{
+ Hostname: "s3.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "s3.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{
+ Hostname: "s3.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "s3-external-1": endpoint{
+ Hostname: "s3-external-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "s3.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-1": endpoint{
+ Hostname: "s3.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{
+ Hostname: "s3.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-west-2": endpoint{
+ Hostname: "s3.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ },
+ },
+ "s3-control": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+
+ HasDualStack: boxedTrue,
+ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "s3-control.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "s3-control.ap-northeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "s3-control.ap-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "s3-control.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "s3-control.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "ca-central-1": endpoint{
+ Hostname: "s3-control.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "s3-control.eu-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-north-1": endpoint{
+ Hostname: "s3-control.eu-north-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "s3-control.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "s3-control.eu-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "eu-west-3": endpoint{
+ Hostname: "s3-control.eu-west-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "s3-control.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "s3-control.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "s3-control.us-east-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-east-2-fips": endpoint{
+ Hostname: "s3-control-fips.us-east-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{
+ Hostname: "s3-control.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "s3-control.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-west-2-fips": endpoint{
+ Hostname: "s3-control-fips.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "sdb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v2"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ Hostname: "sdb.amazonaws.com",
+ },
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "secretsmanager": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "securityhub": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "serverlessrepo": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-northeast-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-northeast-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-south-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-southeast-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-southeast-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ca-central-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-central-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-north-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-west-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-west-3": endpoint{
+ Protocols: []string{"https"},
+ },
+ "me-south-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "sa-east-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-east-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-east-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-west-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "servicecatalog": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "servicediscovery": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "session.qldb": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "shield": service{
+ IsRegionalized: boxedFalse,
+ Defaults: endpoint{
+ SSLCommonName: "shield.us-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "sqs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "sqs-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "sqs-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "sqs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "queue.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "ca-central-1-fips": endpoint{
+ Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "local": endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "sts": service{
+ PartitionEndpoint: "aws-global",
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "aws-global": endpoint{
+ Hostname: "sts.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "sts-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "sts-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "sts-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "sts-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "support": service{
+ PartitionEndpoint: "aws-global",
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "support.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "transcribe": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "transcribestreaming": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "transfer": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "translate": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "translate-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "translate-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "translate-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "waf": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "waf.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "waf-regional": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workdocs": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workmail": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workspaces": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "xray": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "me-south-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ },
+}
+
+// AwsCnPartition returns the Resolver for AWS China.
+func AwsCnPartition() Partition {
+ return awscnPartition.Partition()
+}
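+
+// Illustrative sketch (not part of the generated data): a caller resolves an
+// endpoint in this partition through Partition.EndpointFor, whose results are
+// driven by the awscnPartition table below. For the "s3" service entry defined
+// in that table:
+//
+//	p := AwsCnPartition()
+//	resolved, err := p.EndpointFor("s3", "cn-north-1")
+//	// on success, resolved.URL is "https://s3.cn-north-1.amazonaws.com.cn"
+//	// and resolved.SigningRegion is "cn-north-1"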
+
+var awscnPartition = partition{
+ ID: "aws-cn",
+ Name: "AWS China",
+ DNSSuffix: "amazonaws.com.cn",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "cn-north-1": region{
+ Description: "China (Beijing)",
+ },
+ "cn-northwest-1": region{
+ Description: "China (Ningxia)",
+ },
+ },
+ Services: services{
+ "api.ecr": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{
+ Hostname: "api.ecr.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ "cn-northwest-1": endpoint{
+ Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com.cn",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "batch": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "cloudfront": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "codebuild": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "cognito-identity": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "gamelift": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "glue": service{
+
+ Endpoints: endpoints{
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "greengrass": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "iam.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "license-manager": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "mediaconvert": service{
+
+ Endpoints: endpoints{
+ "cn-northwest-1": endpoint{
+ Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "monitoring": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "s3-control": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{
+ Hostname: "s3-control.cn-north-1.amazonaws.com.cn",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ "cn-northwest-1": endpoint{
+ Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "support": service{
+ PartitionEndpoint: "aws-cn-global",
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "support.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "transcribe": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{
+ Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ "cn-northwest-1": endpoint{
+ Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ },
+}
+
+// AwsUsGovPartition returns the Resolver for AWS GovCloud (US).
+func AwsUsGovPartition() Partition {
+ return awsusgovPartition.Partition()
+}
+
+var awsusgovPartition = partition{
+ ID: "aws-us-gov",
+ Name: "AWS GovCloud (US)",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "us-gov-east-1": region{
+ Description: "AWS GovCloud (US-East)",
+ },
+ "us-gov-west-1": region{
+ Description: "AWS GovCloud (US)",
+ },
+ },
+ Services: services{
+ "acm": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "acm-pca": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "api.ecr": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{
+ Hostname: "api.ecr.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "api.ecr.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "api.sagemaker": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "appstream2": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Service: "appstream",
+ },
+ },
+ Endpoints: endpoints{
+ "fips": endpoint{
+ Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "athena": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "autoscaling": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "clouddirectory": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudhsm": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudhsmv2": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "cloudhsm",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "codebuild": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "codecommit": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "comprehend": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "datasync": service{
+
+ Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "datasync-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "dynamodb.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "ec2": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "fips": endpoint{
+ Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticfilesystem": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "elasticmapreduce": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "fips": endpoint{
+ Hostname: "es-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "glacier": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "glue": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "greengrass": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "health": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-us-gov-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-us-gov-global": endpoint{
+ Hostname: "iam.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "inspector": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "ProdFips": endpoint{
+ Hostname: "kms-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "license-manager": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "mediaconvert": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "metering.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "neptune": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{
+ Hostname: "rds.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "rds.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "organizations": service{
+ PartitionEndpoint: "aws-us-gov-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-us-gov-global": endpoint{
+ Hostname: "organizations.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "ram": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "rekognition": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "resource-groups": service{
+
+ Endpoints: endpoints{
+ "fips-us-gov-east-1": endpoint{
+ Hostname: "resource-groups.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "resource-groups.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "route53": service{
+ PartitionEndpoint: "aws-us-gov-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-us-gov-global": endpoint{
+ Hostname: "route53.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "route53resolver": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "runtime.sagemaker": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "s3-fips-us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{
+ Hostname: "s3.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "s3.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "s3-control": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{
+ Hostname: "s3-control.us-gov-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "s3-control.us-gov-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "secretsmanager": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "serverlessrepo": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "servicecatalog": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "sns": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "sqs": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "dynamodb.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "transcribe": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "translate": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "translate-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "waf-regional": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "workspaces": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ },
+}
+
+// AwsIsoPartition returns the Resolver for AWS ISO (US).
+func AwsIsoPartition() Partition {
+ return awsisoPartition.Partition()
+}
+
+var awsisoPartition = partition{
+ ID: "aws-iso",
+ Name: "AWS ISO (US)",
+ DNSSuffix: "c2s.ic.gov",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "us-iso-east-1": region{
+ Description: "US ISO East",
+ },
+ },
+ Services: services{
+ "api.ecr": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{
+ Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ },
+ },
+ "api.sagemaker": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "autoscaling": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "datapipeline": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "ec2": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "elasticmapreduce": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "glacier": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "health": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-iso-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-iso-global": endpoint{
+ Hostname: "iam.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "ProdFips": endpoint{
+ Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "route53": service{
+ PartitionEndpoint: "aws-iso-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-iso-global": endpoint{
+ Hostname: "route53.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ },
+ },
+ "runtime.sagemaker": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "sns": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "sqs": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "support": service{
+ PartitionEndpoint: "aws-iso-global",
+
+ Endpoints: endpoints{
+ "aws-iso-global": endpoint{
+ Hostname: "support.us-iso-east-1.c2s.ic.gov",
+ CredentialScope: credentialScope{
+ Region: "us-iso-east-1",
+ },
+ },
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ "workspaces": service{
+
+ Endpoints: endpoints{
+ "us-iso-east-1": endpoint{},
+ },
+ },
+ },
+}
+
+// AwsIsoBPartition returns the Resolver for AWS ISOB (US).
+func AwsIsoBPartition() Partition {
+ return awsisobPartition.Partition()
+}
+
+var awsisobPartition = partition{
+ ID: "aws-iso-b",
+ Name: "AWS ISOB (US)",
+ DNSSuffix: "sc2s.sgov.gov",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "us-isob-east-1": region{
+ Description: "US ISOB East (Ohio)",
+ },
+ },
+ Services: services{
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "elasticmapreduce": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "glacier": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "health": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-iso-b-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-iso-b-global": endpoint{
+ Hostname: "iam.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "ProdFips": endpoint{
+ Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ "support": service{
+ PartitionEndpoint: "aws-iso-b-global",
+
+ Endpoints: endpoints{
+ "aws-iso-b-global": endpoint{
+ Hostname: "support.us-isob-east-1.sc2s.sgov.gov",
+ CredentialScope: credentialScope{
+ Region: "us-isob-east-1",
+ },
+ },
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "us-isob-east-1": endpoint{},
+ },
+ },
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
new file mode 100644
index 000000000..ca8fc828e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
@@ -0,0 +1,141 @@
+package endpoints
+
+// Service identifiers
+//
+// Deprecated: Use client package's EndpointsID value instead of these
+// ServiceIDs. These IDs are not maintained, and are out of date.
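+//
+// For example (a sketch assuming the s3 service client package), prefer:
+//
+//	import "github.com/aws/aws-sdk-go/service/s3"
+//	id := s3.EndpointsID
+//
+// over the corresponding deprecated constant in this block.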
+const (
+ A4bServiceID = "a4b" // A4b.
+ AcmServiceID = "acm" // Acm.
+ AcmPcaServiceID = "acm-pca" // AcmPca.
+ ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor.
+ ApiPricingServiceID = "api.pricing" // ApiPricing.
+ ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker.
+ ApigatewayServiceID = "apigateway" // Apigateway.
+ ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
+ Appstream2ServiceID = "appstream2" // Appstream2.
+ AppsyncServiceID = "appsync" // Appsync.
+ AthenaServiceID = "athena" // Athena.
+ AutoscalingServiceID = "autoscaling" // Autoscaling.
+ AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
+ BatchServiceID = "batch" // Batch.
+ BudgetsServiceID = "budgets" // Budgets.
+ CeServiceID = "ce" // Ce.
+ ChimeServiceID = "chime" // Chime.
+ Cloud9ServiceID = "cloud9" // Cloud9.
+ ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
+ CloudformationServiceID = "cloudformation" // Cloudformation.
+ CloudfrontServiceID = "cloudfront" // Cloudfront.
+ CloudhsmServiceID = "cloudhsm" // Cloudhsm.
+ Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
+ CloudsearchServiceID = "cloudsearch" // Cloudsearch.
+ CloudtrailServiceID = "cloudtrail" // Cloudtrail.
+ CodebuildServiceID = "codebuild" // Codebuild.
+ CodecommitServiceID = "codecommit" // Codecommit.
+ CodedeployServiceID = "codedeploy" // Codedeploy.
+ CodepipelineServiceID = "codepipeline" // Codepipeline.
+ CodestarServiceID = "codestar" // Codestar.
+ CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
+ CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
+ CognitoSyncServiceID = "cognito-sync" // CognitoSync.
+ ComprehendServiceID = "comprehend" // Comprehend.
+ ConfigServiceID = "config" // Config.
+ CurServiceID = "cur" // Cur.
+ DatapipelineServiceID = "datapipeline" // Datapipeline.
+ DaxServiceID = "dax" // Dax.
+ DevicefarmServiceID = "devicefarm" // Devicefarm.
+ DirectconnectServiceID = "directconnect" // Directconnect.
+ DiscoveryServiceID = "discovery" // Discovery.
+ DmsServiceID = "dms" // Dms.
+ DsServiceID = "ds" // Ds.
+ DynamodbServiceID = "dynamodb" // Dynamodb.
+ Ec2ServiceID = "ec2" // Ec2.
+ Ec2metadataServiceID = "ec2metadata" // Ec2metadata.
+ EcrServiceID = "ecr" // Ecr.
+ EcsServiceID = "ecs" // Ecs.
+ ElasticacheServiceID = "elasticache" // Elasticache.
+ ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk.
+ ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem.
+ ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing.
+ ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
+ ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
+ EmailServiceID = "email" // Email.
+ EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace.
+ EsServiceID = "es" // Es.
+ EventsServiceID = "events" // Events.
+ FirehoseServiceID = "firehose" // Firehose.
+ FmsServiceID = "fms" // Fms.
+ GameliftServiceID = "gamelift" // Gamelift.
+ GlacierServiceID = "glacier" // Glacier.
+ GlueServiceID = "glue" // Glue.
+ GreengrassServiceID = "greengrass" // Greengrass.
+ GuarddutyServiceID = "guardduty" // Guardduty.
+ HealthServiceID = "health" // Health.
+ IamServiceID = "iam" // Iam.
+ ImportexportServiceID = "importexport" // Importexport.
+ InspectorServiceID = "inspector" // Inspector.
+ IotServiceID = "iot" // Iot.
+ IotanalyticsServiceID = "iotanalytics" // Iotanalytics.
+ KinesisServiceID = "kinesis" // Kinesis.
+ KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
+ KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
+ KmsServiceID = "kms" // Kms.
+ LambdaServiceID = "lambda" // Lambda.
+ LightsailServiceID = "lightsail" // Lightsail.
+ LogsServiceID = "logs" // Logs.
+ MachinelearningServiceID = "machinelearning" // Machinelearning.
+ MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
+ MediaconvertServiceID = "mediaconvert" // Mediaconvert.
+ MedialiveServiceID = "medialive" // Medialive.
+ MediapackageServiceID = "mediapackage" // Mediapackage.
+ MediastoreServiceID = "mediastore" // Mediastore.
+ MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
+ MghServiceID = "mgh" // Mgh.
+ MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
+ ModelsLexServiceID = "models.lex" // ModelsLex.
+ MonitoringServiceID = "monitoring" // Monitoring.
+ MturkRequesterServiceID = "mturk-requester" // MturkRequester.
+ NeptuneServiceID = "neptune" // Neptune.
+ OpsworksServiceID = "opsworks" // Opsworks.
+ OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
+ OrganizationsServiceID = "organizations" // Organizations.
+ PinpointServiceID = "pinpoint" // Pinpoint.
+ PollyServiceID = "polly" // Polly.
+ RdsServiceID = "rds" // Rds.
+ RedshiftServiceID = "redshift" // Redshift.
+ RekognitionServiceID = "rekognition" // Rekognition.
+ ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
+ Route53ServiceID = "route53" // Route53.
+ Route53domainsServiceID = "route53domains" // Route53domains.
+ RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
+ RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
+ S3ServiceID = "s3" // S3.
+ S3ControlServiceID = "s3-control" // S3Control.
+ SagemakerServiceID = "api.sagemaker" // Sagemaker.
+ SdbServiceID = "sdb" // Sdb.
+ SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
+ ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
+ ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
+ ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
+ ShieldServiceID = "shield" // Shield.
+ SmsServiceID = "sms" // Sms.
+ SnowballServiceID = "snowball" // Snowball.
+ SnsServiceID = "sns" // Sns.
+ SqsServiceID = "sqs" // Sqs.
+ SsmServiceID = "ssm" // Ssm.
+ StatesServiceID = "states" // States.
+ StoragegatewayServiceID = "storagegateway" // Storagegateway.
+ StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
+ StsServiceID = "sts" // Sts.
+ SupportServiceID = "support" // Support.
+ SwfServiceID = "swf" // Swf.
+ TaggingServiceID = "tagging" // Tagging.
+ TransferServiceID = "transfer" // Transfer.
+ TranslateServiceID = "translate" // Translate.
+ WafServiceID = "waf" // Waf.
+ WafRegionalServiceID = "waf-regional" // WafRegional.
+ WorkdocsServiceID = "workdocs" // Workdocs.
+ WorkmailServiceID = "workmail" // Workmail.
+ WorkspacesServiceID = "workspaces" // Workspaces.
+ XrayServiceID = "xray" // Xray.
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
new file mode 100644
index 000000000..84316b92c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
@@ -0,0 +1,66 @@
+// Package endpoints provides the types and functionality for defining regions
+// and endpoints, as well as querying those definitions.
+//
+// The SDK's Regions and Endpoints metadata is code generated into the endpoints
+// package, and is accessible via the DefaultResolver function. This function
+// returns an endpoint Resolver that will search the metadata and build an
+// associated endpoint if one is found. The default resolver will search all
+// partitions known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn),
+// and AWS GovCloud (US) (aws-us-gov).
+//
+// Enumerating Regions and Endpoint Metadata
+//
+// Casting the Resolver returned by DefaultResolver to an EnumPartitions interface
+// will allow you to get access to the list of underlying Partitions with the
+// Partitions method. This is helpful if you want to limit the SDK's endpoint
+// resolving to a single partition, or enumerate regions, services, and endpoints
+// in the partition.
+//
+// resolver := endpoints.DefaultResolver()
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//
+// for _, p := range partitions {
+// fmt.Println("Regions for", p.ID())
+//     for id := range p.Regions() {
+// fmt.Println("*", id)
+// }
+//
+// fmt.Println("Services for", p.ID())
+//     for id := range p.Services() {
+// fmt.Println("*", id)
+// }
+// }
+//
+// Using Custom Endpoints
+//
+// The endpoints package also gives you the ability to define your own logic
+// for how endpoints are resolved. This is a great way to define a custom
+// endpoint for select services, without passing that logic down through your code.
+//
+// If a type implements the Resolver interface it can be used to resolve
+// endpoints. To use this with the SDK's Session and Config, set the value
+// of the type to the EndpointResolver field of aws.Config when initializing
+// the session, or service client.
+//
+// In addition, the ResolverFunc is a wrapper for a func matching the signature
+// of Resolver.EndpointFor, converting it to a type that satisfies the
+// Resolver interface.
+//
+//
+// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+// if service == endpoints.S3ServiceID {
+// return endpoints.ResolvedEndpoint{
+// URL: "s3.custom.endpoint.com",
+// SigningRegion: "custom-signing-region",
+// }, nil
+// }
+//
+// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+// }
+//
+// sess := session.Must(session.NewSession(&aws.Config{
+// Region: aws.String("us-west-2"),
+// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
+// }))
+package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
new file mode 100644
index 000000000..fadff07d6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -0,0 +1,499 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Options provide the configuration needed to direct how the
+// endpoints will be resolved.
+type Options struct {
+	// DisableSSL forces the endpoint to be resolved as HTTP
+	// instead of HTTPS if the service supports it.
+ DisableSSL bool
+
+ // Sets the resolver to resolve the endpoint as a dualstack endpoint
+ // for the service. If dualstack support for a service is not known and
+ // StrictMatching is not enabled a dualstack endpoint for the service will
+ // be returned. This endpoint may not be valid. If StrictMatching is
+ // enabled only services that are known to support dualstack will return
+ // dualstack endpoints.
+ UseDualStack bool
+
+ // Enables strict matching of services and regions resolved endpoints.
+ // If the partition doesn't enumerate the exact service and region an
+ // error will be returned. This option will prevent returning endpoints
+ // that look valid, but may not resolve to any real endpoint.
+ StrictMatching bool
+
+ // Enables resolving a service endpoint based on the region provided if the
+ // service does not exist. The service endpoint ID will be used as the service
+ // domain name prefix. By default the endpoint resolver requires the service
+ // to be known when resolving endpoints.
+ //
+	// If resolving an endpoint on the partition list, the provided region will
+	// be used to determine which partition's domain name pattern to combine
+	// with the service endpoint ID. If both the service and region are unknown
+	// and the endpoint is resolved on the partition list, an UnknownEndpointError
+	// will be returned.
+ //
+	// If resolving an endpoint on a partition specific resolver, that partition's
+	// domain name pattern will be used with the service endpoint ID. If both
+ // region and service do not exist when resolving an endpoint on a specific
+ // partition the partition's domain pattern will be used to combine the
+ // endpoint and region together.
+ //
+ // This option is ignored if StrictMatching is enabled.
+ ResolveUnknownService bool
+
+ // STS Regional Endpoint flag helps with resolving the STS endpoint
+ STSRegionalEndpoint STSRegionalEndpoint
+}
+
+// STSRegionalEndpoint is an int-based enum type. It is used internally by
+// the core SDK as the STS Regional Endpoint flag value.
+type STSRegionalEndpoint int
+
+const (
+
+ // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified.
+ UnsetSTSEndpoint STSRegionalEndpoint = iota
+
+ // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified
+ // to use legacy endpoints.
+ LegacySTSEndpoint
+
+ // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified
+ // to use regional endpoints.
+ RegionalSTSEndpoint
+)
+
+// GetSTSRegionalEndpoint returns the STSRegionalEndpoint flag value based
+// on the input string provided in env config or shared config by the user.
+//
+// `legacy` and `regional` are the only valid, case-insensitive strings for
+// resolving the STS Regional Endpoint flag.
+func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) {
+ switch {
+ case strings.EqualFold(s, "legacy"):
+ return LegacySTSEndpoint, nil
+ case strings.EqualFold(s, "regional"):
+ return RegionalSTSEndpoint, nil
+ default:
+ return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s)
+ }
+}
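+
+// As an illustrative sketch, both valid inputs map to their flag values, and
+// matching is case-insensitive:
+//
+//	v, err := GetSTSRegionalEndpoint("regional") // RegionalSTSEndpoint, nil
+//	v, err = GetSTSRegionalEndpoint("Legacy")    // LegacySTSEndpoint, nil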
+
+// Set combines all of the option functions together.
+func (o *Options) Set(optFns ...func(*Options)) {
+ for _, fn := range optFns {
+ fn(o)
+ }
+}
+
+// DisableSSLOption sets the DisableSSL option. Can be used as a functional
+// option when resolving endpoints.
+func DisableSSLOption(o *Options) {
+ o.DisableSSL = true
+}
+
+// UseDualStackOption sets the UseDualStack option. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackOption(o *Options) {
+ o.UseDualStack = true
+}
+
+// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
+// option when resolving endpoints.
+func StrictMatchingOption(o *Options) {
+ o.StrictMatching = true
+}
+
+// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
+// as a functional option when resolving endpoints.
+func ResolveUnknownServiceOption(o *Options) {
+ o.ResolveUnknownService = true
+}
+
+// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve
+// STS endpoint to their regional endpoint, instead of the global endpoint.
+func STSRegionalEndpointOption(o *Options) {
+ o.STSRegionalEndpoint = RegionalSTSEndpoint
+}
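+
+// As a sketch of combining these functional options when resolving an
+// endpoint (the service and region values below are illustrative only):
+//
+//	resolved, err := DefaultResolver().EndpointFor(
+//		"s3", "us-west-2",
+//		DisableSSLOption,
+//		ResolveUnknownServiceOption,
+//	)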
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built-in Partition and DefaultResolver return values satisfy this interface.
+type Resolver interface {
+ EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return fn(service, region, opts...)
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS schemes to an endpoint URL if there is no
+// scheme. If disableSSL is true HTTP will be used instead of the default HTTPS.
+//
+// If disableSSL is set, the URL's scheme will only be set if the URL does not
+// already contain a scheme.
+func AddScheme(endpoint string, disableSSL bool) string {
+ if !schemeRE.MatchString(endpoint) {
+ scheme := "https"
+ if disableSSL {
+ scheme = "http"
+ }
+ endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+ }
+
+ return endpoint
+}
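+
+// For example:
+//
+//	AddScheme("example.com", false)       // "https://example.com"
+//	AddScheme("example.com", true)        // "http://example.com"
+//	AddScheme("http://example.com", true) // unchanged, scheme already present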
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModels to get the list of
+// Partitions.
+type EnumPartitions interface {
+ Partitions() []Partition
+}
+
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist false will be returned
+// as the second parameter.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+ for _, p := range ps {
+ if p.ID() != partitionID {
+ continue
+ }
+ if _, ok := p.p.Services[serviceID]; !ok {
+ break
+ }
+
+ s := Service{
+ id: serviceID,
+ p: p.p,
+ }
+ return s.Regions(), true
+ }
+
+ return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+ for _, p := range ps {
+ if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+ return p, true
+ }
+ }
+
+ return Partition{}, false
+}
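+
+// As an illustrative sketch (the region ID is an example value):
+//
+//	if p, ok := PartitionForRegion(DefaultPartitions(), "us-west-2"); ok {
+//		fmt.Println(p.ID()) // "aws"
+//	}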
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+ id, dnsSuffix string
+ p *partition
+}
+
+// DNSSuffix returns the base domain name of the partition.
+func (p Partition) DNSSuffix() string { return p.dnsSuffix }
+
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata the UnknownServiceError
+// error will be returned. This validation will occur regardless if
+// StrictMatching is enabled. To enable resolving unknown services set the
+// "ResolveUnknownService" option to true. When StrictMatching is disabled
+// this option allows the partition resolver to resolve an endpoint based on
+// the service endpoint ID provided.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new regions and services expansions.
+//
+// Errors that can be returned.
+// * UnknownServiceError
+// * UnknownEndpointError
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p Partition) Regions() map[string]Region {
+ rs := map[string]Region{}
+ for id, r := range p.p.Regions {
+ rs[id] = Region{
+ id: id,
+ desc: r.Description,
+ p: p.p,
+ }
+ }
+
+ return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id := range p.p.Services {
+ ss[id] = Service{
+ id: id,
+ p: p.p,
+ }
+ }
+
+ return ss
+}
+
+// A Region provides information about a region, and the ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+ id, desc string
+ p *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// Description returns the region's description. The region description
+// is free text; it can be empty, and it may change between SDK releases.
+func (r Region) Description() string { return r.desc }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return r.p.EndpointFor(service, r.id, opts...)
+}
+
+// Services returns a list of all services that are known to be in this region.
+func (r Region) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id, s := range r.p.Services {
+ if _, ok := s.Endpoints[r.id]; ok {
+ ss[id] = Service{
+ id: id,
+ p: r.p,
+ }
+ }
+ }
+
+ return ss
+}
+
+// A Service provides information about a service, and the ability to resolve an
+// endpoint from the context of a service, given a region.
+type Service struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+ rs := map[string]Region{}
+ for id := range s.p.Services[s.id].Endpoints {
+ if r, ok := s.p.Regions[id]; ok {
+ rs[id] = Region{
+ id: id,
+ desc: r.Description,
+ p: s.p,
+ }
+ }
+ }
+
+ return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+ es := map[string]Endpoint{}
+ for id := range s.p.Services[s.id].Endpoints {
+ es[id] = Endpoint{
+ id: id,
+ serviceID: s.id,
+ p: s.p,
+ }
+ }
+
+ return es
+}
+
+// An Endpoint provides information about an endpoint, and the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+ id string
+ serviceID string
+ p *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a
+// partition, service, and region.
+type ResolvedEndpoint struct {
+ // The endpoint URL
+ URL string
+
+ // The endpoint partition
+ PartitionID string
+
+ // The region that should be used for signing requests.
+ SigningRegion string
+
+ // The service name that should be used for signing requests.
+ SigningName string
+
+ // States that the signing name for this endpoint was derived from metadata
+ // passed in, but was not explicitly modeled.
+ SigningNameDerived bool
+
+ // The signing method that should be used for signing requests.
+ SigningMethod string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when in StrictMatching mode, and the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+ awsError
+ Partition string
+ Service string
+ Known []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+ return UnknownServiceError{
+ awsError: awserr.New("UnknownServiceError",
+ "could not resolve endpoint for unknown service", nil),
+ Partition: p,
+ Service: s,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q",
+ e.Partition, e.Service)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+ return e.Error()
+}
+
+// An UnknownEndpointError is returned when in StrictMatching mode and the
+// service is valid, but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+ Known []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+ return UnknownEndpointError{
+ awsError: awserr.New("UnknownEndpointError",
+ "could not resolve endpoint", nil),
+ Partition: p,
+ Service: s,
+ Region: r,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+ e.Partition, e.Service, e.Region)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+ return e.Error()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/sts_legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/sts_legacy_regions.go
new file mode 100644
index 000000000..261396219
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/sts_legacy_regions.go
@@ -0,0 +1,19 @@
+package endpoints
+
+var stsLegacyGlobalRegions = map[string]struct{}{
+ "ap-northeast-1": {},
+ "ap-south-1": {},
+ "ap-southeast-1": {},
+ "ap-southeast-2": {},
+ "ca-central-1": {},
+ "eu-central-1": {},
+ "eu-north-1": {},
+ "eu-west-1": {},
+ "eu-west-2": {},
+ "eu-west-3": {},
+ "sa-east-1": {},
+ "us-east-1": {},
+ "us-east-2": {},
+ "us-west-1": {},
+ "us-west-2": {},
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
new file mode 100644
index 000000000..7b09adff6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -0,0 +1,340 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type partitions []partition
+
+func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ var opt Options
+ opt.Set(opts...)
+
+ for i := 0; i < len(ps); i++ {
+ if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) {
+ continue
+ }
+
+ return ps[i].EndpointFor(service, region, opts...)
+ }
+
+	// If loose matching, fall back to the first partition's format to use
+	// when resolving the endpoint.
+ if !opt.StrictMatching && len(ps) > 0 {
+ return ps[0].EndpointFor(service, region, opts...)
+ }
+
+ return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
+}
+
+// Partitions satisfies the EnumPartitions interface and returns a list
+// of Partitions, one for each partition represented in the SDK's
+// endpoints model.
+func (ps partitions) Partitions() []Partition {
+ parts := make([]Partition, 0, len(ps))
+ for i := 0; i < len(ps); i++ {
+ parts = append(parts, ps[i].Partition())
+ }
+
+ return parts
+}
+
+type partition struct {
+ ID string `json:"partition"`
+ Name string `json:"partitionName"`
+ DNSSuffix string `json:"dnsSuffix"`
+ RegionRegex regionRegex `json:"regionRegex"`
+ Defaults endpoint `json:"defaults"`
+ Regions regions `json:"regions"`
+ Services services `json:"services"`
+}
+
+func (p partition) Partition() Partition {
+ return Partition{
+ dnsSuffix: p.DNSSuffix,
+ id: p.ID,
+ p: &p,
+ }
+}
+
+func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool {
+ s, hasService := p.Services[service]
+ _, hasEndpoint := s.Endpoints[region]
+
+ if hasEndpoint && hasService {
+ return true
+ }
+
+ if strictMatch {
+ return false
+ }
+
+ return p.RegionRegex.MatchString(region)
+}
+
+func allowLegacyEmptyRegion(service string) bool {
+ legacy := map[string]struct{}{
+ "budgets": {},
+ "ce": {},
+ "chime": {},
+ "cloudfront": {},
+ "ec2metadata": {},
+ "iam": {},
+ "importexport": {},
+ "organizations": {},
+ "route53": {},
+ "sts": {},
+ "support": {},
+ "waf": {},
+ }
+
+ _, allowed := legacy[service]
+ return allowed
+}
+
+func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
+ var opt Options
+ opt.Set(opts...)
+
+ s, hasService := p.Services[service]
+ if len(service) == 0 || !(hasService || opt.ResolveUnknownService) {
+		// Only return an error if the resolver will not fall back to creating
+		// an endpoint based on the service endpoint ID passed in.
+ return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
+ }
+
+ if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 {
+ region = s.PartitionEndpoint
+ }
+
+ if service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint {
+ if _, ok := stsLegacyGlobalRegions[region]; ok {
+ region = "aws-global"
+ }
+ }
+
+ e, hasEndpoint := s.endpointForRegion(region)
+ if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) {
+ return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
+ }
+
+ defs := []endpoint{p.Defaults, s.Defaults}
+
+ return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil
+}
+
+func serviceList(ss services) []string {
+ list := make([]string, 0, len(ss))
+ for k := range ss {
+ list = append(list, k)
+ }
+ return list
+}
+func endpointList(es endpoints) []string {
+ list := make([]string, 0, len(es))
+ for k := range es {
+ list = append(list, k)
+ }
+ return list
+}
+
+type regionRegex struct {
+ *regexp.Regexp
+}
+
+func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) {
+ // Strip leading and trailing quotes
+ regex, err := strconv.Unquote(string(b))
+ if err != nil {
+ return fmt.Errorf("unable to strip quotes from regex, %v", err)
+ }
+
+ rr.Regexp, err = regexp.Compile(regex)
+ if err != nil {
+ return fmt.Errorf("unable to unmarshal region regex, %v", err)
+ }
+ return nil
+}
+
+type regions map[string]region
+
+type region struct {
+ Description string `json:"description"`
+}
+
+type services map[string]service
+
+type service struct {
+ PartitionEndpoint string `json:"partitionEndpoint"`
+ IsRegionalized boxedBool `json:"isRegionalized,omitempty"`
+ Defaults endpoint `json:"defaults"`
+ Endpoints endpoints `json:"endpoints"`
+}
+
+func (s *service) endpointForRegion(region string) (endpoint, bool) {
+ if s.IsRegionalized == boxedFalse {
+ return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
+ }
+
+ if e, ok := s.Endpoints[region]; ok {
+ return e, true
+ }
+
+	// Unable to find any matching endpoint; return a
+	// blank endpoint that will be used for generic endpoint creation.
+ return endpoint{}, false
+}
+
+type endpoints map[string]endpoint
+
+type endpoint struct {
+ Hostname string `json:"hostname"`
+ Protocols []string `json:"protocols"`
+ CredentialScope credentialScope `json:"credentialScope"`
+
+ // Custom fields not modeled
+ HasDualStack boxedBool `json:"-"`
+ DualStackHostname string `json:"-"`
+
+ // Signature Version not used
+ SignatureVersions []string `json:"signatureVersions"`
+
+ // SSLCommonName not used.
+ SSLCommonName string `json:"sslCommonName"`
+}
+
+const (
+ defaultProtocol = "https"
+ defaultSigner = "v4"
+)
+
+var (
+ protocolPriority = []string{"https", "http"}
+ signerPriority = []string{"v4", "v2"}
+)
+
+func getByPriority(s []string, p []string, def string) string {
+ if len(s) == 0 {
+ return def
+ }
+
+ for i := 0; i < len(p); i++ {
+ for j := 0; j < len(s); j++ {
+ if s[j] == p[i] {
+ return s[j]
+ }
+ }
+ }
+
+ return s[0]
+}
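+
+// For example, given an endpoint's supported protocols and the package's
+// protocol priority list:
+//
+//	getByPriority([]string{"http", "https"}, protocolPriority, defaultProtocol) // "https"
+//	getByPriority(nil, protocolPriority, defaultProtocol)                       // "https" (default)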
+
+func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
+ var merged endpoint
+ for _, def := range defs {
+ merged.mergeIn(def)
+ }
+ merged.mergeIn(e)
+ e = merged
+
+ hostname := e.Hostname
+
+	// Use the dualstack hostname if dualstack is enabled
+ if opts.UseDualStack && e.HasDualStack == boxedTrue {
+ hostname = e.DualStackHostname
+ }
+
+ u := strings.Replace(hostname, "{service}", service, 1)
+ u = strings.Replace(u, "{region}", region, 1)
+ u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
+
+ scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
+ u = fmt.Sprintf("%s://%s", scheme, u)
+
+ signingRegion := e.CredentialScope.Region
+ if len(signingRegion) == 0 {
+ signingRegion = region
+ }
+
+ signingName := e.CredentialScope.Service
+ var signingNameDerived bool
+ if len(signingName) == 0 {
+ signingName = service
+ signingNameDerived = true
+ }
+
+ return ResolvedEndpoint{
+ URL: u,
+ PartitionID: partitionID,
+ SigningRegion: signingRegion,
+ SigningName: signingName,
+ SigningNameDerived: signingNameDerived,
+ SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+ }
+}
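+
+// As an illustrative sketch of the substitution performed by resolve, a
+// modeled hostname of "{service}.{region}.{dnsSuffix}" resolved for service
+// "s3" in region "us-west-2" of a partition with DNS suffix "amazonaws.com"
+// yields the URL "https://s3.us-west-2.amazonaws.com".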
+
+func getEndpointScheme(protocols []string, disableSSL bool) string {
+ if disableSSL {
+ return "http"
+ }
+
+ return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+func (e *endpoint) mergeIn(other endpoint) {
+ if len(other.Hostname) > 0 {
+ e.Hostname = other.Hostname
+ }
+ if len(other.Protocols) > 0 {
+ e.Protocols = other.Protocols
+ }
+ if len(other.SignatureVersions) > 0 {
+ e.SignatureVersions = other.SignatureVersions
+ }
+ if len(other.CredentialScope.Region) > 0 {
+ e.CredentialScope.Region = other.CredentialScope.Region
+ }
+ if len(other.CredentialScope.Service) > 0 {
+ e.CredentialScope.Service = other.CredentialScope.Service
+ }
+ if len(other.SSLCommonName) > 0 {
+ e.SSLCommonName = other.SSLCommonName
+ }
+ if other.HasDualStack != boxedBoolUnset {
+ e.HasDualStack = other.HasDualStack
+ }
+ if len(other.DualStackHostname) > 0 {
+ e.DualStackHostname = other.DualStackHostname
+ }
+}
+
+type credentialScope struct {
+ Region string `json:"region"`
+ Service string `json:"service"`
+}
+
+type boxedBool int
+
+func (b *boxedBool) UnmarshalJSON(buf []byte) error {
+ v, err := strconv.ParseBool(string(buf))
+ if err != nil {
+ return err
+ }
+
+ if v {
+ *b = boxedTrue
+ } else {
+ *b = boxedFalse
+ }
+
+ return nil
+}
+
+const (
+ boxedBoolUnset boxedBool = iota
+ boxedFalse
+ boxedTrue
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
new file mode 100644
index 000000000..0fdfcc56e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
@@ -0,0 +1,351 @@
+// +build codegen
+
+package endpoints
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "text/template"
+ "unicode"
+)
+
+// CodeGenOptions are the options for code generating the endpoints into
+// Go code from the endpoints model definition.
+type CodeGenOptions struct {
+ // Options for how the model will be decoded.
+ DecodeModelOptions DecodeModelOptions
+
+ // Disables code generation of the service endpoint prefix IDs defined in
+ // the model.
+ DisableGenerateServiceIDs bool
+}
+
+// Set combines all of the option functions together
+func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// CodeGenModel, given an endpoints model file, will decode it and attempt to
+// generate Go code from the model definition. An error will be returned if
+// the code cannot be generated or decoded.
+func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
+ var opts CodeGenOptions
+ opts.Set(optFns...)
+
+ resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
+ *d = opts.DecodeModelOptions
+ })
+ if err != nil {
+ return err
+ }
+
+ v := struct {
+ Resolver
+ CodeGenOptions
+ }{
+ Resolver: resolver,
+ CodeGenOptions: opts,
+ }
+
+ tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
+ if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil {
+ return fmt.Errorf("failed to execute template, %v", err)
+ }
+
+ return nil
+}
+
+func toSymbol(v string) string {
+ out := []rune{}
+ for _, c := range strings.Title(v) {
+ if !(unicode.IsNumber(c) || unicode.IsLetter(c)) {
+ continue
+ }
+
+ out = append(out, c)
+ }
+
+ return string(out)
+}
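+
+// For example, toSymbol("api.sagemaker") returns "ApiSagemaker", and
+// toSymbol("us-isob-east-1") returns "UsIsobEast1".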
+
+func quoteString(v string) string {
+ return fmt.Sprintf("%q", v)
+}
+
+func regionConstName(p, r string) string {
+ return toSymbol(p) + toSymbol(r)
+}
+
+func partitionGetter(id string) string {
+ return fmt.Sprintf("%sPartition", toSymbol(id))
+}
+
+func partitionVarName(id string) string {
+ return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
+}
+
+func listPartitionNames(ps partitions) string {
+ names := []string{}
+ switch len(ps) {
+ case 1:
+ return ps[0].Name
+ case 2:
+ return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
+ default:
+ for i, p := range ps {
+ if i == len(ps)-1 {
+ names = append(names, "and "+p.Name)
+ } else {
+ names = append(names, p.Name)
+ }
+ }
+ return strings.Join(names, ", ")
+ }
+}
+
+func boxedBoolIfSet(msg string, v boxedBool) string {
+ switch v {
+ case boxedTrue:
+ return fmt.Sprintf(msg, "boxedTrue")
+ case boxedFalse:
+ return fmt.Sprintf(msg, "boxedFalse")
+ default:
+ return ""
+ }
+}
+
+func stringIfSet(msg, v string) string {
+ if len(v) == 0 {
+ return ""
+ }
+
+ return fmt.Sprintf(msg, v)
+}
+
+func stringSliceIfSet(msg string, vs []string) string {
+ if len(vs) == 0 {
+ return ""
+ }
+
+ names := []string{}
+ for _, v := range vs {
+ names = append(names, `"`+v+`"`)
+ }
+
+ return fmt.Sprintf(msg, strings.Join(names, ","))
+}
+
+func endpointIsSet(v endpoint) bool {
+ return !reflect.DeepEqual(v, endpoint{})
+}
+
+func serviceSet(ps partitions) map[string]struct{} {
+ set := map[string]struct{}{}
+ for _, p := range ps {
+ for id := range p.Services {
+ set[id] = struct{}{}
+ }
+ }
+
+ return set
+}
+
+var funcMap = template.FuncMap{
+ "ToSymbol": toSymbol,
+ "QuoteString": quoteString,
+ "RegionConst": regionConstName,
+ "PartitionGetter": partitionGetter,
+ "PartitionVarName": partitionVarName,
+ "ListPartitionNames": listPartitionNames,
+ "BoxedBoolIfSet": boxedBoolIfSet,
+ "StringIfSet": stringIfSet,
+ "StringSliceIfSet": stringSliceIfSet,
+ "EndpointIsSet": endpointIsSet,
+ "ServicesSet": serviceSet,
+}
+
+const v3Tmpl = `
+{{ define "defaults" -}}
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+ {{ template "partition consts" $.Resolver }}
+
+ {{ range $_, $partition := $.Resolver }}
+ {{ template "partition region consts" $partition }}
+ {{ end }}
+
+ {{ if not $.DisableGenerateServiceIDs -}}
+ {{ template "service consts" $.Resolver }}
+ {{- end }}
+
+ {{ template "endpoint resolvers" $.Resolver }}
+{{- end }}
+
+{{ define "partition consts" }}
+ // Partition identifiers
+ const (
+ {{ range $_, $p := . -}}
+ {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "partition region consts" }}
+ // {{ .Name }} partition's regions.
+ const (
+ {{ range $id, $region := .Regions -}}
+ {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "service consts" }}
+ // Service identifiers
+ const (
+ {{ $serviceSet := ServicesSet . -}}
+ {{ range $id, $_ := $serviceSet -}}
+ {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "endpoint resolvers" }}
+ // DefaultResolver returns an Endpoint resolver that will be able
+ // to resolve endpoints for: {{ ListPartitionNames . }}.
+ //
+ // Use DefaultPartitions() to get the list of the default partitions.
+ func DefaultResolver() Resolver {
+ return defaultPartitions
+ }
+
+ // DefaultPartitions returns a list of the partitions the SDK is bundled
+ // with. The available partitions are: {{ ListPartitionNames . }}.
+ //
+	//    partitions := endpoints.DefaultPartitions()
+ // for _, p := range partitions {
+ // // ... inspect partitions
+ // }
+ func DefaultPartitions() []Partition {
+ return defaultPartitions.Partitions()
+ }
+
+ var defaultPartitions = partitions{
+ {{ range $_, $partition := . -}}
+ {{ PartitionVarName $partition.ID }},
+ {{ end }}
+ }
+
+ {{ range $_, $partition := . -}}
+ {{ $name := PartitionGetter $partition.ID -}}
+ // {{ $name }} returns the Resolver for {{ $partition.Name }}.
+ func {{ $name }}() Partition {
+ return {{ PartitionVarName $partition.ID }}.Partition()
+ }
+ var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }}
+ {{ end }}
+{{ end }}
+
+{{ define "default partitions" }}
+ func DefaultPartitions() []Partition {
+ return []partition{
+ {{ range $_, $partition := . -}}
+ // {{ ToSymbol $partition.ID}}Partition(),
+ {{ end }}
+ }
+ }
+{{ end }}
+
+{{ define "gocode Partition" -}}
+partition{
+ {{ StringIfSet "ID: %q,\n" .ID -}}
+ {{ StringIfSet "Name: %q,\n" .Name -}}
+ {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
+ RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }},
+ {{ if EndpointIsSet .Defaults -}}
+ Defaults: {{ template "gocode Endpoint" .Defaults }},
+ {{- end }}
+ Regions: {{ template "gocode Regions" .Regions }},
+ Services: {{ template "gocode Services" .Services }},
+}
+{{- end }}
+
+{{ define "gocode RegionRegex" -}}
+regionRegex{
+ Regexp: func() *regexp.Regexp{
+ reg, _ := regexp.Compile({{ QuoteString .Regexp.String }})
+ return reg
+ }(),
+}
+{{- end }}
+
+{{ define "gocode Regions" -}}
+regions{
+ {{ range $id, $region := . -}}
+ "{{ $id }}": {{ template "gocode Region" $region }},
+ {{ end -}}
+}
+{{- end }}
+
+{{ define "gocode Region" -}}
+region{
+ {{ StringIfSet "Description: %q,\n" .Description -}}
+}
+{{- end }}
+
+{{ define "gocode Services" -}}
+services{
+ {{ range $id, $service := . -}}
+ "{{ $id }}": {{ template "gocode Service" $service }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Service" -}}
+service{
+ {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}}
+ {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}}
+ {{ if EndpointIsSet .Defaults -}}
+ Defaults: {{ template "gocode Endpoint" .Defaults -}},
+ {{- end }}
+ {{ if .Endpoints -}}
+ Endpoints: {{ template "gocode Endpoints" .Endpoints }},
+ {{- end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoints" -}}
+endpoints{
+ {{ range $id, $endpoint := . -}}
+ "{{ $id }}": {{ template "gocode Endpoint" $endpoint }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoint" -}}
+endpoint{
+ {{ StringIfSet "Hostname: %q,\n" .Hostname -}}
+ {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}}
+ {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}}
+ {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}}
+ {{ if or .CredentialScope.Region .CredentialScope.Service -}}
+ CredentialScope: credentialScope{
+ {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}}
+ {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}}
+ },
+ {{- end }}
+ {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}}
+ {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}}
+
+}
+{{- end }}
+`
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
new file mode 100644
index 000000000..fa06f7a8f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
@@ -0,0 +1,13 @@
+package aws
+
+import "github.com/aws/aws-sdk-go/aws/awserr"
+
+var (
+ // ErrMissingRegion is an error that is returned if region configuration is
+ // not found.
+ ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
+
+ // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+ // resolved for a service.
+ ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
new file mode 100644
index 000000000..91a6f277a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
@@ -0,0 +1,12 @@
+package aws
+
+// JSONValue is a representation of a grab bag type that will be marshaled
+// into a json string. This type can be used just like any other map.
+//
+// Example:
+//
+// values := aws.JSONValue{
+// "Foo": "Bar",
+// }
+// values["Baz"] = "Qux"
+type JSONValue map[string]interface{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 000000000..6ed15b2ec
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,118 @@
+package aws
+
+import (
+ "log"
+ "os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns a pointer to a LogLevel. Should be used as a workaround
+// for not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+ return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+ if l != nil {
+ return *l
+ }
+ return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, the comparison will default to LogOff.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+ c := l.Value()
+ return c&v == v
+}
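+
+// As a sketch, debug sub levels are bit flags that include LogDebug:
+//
+//	l := LogLevel(LogDebugWithSigning)
+//	l.Matches(LogDebug)             // true
+//	l.Matches(LogDebugWithHTTPBody) // false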
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+ c := l.Value()
+ return c >= v
+}
+
+const (
+ // LogOff states that no logging should be performed by the SDK. This is the
+	// default state of the SDK, and should be used to disable all logging.
+ LogOff LogLevelType = iota * 0x1000
+
+	// LogDebug states that debug output should be logged by the SDK. This should
+	// be used to inspect requests made and responses received.
+ LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+ // LogDebugWithSigning states that the SDK should log request signing and
+ // presigning events. This should be used to log the signing details of
+ // requests for debugging. Will also enable LogDebug.
+ LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
+	// HTTP bodies in addition to the headers and path. This should be used to
+	// see the body content of requests and responses made while using the SDK.
+	// Will also enable LogDebug.
+ LogDebugWithHTTPBody
+
+	// LogDebugWithRequestRetries states the SDK should log when service requests
+	// will be retried. This should be used when you want to log service request
+	// retries. Will also enable LogDebug.
+ LogDebugWithRequestRetries
+
+ // LogDebugWithRequestErrors states the SDK should log when service requests fail
+ // to build, send, validate, or unmarshal.
+ LogDebugWithRequestErrors
+
+	// LogDebugWithEventStreamBody states the SDK should log EventStream
+	// request and response bodies. This should be used to log the EventStream
+	// wire unmarshaled message content of requests and responses made while
+	// using the SDK. Will also enable LogDebug.
+ LogDebugWithEventStreamBody
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+ Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to convert a function taking a variadic
+// list of arguments and wrap it so the Logger interface can be used.
+//
+// Example:
+// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+// fmt.Fprintln(os.Stdout, args...)
+// })})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided
+func (f LoggerFunc) Log(args ...interface{}) {
+ f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout,
+// and use the same formatting flags as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+ return &defaultLogger{
+ logger: log.New(os.Stdout, "", log.LstdFlags),
+ }
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+ logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+ l.logger.Println(args...)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
new file mode 100644
index 000000000..d9b37f4d3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
@@ -0,0 +1,18 @@
+package request
+
+import (
+ "strings"
+)
+
+// isErrConnectionReset reports whether err represents a connection reset or
+// broken pipe that should be treated as retryable. Errors containing
+// "read: connection reset" are deliberately excluded and report false.
+func isErrConnectionReset(err error) bool {
+ if strings.Contains(err.Error(), "read: connection reset") {
+ return false
+ }
+
+ if strings.Contains(err.Error(), "connection reset") ||
+ strings.Contains(err.Error(), "broken pipe") {
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 000000000..185b07318
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,322 @@
+package request
+
+import (
+ "fmt"
+ "strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+ Validate HandlerList
+ Build HandlerList
+ Sign HandlerList
+ Send HandlerList
+ ValidateResponse HandlerList
+ Unmarshal HandlerList
+ UnmarshalStream HandlerList
+ UnmarshalMeta HandlerList
+ UnmarshalError HandlerList
+ Retry HandlerList
+ AfterRetry HandlerList
+ CompleteAttempt HandlerList
+ Complete HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
+func (h *Handlers) Copy() Handlers {
+ return Handlers{
+ Validate: h.Validate.copy(),
+ Build: h.Build.copy(),
+ Sign: h.Sign.copy(),
+ Send: h.Send.copy(),
+ ValidateResponse: h.ValidateResponse.copy(),
+ Unmarshal: h.Unmarshal.copy(),
+ UnmarshalStream: h.UnmarshalStream.copy(),
+ UnmarshalError: h.UnmarshalError.copy(),
+ UnmarshalMeta: h.UnmarshalMeta.copy(),
+ Retry: h.Retry.copy(),
+ AfterRetry: h.AfterRetry.copy(),
+ CompleteAttempt: h.CompleteAttempt.copy(),
+ Complete: h.Complete.copy(),
+ }
+}
+
+// Clear removes callback functions for all handlers.
+func (h *Handlers) Clear() {
+ h.Validate.Clear()
+ h.Build.Clear()
+ h.Send.Clear()
+ h.Sign.Clear()
+ h.Unmarshal.Clear()
+ h.UnmarshalStream.Clear()
+ h.UnmarshalMeta.Clear()
+ h.UnmarshalError.Clear()
+ h.ValidateResponse.Clear()
+ h.Retry.Clear()
+ h.AfterRetry.Clear()
+ h.CompleteAttempt.Clear()
+ h.Complete.Clear()
+}
+
+// IsEmpty returns true if there are no handlers in any of the handler lists.
+func (h *Handlers) IsEmpty() bool {
+ if h.Validate.Len() != 0 {
+ return false
+ }
+ if h.Build.Len() != 0 {
+ return false
+ }
+ if h.Send.Len() != 0 {
+ return false
+ }
+ if h.Sign.Len() != 0 {
+ return false
+ }
+ if h.Unmarshal.Len() != 0 {
+ return false
+ }
+ if h.UnmarshalStream.Len() != 0 {
+ return false
+ }
+ if h.UnmarshalMeta.Len() != 0 {
+ return false
+ }
+ if h.UnmarshalError.Len() != 0 {
+ return false
+ }
+ if h.ValidateResponse.Len() != 0 {
+ return false
+ }
+ if h.Retry.Len() != 0 {
+ return false
+ }
+ if h.AfterRetry.Len() != 0 {
+ return false
+ }
+ if h.CompleteAttempt.Len() != 0 {
+ return false
+ }
+ if h.Complete.Len() != 0 {
+ return false
+ }
+
+ return true
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+ Index int
+ Handler NamedHandler
+ Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+ list []NamedHandler
+
+ // Called after each request handler in the list is called. If set
+ // and the func returns true the HandlerList will continue to iterate
+ // over the request handlers. If false is returned the HandlerList
+ // will stop iterating.
+ //
+	// Should be used if extra logic needs to be performed between each handler
+	// in the list. This can be used to terminate a list's iteration
+	// based on a condition, such as an error (see HandlerListStopOnError),
+	// or for logging (see HandlerListLogItem).
+ AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+ Name string
+ Fn func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+ n := HandlerList{
+ AfterEachFn: l.AfterEachFn,
+ }
+ if len(l.list) == 0 {
+ return n
+ }
+
+ n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
+ return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+ l.list = l.list[0:0]
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+ return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+ l.PushBackNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushBackNamed pushes named handler f to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+ if cap(l.list) == 0 {
+ l.list = make([]NamedHandler, 0, 5)
+ }
+ l.list = append(l.list, n)
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+ l.PushFrontNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushFrontNamed pushes named handler f to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+ if cap(l.list) == len(l.list) {
+		// Allocating a new list is required.
+ l.list = append([]NamedHandler{n}, l.list...)
+ } else {
+ // Enough room to prepend into list.
+ l.list = append(l.list, NamedHandler{})
+ copy(l.list[1:], l.list)
+ l.list[0] = n
+ }
+}
+
+// Remove removes a NamedHandler n
+func (l *HandlerList) Remove(n NamedHandler) {
+ l.RemoveByName(n.Name)
+}
+
+// RemoveByName removes a NamedHandler by name.
+func (l *HandlerList) RemoveByName(name string) {
+ for i := 0; i < len(l.list); i++ {
+ m := l.list[i]
+ if m.Name == name {
+			// Shift the array down to avoid allocating a new one.
+ copy(l.list[i:], l.list[i+1:])
+ l.list[len(l.list)-1] = NamedHandler{}
+ l.list = l.list[:len(l.list)-1]
+
+			// decrement i so the next length check is correct
+ i--
+ }
+ }
+}
+
+// SwapNamed will swap out any existing handlers with the same name as the
+// passed in NamedHandler returning true if handlers were swapped. False is
+// returned otherwise.
+func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
+ for i := 0; i < len(l.list); i++ {
+ if l.list[i].Name == n.Name {
+ l.list[i].Fn = n.Fn
+ swapped = true
+ }
+ }
+
+ return swapped
+}
+
+// Swap will swap out all handlers matching the name passed in, replacing them
+// with the passed in handler. True is returned if any handlers were swapped.
+func (l *HandlerList) Swap(name string, replace NamedHandler) bool {
+ var swapped bool
+
+ for i := 0; i < len(l.list); i++ {
+ if l.list[i].Name == name {
+ l.list[i] = replace
+ swapped = true
+ }
+ }
+
+ return swapped
+}
+
+// SetBackNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the end of the list.
+func (l *HandlerList) SetBackNamed(n NamedHandler) {
+ if !l.SwapNamed(n) {
+ l.PushBackNamed(n)
+ }
+}
+
+// SetFrontNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the beginning of
+// the list.
+func (l *HandlerList) SetFrontNamed(n NamedHandler) {
+ if !l.SwapNamed(n) {
+ l.PushFrontNamed(n)
+ }
+}
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+ for i, h := range l.list {
+ h.Fn(r)
+ item := HandlerListRunItem{
+ Index: i, Handler: h, Request: r,
+ }
+ if l.AfterEachFn != nil && !l.AfterEachFn(item) {
+ return
+ }
+ }
+}
+
+// HandlerListLogItem logs the request handler and the state of the
+// request's Error value. Always returns true to continue iterating
+// request handlers in a HandlerList.
+func HandlerListLogItem(item HandlerListRunItem) bool {
+ if item.Request.Config.Logger == nil {
+ return true
+ }
+ item.Request.Config.Logger.Log("DEBUG: RequestHandler",
+ item.Index, item.Handler.Name, item.Request.Error)
+
+ return true
+}
+
+// HandlerListStopOnError returns false to stop the HandlerList iterating
+// over request handlers if Request.Error is not nil. True otherwise
+// to continue iterating.
+func HandlerListStopOnError(item HandlerListRunItem) bool {
+ return item.Request.Error == nil
+}
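+
+// A hypothetical usage sketch (req and the handler are illustrative): combine
+// AfterEachFn with these helpers to stop iteration on the first error:
+//
+//	var hl HandlerList
+//	hl.AfterEachFn = HandlerListStopOnError
+//	hl.PushBackNamed(NamedHandler{Name: "validate", Fn: func(r *Request) {
+//		if r.Params == nil {
+//			r.Error = awserr.New("MissingParams", "params required", nil)
+//		}
+//	}})
+//	hl.Run(req) // stops after the first handler that sets r.Error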
+
+// WithAppendUserAgent will add a string to the user agent prefixed with a
+// single white space.
+func WithAppendUserAgent(s string) Option {
+ return func(r *Request) {
+ r.Handlers.Build.PushBack(func(r2 *Request) {
+ AddToUserAgent(r, s)
+ })
+ }
+}
+
+// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
+// header. If the extra parameters are provided they will be added as metadata to the
+// name/version pair resulting in the following format:
+// "name/version (extra0; extra1; ...)"
+// The user agent part will be concatenated with this current request's user agent string.
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
+ ua := fmt.Sprintf("%s/%s", name, version)
+ if len(extra) > 0 {
+ ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
+ }
+ return func(r *Request) {
+ AddToUserAgent(r, ua)
+ }
+}
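+
+// A hypothetical usage sketch (the handler name is illustrative):
+//
+//	h := MakeAddToUserAgentHandler("myapp", "1.0.0", "linux")
+//	r.Handlers.Build.PushBackNamed(NamedHandler{Name: "myapp.UserAgent", Fn: h})
+//	// resulting User-Agent fragment: "myapp/1.0.0 (linux)"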
+
+// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
+// The input string will be concatenated with the current request's user agent string.
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
+ return func(r *Request) {
+ AddToUserAgent(r, s)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
new file mode 100644
index 000000000..79f79602b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
@@ -0,0 +1,24 @@
+package request
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+)
+
+func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
+ req := new(http.Request)
+ *req = *r
+ req.URL = &url.URL{}
+ *req.URL = *r.URL
+ req.Body = body
+
+ req.Header = http.Header{}
+ for k, v := range r.Header {
+ for _, vv := range v {
+ req.Header.Add(k, vv)
+ }
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
new file mode 100644
index 000000000..9370fa50c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
@@ -0,0 +1,65 @@
+package request
+
+import (
+ "io"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// offsetReader is a thread-safe io.ReadCloser to prevent racing
+// with retrying requests
+type offsetReader struct {
+ buf io.ReadSeeker
+ lock sync.Mutex
+ closed bool
+}
+
+func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) {
+ reader := &offsetReader{}
+ _, err := buf.Seek(offset, sdkio.SeekStart)
+ if err != nil {
+ return nil, err
+ }
+
+ reader.buf = buf
+ return reader, nil
+}
+
+// Close will close the instance of the offset reader's access to
+// the underlying io.ReadSeeker.
+func (o *offsetReader) Close() error {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+ o.closed = true
+ return nil
+}
+
+// Read is a thread-safe read of the underlying io.ReadSeeker
+func (o *offsetReader) Read(p []byte) (int, error) {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+
+ if o.closed {
+ return 0, io.EOF
+ }
+
+ return o.buf.Read(p)
+}
+
+// Seek is a thread-safe seeking operation.
+func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+
+ return o.buf.Seek(offset, whence)
+}
+
+// CloseAndCopy will return a new offsetReader with a copy of the old buffer
+// and close the old buffer.
+func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) {
+ if err := o.Close(); err != nil {
+ return nil, err
+ }
+ return newOffsetReader(o.buf, offset)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
new file mode 100644
index 000000000..8e332cce6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -0,0 +1,670 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+const (
+ // ErrCodeSerialization is the serialization error code that is received
+ // during protocol unmarshaling.
+ ErrCodeSerialization = "SerializationError"
+
+ // ErrCodeRead is an error that is returned during HTTP reads.
+ ErrCodeRead = "ReadError"
+
+ // ErrCodeResponseTimeout is the connection timeout error that is received
+ // during body reads.
+ ErrCodeResponseTimeout = "ResponseTimeout"
+
+ // ErrCodeInvalidPresignExpire is returned when the expire time provided to
+ // presign is invalid
+ ErrCodeInvalidPresignExpire = "InvalidPresignExpireError"
+
+ // CanceledErrorCode is the error code that will be returned by an
+	// API request that was canceled. Requests given an aws.Context may
+ // return this error when canceled.
+ CanceledErrorCode = "RequestCanceled"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+ Config aws.Config
+ ClientInfo metadata.ClientInfo
+ Handlers Handlers
+
+ Retryer
+ AttemptTime time.Time
+ Time time.Time
+ Operation *Operation
+ HTTPRequest *http.Request
+ HTTPResponse *http.Response
+ Body io.ReadSeeker
+ BodyStart int64 // offset from beginning of Body that the request body starts
+ Params interface{}
+ Error error
+ Data interface{}
+ RequestID string
+ RetryCount int
+ Retryable *bool
+ RetryDelay time.Duration
+ NotHoist bool
+ SignedHeaderVals http.Header
+ LastSignedAt time.Time
+ DisableFollowRedirects bool
+
+ // Additional API error codes that should be retried. IsErrorRetryable
+ // will consider these codes in addition to its built in cases.
+ RetryErrorCodes []string
+
+ // Additional API error codes that should be retried with throttle backoff
+ // delay. IsErrorThrottle will consider these codes in addition to its
+ // built in cases.
+ ThrottleErrorCodes []string
+
+ // A value greater than 0 instructs the request to be signed as Presigned URL
+ // You should not set this field directly. Instead use Request's
+ // Presign or PresignRequest methods.
+ ExpireTime time.Duration
+
+ context aws.Context
+
+ built bool
+
+ // Need to persist an intermediate body between the input Body and HTTP
+ // request body because the HTTP Client's transport can maintain a reference
+ // to the HTTP request's body after the client has returned. This value is
+	// safe to use concurrently and wraps the input Body for each HTTP request.
+ safeBody *offsetReader
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+ Name string
+ HTTPMethod string
+ HTTPPath string
+ *Paginator
+
+ BeforePresignFn func(r *Request) error
+}
+
+// New returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is a pointer value to an object which the request's response
+// payload will be deserialized to.
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+ retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+ method := operation.HTTPMethod
+ if method == "" {
+ method = "POST"
+ }
+
+ httpReq, _ := http.NewRequest(method, "", nil)
+
+ var err error
+ httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
+ if err != nil {
+ httpReq.URL = &url.URL{}
+ err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+ }
+
+ SanitizeHostForHeader(httpReq)
+
+ r := &Request{
+ Config: cfg,
+ ClientInfo: clientInfo,
+ Handlers: handlers.Copy(),
+
+ Retryer: retryer,
+ Time: time.Now(),
+ ExpireTime: 0,
+ Operation: operation,
+ HTTPRequest: httpReq,
+ Body: nil,
+ Params: params,
+ Error: err,
+ Data: data,
+ }
+ r.SetBufferBody([]byte{})
+
+ return r
+}
+
+// An Option is a functional option that can augment or modify a request when
+// using a WithContext API operation method.
+type Option func(*Request)
+
+// WithGetResponseHeader builds a request Option which will retrieve a single
+// header value from the HTTP Response. If there are multiple values for the
+// header key use WithGetResponseHeaders instead to access the http.Header
+// map directly. The passed in val pointer must be non-nil.
+//
+// This Option can be used multiple times with a single API operation.
+//
+// var id2, versionID string
+// svc.PutObjectWithContext(ctx, params,
+// request.WithGetResponseHeader("x-amz-id-2", &id2),
+// request.WithGetResponseHeader("x-amz-version-id", &versionID),
+// )
+func WithGetResponseHeader(key string, val *string) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *val = req.HTTPResponse.Header.Get(key)
+ })
+ }
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+// var headers http.Header
+// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *headers = req.HTTPResponse.Header
+ })
+ }
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+//	svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+ return func(r *Request) {
+ r.Config.LogLevel = aws.LogLevel(l)
+ }
+}
+
+// ApplyOptions will apply each option to the request calling them in the order
+// they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+ for _, opt := range opts {
+ opt(r)
+ }
+}
+
+// Context will always return a non-nil context. If the Request does not have a
+// context aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+ if r.context != nil {
+ return r.context
+ }
+ return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+ if ctx == nil {
+ panic("context cannot be nil")
+ }
+ setRequestContext(r, ctx)
+}
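+
+// A hypothetical usage sketch (on Go 1.7+, where aws.Context is context.Context):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	req.SetContext(ctx)
+//	err := req.Send() // returns a CanceledErrorCode error if ctx ends first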
+
+// WillRetry returns if the request can be retried.
+func (r *Request) WillRetry() bool {
+ if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
+ return false
+ }
+ return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+func fmtAttemptCount(retryCount, maxRetries int) string {
+ return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries)
+}
+
+// ParamsFilled returns if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or invalid.
+func (r *Request) ParamsFilled() bool {
+ return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+ return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+ r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+ r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+ r.Body = reader
+
+ if aws.IsReaderSeekable(reader) {
+ var err error
+		// Get the Body's current offset so retries will start from the same
+ // initial position.
+ r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent)
+ if err != nil {
+ r.Error = awserr.New(ErrCodeSerialization,
+ "failed to determine start of request body", err)
+ return
+ }
+ }
+ r.ResetBody()
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails. The expire parameter is only used for presigned Amazon
+// S3 API requests. All other AWS services will use a fixed expiration
+// time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or
+// less. An error is returned if the expire duration is 0 or less.
+func (r *Request) Presign(expire time.Duration) (string, error) {
+ r = r.copy()
+
+	// Presign requires all headers be hoisted. There is no way to retrieve
+	// the signed headers that were not hoisted without this, making the
+	// presigned URL useless.
+ r.NotHoist = false
+
+ u, _, err := getPresignedURL(r, expire)
+ return u, err
+}
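+
+// A hypothetical usage sketch (the service, bucket, and key are illustrative):
+//
+//	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
+//		Bucket: aws.String("mybucket"),
+//		Key:    aws.String("mykey"),
+//	})
+//	urlStr, err := req.Presign(15 * time.Minute)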
+
+// PresignRequest behaves just like Presign, with the addition of returning a
+// set of headers that were signed. The expire parameter is only used for
+// presigned Amazon S3 API requests. All other AWS services will use a fixed
+// expiration time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or
+// less. An error is returned if the expire duration is 0 or less.
+//
+// Returns the URL string for the API operation with signature in the query string,
+// and the HTTP headers that were included in the signature. These headers must
+// be included in any HTTP request made with the presigned URL.
+//
+// To prevent hoisting any headers to the query string set NotHoist to true on
+// this Request value prior to calling PresignRequest.
+func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) {
+ r = r.copy()
+ return getPresignedURL(r, expire)
+}
+
+// IsPresigned returns true if the request represents a presigned API URL.
+func (r *Request) IsPresigned() bool {
+ return r.ExpireTime != 0
+}
+
+func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) {
+ if expire <= 0 {
+ return "", nil, awserr.New(
+ ErrCodeInvalidPresignExpire,
+ "presigned URL requires an expire duration greater than 0",
+ nil,
+ )
+ }
+
+ r.ExpireTime = expire
+
+ if r.Operation.BeforePresignFn != nil {
+ if err := r.Operation.BeforePresignFn(r); err != nil {
+ return "", nil, err
+ }
+ }
+
+ if err := r.Sign(); err != nil {
+ return "", nil, err
+ }
+
+ return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+const (
+ notRetrying = "not retrying"
+)
+
+func debugLogReqError(r *Request, stage, retryStr string, err error) {
+ if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+ return
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+ stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+ if !r.built {
+ r.Handlers.Validate.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Request", notRetrying, r.Error)
+ return r.Error
+ }
+ r.Handlers.Build.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", notRetrying, r.Error)
+ return r.Error
+ }
+ r.built = true
+ }
+
+ return r.Error
+}
+
+// Sign will sign the request, returning error if errors are encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+ r.Build()
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", notRetrying, r.Error)
+ return r.Error
+ }
+
+ r.Handlers.Sign.Run(r)
+ return r.Error
+}
+
+func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) {
+ if r.safeBody != nil {
+ r.safeBody.Close()
+ }
+
+ r.safeBody, err = newOffsetReader(r.Body, r.BodyStart)
+ if err != nil {
+ return nil, awserr.New(ErrCodeSerialization,
+ "failed to get next request body reader", err)
+ }
+
+ // Go 1.8 tightened and clarified the rules code needs to use when building
+ // requests with the http package. Go 1.8 removed the automatic detection
+	// of whether the Request.Body was empty, or actually had bytes in it. The
+	// SDK always sets the Request.Body even if it is empty and should not
+	// actually be sent, which is incorrect.
+ //
+ // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http
+ // client that the request really should be sent without a body. The
+ // Request.Body cannot be set to nil, which is preferable, because the
+ // field is exported and could introduce nil pointer dereferences for users
+ // of the SDK if they used that field.
+ //
+ // Related golang/go#18257
+ l, err := aws.SeekerLen(r.Body)
+ if err != nil {
+ return nil, awserr.New(ErrCodeSerialization,
+ "failed to compute request body size", err)
+ }
+
+ if l == 0 {
+ body = NoBody
+ } else if l > 0 {
+ body = r.safeBody
+ } else {
+ // Hack to prevent sending bodies for methods where the body
+ // should be ignored by the server. Sending bodies on these
+ // methods without an associated ContentLength will cause the
+ // request to socket timeout because the server does not handle
+ // Transfer-Encoding: chunked bodies for these methods.
+ //
+		// This would only happen if an aws.ReaderSeekerCloser was used with
+		// an io.Reader that was not also an io.Seeker, or did not implement
+		// the Len() method.
+ switch r.Operation.HTTPMethod {
+ case "GET", "HEAD", "DELETE":
+ body = NoBody
+ default:
+ body = r.safeBody
+ }
+ }
+
+ return body, nil
+}
+
+// GetBody will return an io.ReadSeeker of the Request's underlying
+// input body with a concurrency safe wrapper.
+func (r *Request) GetBody() io.ReadSeeker {
+ return r.safeBody
+}
+
+// Send will send the request, returning error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+//
+// Canceling a request is non-deterministic. If a request has been canceled,
+// then the transport will choose, randomly, one of the state channels during
+// reads or getting the connection.
+//
+// readLoop() and getConn(req *Request, cm connectMethod)
+// https://github.com/golang/go/blob/master/src/net/http/transport.go
+//
+// Send will not close the request.Request's body.
+func (r *Request) Send() error {
+ defer func() {
+ // Regardless of success or failure of the request trigger the Complete
+ // request handlers.
+ r.Handlers.Complete.Run(r)
+ }()
+
+ if err := r.Error; err != nil {
+ return err
+ }
+
+ for {
+ r.Error = nil
+ r.AttemptTime = time.Now()
+
+ if err := r.Sign(); err != nil {
+ debugLogReqError(r, "Sign Request", notRetrying, err)
+ return err
+ }
+
+ if err := r.sendRequest(); err == nil {
+ return nil
+ }
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+
+ if r.Error != nil || !aws.BoolValue(r.Retryable) {
+ return r.Error
+ }
+
+ if err := r.prepareRetry(); err != nil {
+ r.Error = err
+ return err
+ }
+ }
+}
+
+func (r *Request) prepareRetry() error {
+ if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+ r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
+ }
+
+ // The previous http.Request will have a reference to the r.Body
+ // and the HTTP Client's Transport may still be reading from
+ // the request's body even though the Client's Do returned.
+ r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
+ r.ResetBody()
+ if err := r.Error; err != nil {
+ return awserr.New(ErrCodeSerialization,
+ "failed to prepare body for retry", err)
+
+ }
+
+ // Closing response body to ensure that no response body is leaked
+ // between retry attempts.
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
+ r.HTTPResponse.Body.Close()
+ }
+
+ return nil
+}
+
+func (r *Request) sendRequest() (sendErr error) {
+ defer r.Handlers.CompleteAttempt.Run(r)
+
+ r.Retryable = nil
+ r.Handlers.Send.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Send Request",
+ fmtAttemptCount(r.RetryCount, r.MaxRetries()),
+ r.Error)
+ return r.Error
+ }
+
+ r.Handlers.UnmarshalMeta.Run(r)
+ r.Handlers.ValidateResponse.Run(r)
+ if r.Error != nil {
+ r.Handlers.UnmarshalError.Run(r)
+ debugLogReqError(r, "Validate Response",
+ fmtAttemptCount(r.RetryCount, r.MaxRetries()),
+ r.Error)
+ return r.Error
+ }
+
+ r.Handlers.Unmarshal.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Unmarshal Response",
+ fmtAttemptCount(r.RetryCount, r.MaxRetries()),
+ r.Error)
+ return r.Error
+ }
+
+ return nil
+}
+
+// copy will copy a request which will allow for local manipulation of the
+// request.
+func (r *Request) copy() *Request {
+ req := &Request{}
+ *req = *r
+ req.Handlers = r.Handlers.Copy()
+ op := *r.Operation
+ req.Operation = &op
+ return req
+}
+
+// AddToUserAgent adds the string to the end of the request's current user agent.
+func AddToUserAgent(r *Request, s string) {
+ curUA := r.HTTPRequest.Header.Get("User-Agent")
+ if len(curUA) > 0 {
+ s = curUA + " " + s
+ }
+ r.HTTPRequest.Header.Set("User-Agent", s)
+}
+
+// SanitizeHostForHeader removes default port from host and updates request.Host
+func SanitizeHostForHeader(r *http.Request) {
+ host := getHost(r)
+ port := portOnly(host)
+ if port != "" && isDefaultPort(r.URL.Scheme, port) {
+ r.Host = stripPort(host)
+ }
+}
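+
+// For example, a request to "https://example.com:443/key" will have its Host
+// set to "example.com", while "https://example.com:9443/key" keeps the
+// non-default port in the host.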
+
+// Returns host from request
+func getHost(r *http.Request) string {
+ if r.Host != "" {
+ return r.Host
+ }
+
+ return r.URL.Host
+}
+
+// Hostname returns u.Host, without any port number.
+//
+// If Host is an IPv6 literal with a port number, Hostname returns the
+// IPv6 literal without the square brackets. IPv6 literals may include
+// a zone identifier.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
+
+// Port returns the port part of u.Host, without the leading colon.
+// If u.Host doesn't contain a port, Port returns an empty string.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func portOnly(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return ""
+ }
+ if i := strings.Index(hostport, "]:"); i != -1 {
+ return hostport[i+len("]:"):]
+ }
+ if strings.Contains(hostport, "]") {
+ return ""
+ }
+ return hostport[colon+len(":"):]
+}
+
+// Returns true if the specified URI is using the standard port
+// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
+func isDefaultPort(scheme, port string) bool {
+ if port == "" {
+ return true
+ }
+
+ lowerCaseScheme := strings.ToLower(scheme)
+ if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
new file mode 100644
index 000000000..e36e468b7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
@@ -0,0 +1,39 @@
+// +build !go1.8
+
+package request
+
+import "io"
+
+// NoBody is an io.ReadCloser with no bytes. Read always returns EOF
+// and Close always returns nil. It can be used in an outgoing client
+// request to explicitly signal that a request has zero bytes.
+// An alternative, however, is to simply set Request.Body to nil.
+//
+// Copy of Go 1.8 NoBody type from net/http/http.go
+type noBody struct{}
+
+func (noBody) Read([]byte) (int, error) { return 0, io.EOF }
+func (noBody) Close() error { return nil }
+func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
+
+// NoBody is an empty reader that will trigger the Go HTTP client to not
+// include any body in the HTTP request.
+var NoBody = noBody{}
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+func (r *Request) ResetBody() {
+ body, err := r.getNextRequestBody()
+ if err != nil {
+ r.Error = err
+ return
+ }
+
+ r.HTTPRequest.Body = body
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
new file mode 100644
index 000000000..de1292f45
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -0,0 +1,36 @@
+// +build go1.8
+
+package request
+
+import (
+ "net/http"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// NoBody is the http.NoBody reader instructing the Go HTTP client to not
+// include any body in the HTTP request.
+var NoBody = http.NoBody
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+//
+// Will also set the Go 1.8's http.Request.GetBody member to allow retrying
+// PUT/POST redirects.
+func (r *Request) ResetBody() {
+ body, err := r.getNextRequestBody()
+ if err != nil {
+ r.Error = awserr.New(ErrCodeSerialization,
+ "failed to reset request body", err)
+ return
+ }
+
+ r.HTTPRequest.Body = body
+ r.HTTPRequest.GetBody = r.getNextRequestBody
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
new file mode 100644
index 000000000..a7365cd1e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
@@ -0,0 +1,14 @@
+// +build go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setRequestContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+ r.context = ctx
+ r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
new file mode 100644
index 000000000..307fa0705
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setRequestContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+ r.context = ctx
+ r.HTTPRequest.Cancel = ctx.Done()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
new file mode 100644
index 000000000..f093fc542
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -0,0 +1,264 @@
+package request
+
+import (
+ "reflect"
+ "sync/atomic"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// A Pagination provides pagination of SDK API operations which are paginatable.
+// Generally you should not use this type directly, but use the "Pages" API
+// operations method to automatically perform pagination for you. Such as,
+// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods.
+//
+// Pagination differs from a Paginator type in that pagination is the type that
+// does the pagination between API operations, and Paginator defines the
+// configuration that will be used per page request.
+//
+// cont := true
+// for p.Next() && cont {
+// data := p.Page().(*s3.ListObjectsOutput)
+// // process the page's data
+// }
+// return p.Err()
+//
+// See service client API operation Pages methods for examples how the SDK will
+// use the Pagination type.
+type Pagination struct {
+ // Function to return a Request value for each pagination request.
+ // Any configuration or handlers that need to be applied to the request
+ // prior to getting the next page should be done here before the request
+	// is returned.
+ //
+ // NewRequest should always be built from the same API operations. It is
+ // undefined if different API operations are returned on subsequent calls.
+ NewRequest func() (*Request, error)
+ // EndPageOnSameToken, when enabled, will allow the paginator to stop on
+	// tokens that are the same as the previous page's tokens.
+ EndPageOnSameToken bool
+
+ started bool
+ prevTokens []interface{}
+ nextTokens []interface{}
+
+ err error
+ curPage interface{}
+}
+
+// HasNextPage will return true if Pagination is able to determine that the API
+// operation has additional pages. False will be returned if there are no more
+// pages remaining.
+//
+// Will always return true if Next has not been called yet.
+func (p *Pagination) HasNextPage() bool {
+ if !p.started {
+ return true
+ }
+
+ hasNextPage := len(p.nextTokens) != 0
+ if p.EndPageOnSameToken {
+ return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens)
+ }
+ return hasNextPage
+}
+
+// Err returns the error Pagination encountered when retrieving the next page.
+func (p *Pagination) Err() error {
+ return p.err
+}
+
+// Page returns the current page. Page should only be called after a successful
+// call to Next. It is undefined what Page will return if Page is called after
+// Next returns false.
+func (p *Pagination) Page() interface{} {
+ return p.curPage
+}
+
+// Next will attempt to retrieve the next page for the API operation. When a page
+// is retrieved, true will be returned. If the page cannot be retrieved, or there
+// are no more pages, false will be returned.
+//
+// Use the Page method to retrieve the current page data. The data will need
+// to be cast to the API operation's output type.
+//
+// Use the Err method to determine if an error occurred if Next returns false.
+func (p *Pagination) Next() bool {
+ if !p.HasNextPage() {
+ return false
+ }
+
+ req, err := p.NewRequest()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ if p.started {
+ for i, intok := range req.Operation.InputTokens {
+ awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i])
+ }
+ }
+ p.started = true
+
+ err = req.Send()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ p.prevTokens = p.nextTokens
+ p.nextTokens = req.nextPageTokens()
+ p.curPage = req.Data
+
+ return true
+}
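+
+// A hypothetical usage sketch of Pagination (the operation and output types
+// are illustrative):
+//
+//	p := Pagination{
+//		NewRequest: func() (*Request, error) {
+//			req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{Bucket: aws.String("b")})
+//			return req, nil
+//		},
+//	}
+//	for p.Next() {
+//		page := p.Page().(*s3.ListObjectsOutput)
+//		// process the page's data
+//	}
+//	err := p.Err()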
+
+// A Paginator is the configuration data that defines how an API operation
+// should be paginated. This type is used by the API service models to define
+// the generated pagination config for service APIs.
+//
+// The Pagination type is what provides iterating between pages of an API. It
+// is only used to store the token metadata the SDK should use for performing
+// pagination.
+type Paginator struct {
+ InputTokens []string
+ OutputTokens []string
+ LimitToken string
+ TruncationToken string
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of data.
+func (r *Request) nextPageTokens() []interface{} {
+ if r.Operation.Paginator == nil {
+ return nil
+ }
+ if r.Operation.TruncationToken != "" {
+ tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+ if len(tr) == 0 {
+ return nil
+ }
+
+ switch v := tr[0].(type) {
+ case *bool:
+ if !aws.BoolValue(v) {
+ return nil
+ }
+ case bool:
+ if !v {
+ return nil
+ }
+ }
+ }
+
+ tokens := []interface{}{}
+ tokenAdded := false
+ for _, outToken := range r.Operation.OutputTokens {
+ vs, _ := awsutil.ValuesAtPath(r.Data, outToken)
+ if len(vs) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ v := vs[0]
+
+ switch tv := v.(type) {
+ case *string:
+ if len(aws.StringValue(tv)) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ case string:
+ if len(tv) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ }
+
+ tokenAdded = true
+ tokens = append(tokens, v)
+ }
+ if !tokenAdded {
+ return nil
+ }
+
+ return tokens
+}
+
+// Ensure a deprecated item is only logged once instead of each time it's used.
+func logDeprecatedf(logger aws.Logger, flag *int32, msg string) {
+ if logger == nil {
+ return
+ }
+ if atomic.CompareAndSwapInt32(flag, 0, 1) {
+ logger.Log(msg)
+ }
+}
+
+var (
+ logDeprecatedHasNextPage int32
+ logDeprecatedNextPage int32
+ logDeprecatedEachPage int32
+)
+
+// HasNextPage returns true if this request has more pages of data available.
+//
+// Deprecated: Use Pagination type for configurable pagination of API operations
+func (r *Request) HasNextPage() bool {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage,
+ "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ return len(r.nextPageTokens()) > 0
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+//
+// Deprecated: Use Pagination type for configurable pagination of API operations
+func (r *Request) NextPage() *Request {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage,
+ "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ tokens := r.nextPageTokens()
+ if len(tokens) == 0 {
+ return nil
+ }
+
+ data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+ nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+ for i, intok := range nr.Operation.InputTokens {
+ awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+ }
+ return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+// func(page *T, lastPage bool) bool {
+// return true // return false to stop iterating
+// }
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
+//
+// Deprecated: Use Pagination type for configurable pagination of API operations
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage,
+ "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ for page := r; page != nil; page = page.NextPage() {
+ if err := page.Send(); err != nil {
+ return err
+ }
+ if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+ return page.Error
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
new file mode 100644
index 000000000..e84084da5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -0,0 +1,276 @@
+package request
+
+import (
+ "net"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Retryer provides the interface to drive the SDK's request retry behavior.
+// The Retryer implementation is responsible for implementing exponential
+// backoff, and determining if a request API error should be retried.
+//
+// client.DefaultRetryer is the SDK's default implementation of the Retryer.
+// It uses the Request.IsErrorRetryable and Request.IsErrorThrottle methods to
+// determine if the request is retried.
+type Retryer interface {
+ // RetryRules return the retry delay that should be used by the SDK before
+ // making another request attempt for the failed request.
+ RetryRules(*Request) time.Duration
+
+ // ShouldRetry returns if the failed request is retryable.
+ //
+	// Implementations may consider request attempt count when determining if a
+	// request is retryable, but the SDK will use MaxRetries to limit the
+	// number of attempts made for a request.
+ ShouldRetry(*Request) bool
+
+ // MaxRetries is the number of times a request may be retried before
+ // failing.
+ MaxRetries() int
+}
+
+// WithRetryer sets a Retryer value to the given Config returning the Config
+// value for chaining.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+ cfg.Retryer = retryer
+ return cfg
+}
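+
+// A minimal custom Retryer sketch (illustrative, not the SDK's default
+// implementation):
+//
+//	type fixedRetryer struct{}
+//
+//	func (fixedRetryer) RetryRules(*Request) time.Duration { return 100 * time.Millisecond }
+//	func (fixedRetryer) ShouldRetry(r *Request) bool       { return r.IsErrorRetryable() }
+//	func (fixedRetryer) MaxRetries() int                   { return 3 }
+//
+//	cfg := WithRetryer(aws.NewConfig(), fixedRetryer{})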
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
+var retryableCodes = map[string]struct{}{
+ "RequestError": {},
+ "RequestTimeout": {},
+ ErrCodeResponseTimeout: {},
+ "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
+}
+
+var throttleCodes = map[string]struct{}{
+ "ProvisionedThroughputExceededException": {},
+ "Throttling": {},
+ "ThrottlingException": {},
+ "RequestLimitExceeded": {},
+ "RequestThrottled": {},
+ "RequestThrottledException": {},
+ "TooManyRequestsException": {}, // Lambda functions
+ "PriorRequestNotComplete": {}, // Route53
+ "TransactionInProgressException": {},
+}
+
+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// resigning before the request can be retried.
+var credsExpiredCodes = map[string]struct{}{
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ "RequestExpired": {}, // EC2 Only
+}
+
+func isCodeThrottle(code string) bool {
+ _, ok := throttleCodes[code]
+ return ok
+}
+
+func isCodeRetryable(code string) bool {
+ if _, ok := retryableCodes[code]; ok {
+ return true
+ }
+
+ return isCodeExpiredCreds(code)
+}
+
+func isCodeExpiredCreds(code string) bool {
+ _, ok := credsExpiredCodes[code]
+ return ok
+}
+
+var validParentCodes = map[string]struct{}{
+ ErrCodeSerialization: {},
+ ErrCodeRead: {},
+}
+
+func isNestedErrorRetryable(parentErr awserr.Error) bool {
+ if parentErr == nil {
+ return false
+ }
+
+ if _, ok := validParentCodes[parentErr.Code()]; !ok {
+ return false
+ }
+
+ err := parentErr.OrigErr()
+ if err == nil {
+ return false
+ }
+
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code())
+ }
+
+ if t, ok := err.(temporary); ok {
+ return t.Temporary() || isErrConnectionReset(err)
+ }
+
+ return isErrConnectionReset(err)
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if error is nil.
+func IsErrorRetryable(err error) bool {
+ if err == nil {
+ return false
+ }
+ return shouldRetryError(err)
+}
+
+type temporary interface {
+ Temporary() bool
+}
+
+func shouldRetryError(origErr error) bool {
+ switch err := origErr.(type) {
+ case awserr.Error:
+ if err.Code() == CanceledErrorCode {
+ return false
+ }
+ if isNestedErrorRetryable(err) {
+ return true
+ }
+
+ origErr := err.OrigErr()
+ var shouldRetry bool
+ if origErr != nil {
+			shouldRetry = shouldRetryError(origErr)
+ if err.Code() == "RequestError" && !shouldRetry {
+ return false
+ }
+ }
+ if isCodeRetryable(err.Code()) {
+ return true
+ }
+ return shouldRetry
+
+ case *url.Error:
+ if strings.Contains(err.Error(), "connection refused") {
+ // Refused connections should be retried as the service may not yet
+ // be running on the port. Go TCP dial considers refused
+ // connections as not temporary.
+ return true
+ }
+ // *url.Error only implements Temporary after golang 1.6 but since
+ // url.Error only wraps the error:
+ return shouldRetryError(err.Err)
+
+ case temporary:
+ if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" {
+ return true
+ }
+ // If the error is temporary, we want to allow continuation of the
+ // retry process
+ return err.Temporary() || isErrConnectionReset(origErr)
+
+ case nil:
+ // `awserr.Error.OrigErr()` can be nil, meaning there was an error but
+ // because we don't know the cause, it is marked as retryable. See
+ // TestRequest4xxUnretryable for an example.
+ return true
+
+ default:
+ switch err.Error() {
+ case "net/http: request canceled",
+ "net/http: request canceled while waiting for connection":
+ // known 1.5 error case when an http request is cancelled
+ return false
+ }
+ // here we don't know the error; so we allow a retry.
+ return true
+ }
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if error is nil.
+func IsErrorThrottle(err error) bool {
+ if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+ return isCodeThrottle(aerr.Code())
+ }
+ return false
+}
+
+// IsErrorExpiredCreds returns whether the error code is a credential expiry
+// error. Returns false if error is nil.
+func IsErrorExpiredCreds(err error) bool {
+ if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+ return isCodeExpiredCreds(aerr.Code())
+ }
+ return false
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorRetryable
+func (r *Request) IsErrorRetryable() bool {
+ if isErrCode(r.Error, r.RetryErrorCodes) {
+ return true
+ }
+
+ // HTTP response status code 501 should not be retried.
+ // 501 represents Not Implemented which means the request method is not
+ // supported by the server and cannot be handled.
+ if r.HTTPResponse != nil {
+ // HTTP response status code 500 represents internal server error and
+ // should be retried without any throttle.
+ if r.HTTPResponse.StatusCode == 500 {
+ return true
+ }
+ }
+ return IsErrorRetryable(r.Error)
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its
+// code. Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorThrottle
+func (r *Request) IsErrorThrottle() bool {
+ if isErrCode(r.Error, r.ThrottleErrorCodes) {
+ return true
+ }
+
+ if r.HTTPResponse != nil {
+ switch r.HTTPResponse.StatusCode {
+ case
+ 429, // error caused due to too many requests
+ 502, // Bad Gateway error should be throttled
+ 503, // caused when service is unavailable
+ 504: // error occurred due to gateway timeout
+ return true
+ }
+ }
+
+ return IsErrorThrottle(r.Error)
+}
+
+func isErrCode(err error, codes []string) bool {
+ if aerr, ok := err.(awserr.Error); ok && aerr != nil {
+ for _, code := range codes {
+ if code == aerr.Code() {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// IsErrorExpired returns whether the error code is a credential expiry error.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorExpiredCreds
+func (r *Request) IsErrorExpired() bool {
+ return IsErrorExpiredCreds(r.Error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
new file mode 100644
index 000000000..09a44eb98
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
@@ -0,0 +1,94 @@
+package request
+
+import (
+ "io"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var timeoutErr = awserr.New(
+ ErrCodeResponseTimeout,
+ "read on body has reached the timeout limit",
+ nil,
+)
+
+type readResult struct {
+ n int
+ err error
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// We will return an ErrCodeResponseTimeout error if a timeout occurs.
+type timeoutReadCloser struct {
+ reader io.ReadCloser
+ duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whichever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+ timer := time.NewTimer(r.duration)
+ c := make(chan readResult, 1)
+
+ go func() {
+ n, err := r.reader.Read(b)
+ timer.Stop()
+ c <- readResult{n: n, err: err}
+ }()
+
+ select {
+ case data := <-c:
+ return data.n, data.err
+ case <-timer.C:
+ return 0, timeoutErr
+ }
+}
+
+func (r *timeoutReadCloser) Close() error {
+ return r.reader.Close()
+}
+
+const (
+ // HandlerResponseTimeout is what we use to signify the name of the
+ // response timeout handler.
+ HandlerResponseTimeout = "ResponseTimeoutHandler"
+)
+
+// adaptToResponseTimeoutError is a handler that will replace any top level
+// error with its underlying ErrCodeResponseTimeout error, if that is the cause.
+func adaptToResponseTimeoutError(req *Request) {
+ if err, ok := req.Error.(awserr.Error); ok {
+ aerr, ok := err.OrigErr().(awserr.Error)
+ if ok && aerr.Code() == ErrCodeResponseTimeout {
+ req.Error = aerr
+ }
+ }
+}
+
+// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
+// This will allow for per-read timeouts. If a timeout occurs, the
+// ErrCodeResponseTimeout error will be returned.
+//
+//	svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30*time.Second))
+func WithResponseReadTimeout(duration time.Duration) Option {
+ return func(r *Request) {
+
+ var timeoutHandler = NamedHandler{
+ HandlerResponseTimeout,
+ func(req *Request) {
+ req.HTTPResponse.Body = &timeoutReadCloser{
+ reader: req.HTTPResponse.Body,
+ duration: duration,
+ }
+ }}
+
+		// Remove any existing handler so we are not stacking timeout handlers
+		// with different durations.
+ r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+ r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+ r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+ r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
new file mode 100644
index 000000000..8630683f3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -0,0 +1,286 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+ // InvalidParameterErrCode is the error code for invalid parameters errors
+ InvalidParameterErrCode = "InvalidParameter"
+ // ParamRequiredErrCode is the error code for required parameter errors
+ ParamRequiredErrCode = "ParamRequiredError"
+ // ParamMinValueErrCode is the error code for fields with too low of a
+ // number value.
+ ParamMinValueErrCode = "ParamMinValueError"
+ // ParamMinLenErrCode is the error code for fields without enough elements.
+ ParamMinLenErrCode = "ParamMinLenError"
+ // ParamMaxLenErrCode is the error code for value being too long.
+ ParamMaxLenErrCode = "ParamMaxLenError"
+
+ // ParamFormatErrCode is the error code for a field with invalid
+ // format or characters.
+ ParamFormatErrCode = "ParamFormatInvalidError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
+type Validator interface {
+ Validate() error
+}
+
+// An ErrInvalidParams provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type ErrInvalidParams struct {
+ // Context is the base context of the invalid parameter group.
+ Context string
+ errs []ErrInvalidParam
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
+ err.SetContext(e.Context)
+ e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another ErrInvalidParams
+// value into this collection. The nested errors will have their nested and
+// base contexts updated to reflect the merging.
+//
+// Use for nested validation errors.
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
+ for _, err := range nested.errs {
+ err.SetContext(e.Context)
+ err.AddNestedContext(nestedCtx)
+ e.errs = append(e.errs, err)
+ }
+}
+
+// Len returns the number of invalid parameter errors
+func (e ErrInvalidParams) Len() int {
+ return len(e.errs)
+}
+
+// Code returns the code of the error
+func (e ErrInvalidParams) Code() string {
+ return InvalidParameterErrCode
+}
+
+// Message returns the message of the error
+func (e ErrInvalidParams) Message() string {
+ return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e ErrInvalidParams) Error() string {
+ w := &bytes.Buffer{}
+ fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+ for _, err := range e.errs {
+ fmt.Fprintf(w, "- %s\n", err.Message())
+ }
+
+ return w.String()
+}
+
+// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
+func (e ErrInvalidParams) OrigErr() error {
+ return awserr.NewBatchError(
+ InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters
+func (e ErrInvalidParams) OrigErrs() []error {
+ errs := make([]error, len(e.errs))
+ for i := 0; i < len(errs); i++ {
+ errs[i] = e.errs[i]
+ }
+
+ return errs
+}
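+
+// A hypothetical usage sketch for collecting validation errors (the field
+// names are illustrative):
+//
+//	errs := ErrInvalidParams{Context: "PutObjectInput"}
+//	if input.Bucket == nil {
+//		errs.Add(NewErrParamRequired("Bucket"))
+//	}
+//	if errs.Len() > 0 {
+//		return errs // satisfies the awserr.Error interface
+//	}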
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+ awserr.Error
+
+ // Field name the error occurred on.
+ Field() string
+
+ // SetContext updates the context of the error.
+ SetContext(string)
+
+ // AddNestedContext updates the error's context to include a nested level.
+ AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+ context string
+ nestedContext string
+ field string
+ code string
+ msg string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+ return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+ return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+ return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil. Implemented to satisfy the awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+ return nil
+}
+
+// Field returns the field and context in which the error occurred.
+func (e *errInvalidParam) Field() string {
+ field := e.context
+ if len(field) > 0 {
+ field += "."
+ }
+ if len(e.nestedContext) > 0 {
+ field += fmt.Sprintf("%s.", e.nestedContext)
+ }
+ field += e.field
+
+ return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+ e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+ if len(e.nestedContext) == 0 {
+ e.nestedContext = ctx
+ } else {
+ e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+ }
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+ errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ErrParamRequired {
+ return &ErrParamRequired{
+ errInvalidParam{
+ code: ParamRequiredErrCode,
+ field: field,
+			msg:   "missing required field",
+ },
+ }
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+ errInvalidParam
+ min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+ return &ErrParamMinValue{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinValueErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field value of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+ return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+ errInvalidParam
+ min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+ return &ErrParamMinLen{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinLenErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field size of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+ return e.min
+}
+
+// An ErrParamMaxLen represents a maximum length parameter error.
+type ErrParamMaxLen struct {
+ errInvalidParam
+ max int
+}
+
+// NewErrParamMaxLen creates a new maximum length parameter error.
+func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
+ return &ErrParamMaxLen{
+ errInvalidParam: errInvalidParam{
+ code: ParamMaxLenErrCode,
+ field: field,
+ msg: fmt.Sprintf("maximum size of %v, %v", max, value),
+ },
+ max: max,
+ }
+}
+
+// MaxLen returns the field's required maximum length.
+func (e *ErrParamMaxLen) MaxLen() int {
+ return e.max
+}
+
+// An ErrParamFormat represents an invalid format parameter error.
+type ErrParamFormat struct {
+ errInvalidParam
+ format string
+}
+
+// NewErrParamFormat creates a new invalid format parameter error.
+func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
+ return &ErrParamFormat{
+ errInvalidParam: errInvalidParam{
+ code: ParamFormatErrCode,
+ field: field,
+ msg: fmt.Sprintf("format %v, %v", format, value),
+ },
+ format: format,
+ }
+}
+
+// Format returns the field's required format.
+func (e *ErrParamFormat) Format() string {
+ return e.format
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
new file mode 100644
index 000000000..4601f883c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -0,0 +1,295 @@
+package request
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
+// the waiter's max attempts have been exhausted.
+const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
+
+// A WaiterOption is a function that will update the Waiter value's fields to
+// configure the waiter.
+type WaiterOption func(*Waiter)
+
+// WithWaiterMaxAttempts returns a waiter option that sets the maximum number
+// of times the waiter should attempt to check the resource for the target
+// state.
+func WithWaiterMaxAttempts(max int) WaiterOption {
+ return func(w *Waiter) {
+ w.MaxAttempts = max
+ }
+}
+
+// WaiterDelay will return a delay the waiter should pause between attempts to
+// check the resource state. The passed in attempt is the number of times the
+// Waiter has checked the resource state.
+type WaiterDelay func(attempt int) time.Duration
+
+// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
+// delay the waiter should use between attempts. It ignores the number of
+// attempts made.
+func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
+ return func(attempt int) time.Duration {
+ return delay
+ }
+}
+
+// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
+func WithWaiterDelay(delayer WaiterDelay) WaiterOption {
+ return func(w *Waiter) {
+ w.Delay = delayer
+ }
+}
+
+// WithWaiterLogger returns a waiter option to set the logger a waiter
+// should use to log warnings and errors to.
+func WithWaiterLogger(logger aws.Logger) WaiterOption {
+ return func(w *Waiter) {
+ w.Logger = logger
+ }
+}
+
+// WithWaiterRequestOptions returns a waiter option setting the request
+// options for each request the waiter makes. Appends to waiter's request
+// options already set.
+func WithWaiterRequestOptions(opts ...Option) WaiterOption {
+ return func(w *Waiter) {
+ w.RequestOptions = append(w.RequestOptions, opts...)
+ }
+}
+
+// A Waiter provides the functionality to perform a blocking call which will
+// wait for a resource state to be satisfied by a service.
+//
+// This type should not be used directly. The API operations provided in the
+// service packages prefixed with "WaitUntil" should be used instead.
+type Waiter struct {
+ Name string
+ Acceptors []WaiterAcceptor
+ Logger aws.Logger
+
+ MaxAttempts int
+ Delay WaiterDelay
+
+ RequestOptions []Option
+ NewRequest func([]Option) (*Request, error)
+ SleepWithContext func(aws.Context, time.Duration) error
+}
+
+// ApplyOptions updates the waiter with the list of waiter options provided.
+func (w *Waiter) ApplyOptions(opts ...WaiterOption) {
+ for _, fn := range opts {
+ fn(w)
+ }
+}
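+
+// Editorial sketch, not part of the vendored source: waiter options are
+// composed through ApplyOptions. The waiter value `w` is assumed to already
+// exist, and the option values are illustrative.
+//
+//    w.ApplyOptions(
+//        WithWaiterMaxAttempts(10),
+//        WithWaiterDelay(ConstantWaiterDelay(5*time.Second)),
+//        WithWaiterLogger(aws.NewDefaultLogger()),
+//    )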
+
+// WaiterState are states the waiter uses based on WaiterAcceptor definitions
+// to identify if the resource state the waiter is waiting on has occurred.
+type WaiterState int
+
+// String returns the string representation of the waiter state.
+func (s WaiterState) String() string {
+ switch s {
+ case SuccessWaiterState:
+ return "success"
+ case FailureWaiterState:
+ return "failure"
+ case RetryWaiterState:
+ return "retry"
+ default:
+ return "unknown waiter state"
+ }
+}
+
+// States the waiter acceptors will use to identify target resource states.
+const (
+ SuccessWaiterState WaiterState = iota // waiter successful
+ FailureWaiterState // waiter failed
+ RetryWaiterState // waiter needs to be retried
+)
+
+// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor
+// definition's Expected attribute.
+type WaiterMatchMode int
+
+// Modes the waiter will use when inspecting API response to identify target
+// resource states.
+const (
+ PathAllWaiterMatch WaiterMatchMode = iota // match on all paths
+ PathWaiterMatch // match on specific path
+ PathAnyWaiterMatch // match on any path
+ PathListWaiterMatch // match on list of paths
+ StatusWaiterMatch // match on status code
+ ErrorWaiterMatch // match on error
+)
+
+// String returns the string representation of the waiter match mode.
+func (m WaiterMatchMode) String() string {
+ switch m {
+ case PathAllWaiterMatch:
+ return "pathAll"
+ case PathWaiterMatch:
+ return "path"
+ case PathAnyWaiterMatch:
+ return "pathAny"
+ case PathListWaiterMatch:
+ return "pathList"
+ case StatusWaiterMatch:
+ return "status"
+ case ErrorWaiterMatch:
+ return "error"
+ default:
+ return "unknown waiter match mode"
+ }
+}
+
+// WaitWithContext will make requests for the API operation using NewRequest to
+// build API requests. The request's response will be compared against the
+// Waiter's Acceptors to determine the successful state of the resource the
+// waiter is inspecting.
+//
+// The passed in context must not be nil. If it is nil a panic will occur. The
+// Context will be used to cancel the waiter's pending requests and retry delays.
+// Use aws.BackgroundContext if no context is available.
+//
+// The waiter will continue until the target state defined by the Acceptors is
+// reached, or the max attempts expire.
+//
+// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's
+// retryer ShouldRetry returns false. This normally will happen when the max
+// wait attempts expires.
+func (w Waiter) WaitWithContext(ctx aws.Context) error {
+ for attempt := 1; ; attempt++ {
+ req, err := w.NewRequest(w.RequestOptions)
+ if err != nil {
+ waiterLogf(w.Logger, "unable to create request %v", err)
+ return err
+ }
+ req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
+ err = req.Send()
+
+ // See if any of the acceptors match the request's response, or error
+ for _, a := range w.Acceptors {
+ if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
+ return matchErr
+ }
+ }
+
+ // The Waiter should only check the resource state MaxAttempts times.
+ // This is here instead of in the for loop above to prevent delaying
+ // unnecessarily when the waiter will not retry.
+ if attempt == w.MaxAttempts {
+ break
+ }
+
+ // Delay to wait before inspecting the resource again
+ delay := w.Delay(attempt)
+ if sleepFn := req.Config.SleepDelay; sleepFn != nil {
+ // Support SleepDelay for backwards compatibility and testing
+ sleepFn(delay)
+ } else {
+ sleepCtxFn := w.SleepWithContext
+ if sleepCtxFn == nil {
+ sleepCtxFn = aws.SleepWithContext
+ }
+
+ if err := sleepCtxFn(ctx, delay); err != nil {
+ return awserr.New(CanceledErrorCode, "waiter context canceled", err)
+ }
+ }
+ }
+
+ return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil)
+}
+
+// A WaiterAcceptor provides the information needed to wait for an API operation
+// to complete.
+type WaiterAcceptor struct {
+ State WaiterState
+ Matcher WaiterMatchMode
+ Argument string
+ Expected interface{}
+}
+
+// match returns if the acceptor found a match with the passed in request
+// or error. True is returned if the acceptor made a match, error is returned
+// if there was an error attempting to perform the match.
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) {
+ result := false
+ var vals []interface{}
+
+ switch a.Matcher {
+ case PathAllWaiterMatch, PathWaiterMatch:
+ // Require all matches to be equal for result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ if len(vals) == 0 {
+ break
+ }
+ result = true
+ for _, val := range vals {
+ if !awsutil.DeepEqual(val, a.Expected) {
+ result = false
+ break
+ }
+ }
+ case PathAnyWaiterMatch:
+ // Only a single match needs to equal for the result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ for _, val := range vals {
+ if awsutil.DeepEqual(val, a.Expected) {
+ result = true
+ break
+ }
+ }
+ case PathListWaiterMatch:
+ // ignored matcher
+ case StatusWaiterMatch:
+ s := a.Expected.(int)
+ result = s == req.HTTPResponse.StatusCode
+ case ErrorWaiterMatch:
+ if aerr, ok := err.(awserr.Error); ok {
+ result = aerr.Code() == a.Expected.(string)
+ }
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
+ name, a.Matcher)
+ }
+
+ if !result {
+ // If there was no matching result found there is nothing more to do
+ // for this response, retry the request.
+ return false, nil
+ }
+
+ switch a.State {
+ case SuccessWaiterState:
+ // waiter completed
+ return true, nil
+ case FailureWaiterState:
+ // Waiter failure state triggered
+ return true, awserr.New(WaiterResourceNotReadyErrorCode,
+ "failed waiting for successful resource state", err)
+ case RetryWaiterState:
+ // clear the error and retry the operation
+ return false, nil
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s",
+ name, a.State)
+ return false, nil
+ }
+}
+
+func waiterLogf(logger aws.Logger, msg string, args ...interface{}) {
+ if logger != nil {
+ logger.Log(fmt.Sprintf(msg, args...))
+ }
+}
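+
+// Editorial sketch, not part of the vendored source: a minimal Waiter wired
+// up by hand, polling until the API responds with HTTP 200. `newReq` is a
+// placeholder func([]Option) (*Request, error) that builds the API request.
+//
+//    w := Waiter{
+//        Name:        "WaitUntilReady",
+//        MaxAttempts: 40,
+//        Delay:       ConstantWaiterDelay(15 * time.Second),
+//        Acceptors: []WaiterAcceptor{
+//            {State: SuccessWaiterState, Matcher: StatusWaiterMatch, Expected: 200},
+//        },
+//        NewRequest: newReq,
+//    }
+//    err := w.WaitWithContext(aws.BackgroundContext())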
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go
new file mode 100644
index 000000000..ea9ebb6f6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go
@@ -0,0 +1,26 @@
+// +build go1.7
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// getCABundleTransport returns the Transport that should be used when a
+// custom CA bundle is specified with the SDK.
+func getCABundleTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go
new file mode 100644
index 000000000..fec39dfc1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go
@@ -0,0 +1,22 @@
+// +build !go1.6,go1.5
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// getCABundleTransport returns the Transport that should be used when a
+// custom CA bundle is specified with the SDK.
+func getCABundleTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go
new file mode 100644
index 000000000..1c5a5391e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go
@@ -0,0 +1,23 @@
+// +build !go1.7,go1.6
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// getCABundleTransport returns the Transport that should be used when a
+// custom CA bundle is specified with the SDK.
+func getCABundleTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
new file mode 100644
index 000000000..7713ccfca
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
@@ -0,0 +1,259 @@
+package session
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/processcreds"
+ "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+func resolveCredentials(cfg *aws.Config,
+ envCfg envConfig, sharedCfg sharedConfig,
+ handlers request.Handlers,
+ sessOpts Options,
+) (*credentials.Credentials, error) {
+
+ switch {
+ case len(sessOpts.Profile) != 0:
+ // User explicitly provided a Profile in the session's configuration
+ // so load that profile from shared config first.
+ // Github(aws/aws-sdk-go#2727)
+ return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
+
+ case envCfg.Creds.HasKeys():
+ // Environment credentials
+ return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil
+
+ case len(envCfg.WebIdentityTokenFilePath) != 0:
+ // Web identity token from environment, RoleARN required to also be
+ // set.
+ return assumeWebIdentity(cfg, handlers,
+ envCfg.WebIdentityTokenFilePath,
+ envCfg.RoleARN,
+ envCfg.RoleSessionName,
+ )
+
+ default:
+ // Fallback to the "default" credential resolution chain.
+ return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
+ }
+}
+
+// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was
+// set but 'AWS_ROLE_ARN' was not set.
+var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil)
+
+// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but
+// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set.
+var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil)
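+
+// Editorial note: both environment values below must be set together for the
+// web identity path in resolveCredentials to be taken. The values shown are
+// placeholders.
+//
+//    AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/token
+//    AWS_ROLE_ARN=arn:aws:iam::123456789012:role/my-role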
+
+func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers,
+ filepath string,
+ roleARN, sessionName string,
+) (*credentials.Credentials, error) {
+
+ if len(filepath) == 0 {
+ return nil, WebIdentityEmptyTokenFilePathErr
+ }
+
+ if len(roleARN) == 0 {
+ return nil, WebIdentityEmptyRoleARNErr
+ }
+
+ creds := stscreds.NewWebIdentityCredentials(
+ &Session{
+ Config: cfg,
+ Handlers: handlers.Copy(),
+ },
+ roleARN,
+ sessionName,
+ filepath,
+ )
+
+ return creds, nil
+}
+
+func resolveCredsFromProfile(cfg *aws.Config,
+ envCfg envConfig, sharedCfg sharedConfig,
+ handlers request.Handlers,
+ sessOpts Options,
+) (creds *credentials.Credentials, err error) {
+
+ switch {
+ case sharedCfg.SourceProfile != nil:
+ // Assume IAM role with credentials source from a different profile.
+ creds, err = resolveCredsFromProfile(cfg, envCfg,
+ *sharedCfg.SourceProfile, handlers, sessOpts,
+ )
+
+ case sharedCfg.Creds.HasKeys():
+ // Static Credentials from Shared Config/Credentials file.
+ creds = credentials.NewStaticCredentialsFromCreds(
+ sharedCfg.Creds,
+ )
+
+ case len(sharedCfg.CredentialProcess) != 0:
+ // Get credentials from CredentialProcess
+ creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
+
+ case len(sharedCfg.CredentialSource) != 0:
+ creds, err = resolveCredsFromSource(cfg, envCfg,
+ sharedCfg, handlers, sessOpts,
+ )
+
+ case len(sharedCfg.WebIdentityTokenFile) != 0:
+ // Credentials from Assume Web Identity token require an IAM Role, and
+ // that role will be assumed. May be wrapped with another assume role
+ // via SourceProfile.
+ return assumeWebIdentity(cfg, handlers,
+ sharedCfg.WebIdentityTokenFile,
+ sharedCfg.RoleARN,
+ sharedCfg.RoleSessionName,
+ )
+
+ default:
+ // Fallback to default credentials provider, include mock errors for
+ // the credential chain so user can identify why credentials failed to
+ // be retrieved.
+ creds = credentials.NewCredentials(&credentials.ChainProvider{
+ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+ Providers: []credentials.Provider{
+ &credProviderError{
+ Err: awserr.New("EnvAccessKeyNotFound",
+ "failed to find credentials in the environment.", nil),
+ },
+ &credProviderError{
+ Err: awserr.New("SharedCredsLoad",
+ fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil),
+ },
+ defaults.RemoteCredProvider(*cfg, handlers),
+ },
+ })
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if len(sharedCfg.RoleARN) > 0 {
+ cfgCp := *cfg
+ cfgCp.Credentials = creds
+ return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts)
+ }
+
+ return creds, nil
+}
+
+// valid credential source values
+const (
+ credSourceEc2Metadata = "Ec2InstanceMetadata"
+ credSourceEnvironment = "Environment"
+ credSourceECSContainer = "EcsContainer"
+)
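+
+// Editorial sketch with placeholder values: a shared config profile that
+// exercises the credential_source handling in resolveCredsFromSource below.
+//
+//    [profile ecs-task]
+//    role_arn = arn:aws:iam::123456789012:role/my-role
+//    credential_source = EcsContainer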
+
+func resolveCredsFromSource(cfg *aws.Config,
+ envCfg envConfig, sharedCfg sharedConfig,
+ handlers request.Handlers,
+ sessOpts Options,
+) (creds *credentials.Credentials, err error) {
+
+ switch sharedCfg.CredentialSource {
+ case credSourceEc2Metadata:
+ p := defaults.RemoteCredProvider(*cfg, handlers)
+ creds = credentials.NewCredentials(p)
+
+ case credSourceEnvironment:
+ creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds)
+
+ case credSourceECSContainer:
+ if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
+ return nil, ErrSharedConfigECSContainerEnvVarEmpty
+ }
+
+ p := defaults.RemoteCredProvider(*cfg, handlers)
+ creds = credentials.NewCredentials(p)
+
+ default:
+ return nil, ErrSharedConfigInvalidCredSource
+ }
+
+ return creds, nil
+}
+
+func credsFromAssumeRole(cfg aws.Config,
+ handlers request.Handlers,
+ sharedCfg sharedConfig,
+ sessOpts Options,
+) (*credentials.Credentials, error) {
+
+ if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil {
+ // AssumeRole Token provider is required if doing Assume Role
+ // with MFA.
+ return nil, AssumeRoleTokenProviderNotSetError{}
+ }
+
+ return stscreds.NewCredentials(
+ &Session{
+ Config: &cfg,
+ Handlers: handlers.Copy(),
+ },
+ sharedCfg.RoleARN,
+ func(opt *stscreds.AssumeRoleProvider) {
+ opt.RoleSessionName = sharedCfg.RoleSessionName
+ opt.Duration = sessOpts.AssumeRoleDuration
+
+ // Assume role with external ID
+ if len(sharedCfg.ExternalID) > 0 {
+ opt.ExternalID = aws.String(sharedCfg.ExternalID)
+ }
+
+ // Assume role with MFA
+ if len(sharedCfg.MFASerial) > 0 {
+ opt.SerialNumber = aws.String(sharedCfg.MFASerial)
+ opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
+ }
+ },
+ ), nil
+}
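+
+// Editorial sketch with placeholder values: a shared config profile that
+// drives credsFromAssumeRole above, including the MFA case that requires the
+// session's AssumeRoleTokenProvider option to be set.
+//
+//    [profile assume]
+//    role_arn = arn:aws:iam::123456789012:role/my-role
+//    source_profile = base
+//    mfa_serial = arn:aws:iam::123456789012:mfa/user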
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session when the AssumeRoleTokenProvider option is not set, but the shared
+// config is configured to assume a role with an MFA token.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Code is the short id of the error.
+func (e AssumeRoleTokenProviderNotSetError) Code() string {
+ return "AssumeRoleTokenProviderNotSetError"
+}
+
+// Message is the description of the error.
+func (e AssumeRoleTokenProviderNotSetError) Message() string {
+ return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
+ return nil
+}
+
+// Error satisfies the error interface.
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+type credProviderError struct {
+ Err error
+}
+
+func (c credProviderError) Retrieve() (credentials.Value, error) {
+ return credentials.Value{}, c.Err
+}
+func (c credProviderError) IsExpired() bool {
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
new file mode 100644
index 000000000..7ec66e7e5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -0,0 +1,245 @@
+/*
+Package session provides configuration for the SDK's service clients. Sessions
+can be shared across service clients that share the same base configuration.
+
+Sessions are safe to use concurrently as long as the Session is not being
+modified. Sessions should be cached when possible, because creating a new
+Session will load all configuration values from the environment and config
+files each time the Session is created. Sharing the Session value across all of
+your service clients will ensure the configuration is loaded the fewest number
+of times possible.
+
+Session options from Shared Config
+
+By default NewSession will only load credentials from the shared credentials
+file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
+set to a truthy value the Session will be created from the configuration
+values from the shared config (~/.aws/config) and shared credentials
+(~/.aws/credentials) files. Using the NewSessionWithOptions with
+SharedConfigState set to SharedConfigEnable will create the session as if the
+AWS_SDK_LOAD_CONFIG environment variable was set.
+
+Credential and config loading order
+
+The Session will attempt to load configuration and credentials from the
+environment, configuration files, and other credential sources. The order
+configuration is loaded in is:
+
+ * Environment Variables
+ * Shared Credentials file
+ * Shared Configuration file (if SharedConfig is enabled)
+ * EC2 Instance Metadata (credentials only)
+
+The Environment variables for credentials will have precedence over shared
+config even if SharedConfig is enabled. To override this behavior, and use
+shared config credentials instead, specify session.Options.Profile (e.g.
+when using credential_source=Environment to assume a role).
+
+ sess, err := session.NewSessionWithOptions(session.Options{
+ Profile: "myProfile",
+ })
+
+Creating Sessions
+
+Creating a Session without additional options will load credentials, region,
+and profile from the environment and shared config automatically. See the
+"Environment Variables" section for information on environment variables used
+by Session.
+
+ // Create Session
+ sess, err := session.NewSession()
+
+
+When creating Sessions optional aws.Config values can be passed in that will
+override the default, or loaded, config values the Session is being created
+with. This allows you to provide additional, or case based, configuration
+as needed.
+
+ // Create a Session with a custom region
+ sess, err := session.NewSession(&aws.Config{
+ Region: aws.String("us-west-2"),
+ })
+
+Use NewSessionWithOptions to provide additional configuration driving how the
+Session's configuration will be loaded, such as specifying the shared config
+profile, or overriding the shared config state (AWS_SDK_LOAD_CONFIG).
+
+ // Equivalent to session.NewSession()
+ sess, err := session.NewSessionWithOptions(session.Options{
+ // Options
+ })
+
+ sess, err := session.NewSessionWithOptions(session.Options{
+ // Specify profile to load for the session's config
+ Profile: "profile_name",
+
+ // Provide SDK Config options, such as Region.
+ Config: aws.Config{
+ Region: aws.String("us-west-2"),
+ },
+
+ // Force enable Shared Config support
+ SharedConfigState: session.SharedConfigEnable,
+ })
+
+Adding Handlers
+
+You can add handlers to a session to decorate API operations (e.g. adding HTTP
+headers). All clients that use the Session receive a copy of the Session's
+handlers. For example, the following request handler added to the Session logs
+every request made.
+
+ // Create a session, and add additional handlers for all service
+ // clients created with the Session to inherit. Adds logging handler.
+ sess := session.Must(session.NewSession())
+
+ sess.Handlers.Send.PushFront(func(r *request.Request) {
+ // Log every request made and its payload
+ logger.Printf("Request: %s/%s, Params: %s",
+ r.ClientInfo.ServiceName, r.Operation, r.Params)
+ })
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's
+(~/.aws/credentials) credentials values, and all other config is provided by
+the environment variables, SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or the SharedConfigEnable
+option is used to create the Session, the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config file
+(~/.aws/config).
+
+Credentials are the values the SDK uses to authenticate requests with AWS
+Services. When specified in a file, both aws_access_key_id and
+aws_secret_access_key must be provided together in the same file to be
+considered valid. They will be ignored if both are not present.
+aws_session_token is an optional field that can be provided in addition to the
+other two fields.
+
+ aws_access_key_id = AKID
+ aws_secret_access_key = SECRET
+ aws_session_token = TOKEN
+
+ ; region only supported if SharedConfigEnabled.
+ region = us-east-1
+
+Assume Role configuration
+
+The role_arn field allows you to configure the SDK to assume an IAM role using
+a set of credentials from another source, such as static credentials paired
+with the "source_profile" field, or the "credential_process" or
+"credential_source" fields. If "role_arn" is provided, a source of credentials
+must also be specified, such as "source_profile", "credential_source", or
+"credential_process".
+
+ role_arn = arn:aws:iam:::role/
+ source_profile = profile_with_creds
+ external_id = 1234
+ mfa_serial =
+ role_session_name = session_name
+
+
+The SDK supports assuming a role with an MFA token. If "mfa_serial" is set, you
+must also set the Session Options.AssumeRoleTokenProvider. The Session will
+fail to load if the AssumeRoleTokenProvider is not specified.
+
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+ }))
+
+To set up Assume Role outside of a session, see the stscreds.AssumeRoleProvider
+documentation.
+
+Environment Variables
+
+When a Session is created several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple of the values to be set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values. If set, both Access Key ID and Secret Access
+Key must be provided. Session Token can optionally also be provided, but is
+not required.
+
+ # Access Key ID
+ AWS_ACCESS_KEY_ID=AKID
+ AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+ # Secret Access Key
+ AWS_SECRET_ACCESS_KEY=SECRET
+ AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+ # Session Token
+ AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If it
+is not provided in the environment the region must be provided before a service
+client request is made.
+
+ AWS_REGION=us-east-1
+
+ # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+ # and AWS_REGION is not also set.
+ AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided "default" will be used as the profile name.
+
+ AWS_PROFILE=my_profile
+
+ # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+ # and AWS_PROFILE is not also set.
+ AWS_DEFAULT_PROFILE=my_profile
+
+SDK load config instructs the SDK to load the shared config in addition to
+shared credentials. This also expands the configuration loaded so the shared
+credentials will have parity with the shared config file. This also enables
+Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+env values as well.
+
+ AWS_SDK_LOAD_CONFIG=1
+
+Shared credentials file path can be set to instruct the SDK to use an alternative
+file for the shared credentials. If not set the file will be loaded from
+$HOME/.aws/credentials on Linux/Unix based systems, and
+%USERPROFILE%\.aws\credentials on Windows.
+
+ AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+
+Shared config file path can be set to instruct the SDK to use an alternative
+file for the shared config. If not set the file will be loaded from
+$HOME/.aws/config on Linux/Unix based systems, and
+%USERPROFILE%\.aws\config on Windows.
+
+ AWS_CONFIG_FILE=$HOME/my_shared_config
+
+Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
+will use instead of the default system's root CA bundle. Use this only
+if you want to replace the CA bundle the SDK uses for TLS requests.
+
+ AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the Transport into the SDK's HTTP
+client. If the client's Transport is not a http.Transport an error will be
+returned. If the Transport's TLS config is set this option will cause the SDK
+to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
+contains multiple certificates all of them will be loaded.
+
+The Session option CustomCABundle is also available when creating sessions
+to enable this feature. The CustomCABundle session option field has priority
+over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+
+Setting a custom HTTPClient in the aws.Config options will override this setting.
+To use this option and a custom HTTP client, the HTTP client needs to be
+provided when creating the session, not the service client.
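+
+For example, a sketch of providing the bundle through the session option; the
+file name is illustrative.
+
+ f, err := os.Open("custom_ca_bundle.pem")
+ if err != nil {
+  // handle error
+ }
+ defer f.Close()
+
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+  CustomCABundle: f,
+ }))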
+*/
+package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
new file mode 100644
index 000000000..530cc3a9c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -0,0 +1,299 @@
+package session
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+)
+
+// EnvProviderName provides a name of the provider when config is loaded from environment.
+const EnvProviderName = "EnvConfigCredentials"
+
+// envConfig is a collection of environment values the SDK will read
+// setup config from. All environment values are optional. But some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type envConfig struct {
+ // Environment configuration values. If set, both Access Key ID and Secret
+ // Access Key must be provided. Session Token can optionally also be
+ // provided, but is not required.
+ //
+ // # Access Key ID
+ // AWS_ACCESS_KEY_ID=AKID
+ // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+ //
+ // # Secret Access Key
+ // AWS_SECRET_ACCESS_KEY=SECRET
+ // AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+ //
+ // # Session Token
+ // AWS_SESSION_TOKEN=TOKEN
+ Creds credentials.Value
+
+ // Region value will instruct the SDK where to make service API requests to.
+ // If it is not provided in the environment the region must be provided
+ // before a service client request is made.
+ //
+ // AWS_REGION=us-east-1
+ //
+ // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+ // # and AWS_REGION is not also set.
+ // AWS_DEFAULT_REGION=us-east-1
+ Region string
+
+ // Profile name the SDK should use when loading shared configuration from the
+ // shared configuration files. If not provided "default" will be used as the
+ // profile name.
+ //
+ // AWS_PROFILE=my_profile
+ //
+ // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+ // # and AWS_PROFILE is not also set.
+ // AWS_DEFAULT_PROFILE=my_profile
+ Profile string
+
+ // SDK load config instructs the SDK to load the shared config in addition to
+ // shared credentials. This also expands the configuration loaded from the shared
+ // credentials to have parity with the shared config file. This also enables
+ // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+ // env values as well.
+ //
+ // AWS_SDK_LOAD_CONFIG=1
+ EnableSharedConfig bool
+
+ // Shared credentials file path can be set to instruct the SDK to use an alternate
+ // file for the shared credentials. If not set the file will be loaded from
+ // $HOME/.aws/credentials on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\credentials on Windows.
+ //
+ // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+ SharedCredentialsFile string
+
+ // Shared config file path can be set to instruct the SDK to use an alternate
+ // file for the shared config. If not set the file will be loaded from
+ // $HOME/.aws/config on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\config on Windows.
+ //
+ // AWS_CONFIG_FILE=$HOME/my_shared_config
+ SharedConfigFile string
+
+ // Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+ // that the SDK will use instead of the system's root CA bundle.
+ // Only use this if you want to configure the SDK to use a custom set
+ // of CAs.
+ //
+ // Enabling this option will attempt to merge the Transport
+ // into the SDK's HTTP client. If the client's Transport is
+ // not a http.Transport an error will be returned. If the
+ // Transport's TLS config is set this option will cause the
+ // SDK to overwrite the Transport's TLS config's RootCAs value.
+ //
+ // Setting a custom HTTPClient in the aws.Config options will override this setting.
+ // To use this option and a custom HTTP client, the HTTP client needs to be
+ // provided when creating the session, not the service client.
+ //
+ // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+ CustomCABundle string
+
+ csmEnabled string
+ CSMEnabled *bool
+ CSMPort string
+ CSMHost string
+ CSMClientID string
+
+ // Enables endpoint discovery via environment variables.
+ //
+ // AWS_ENABLE_ENDPOINT_DISCOVERY=true
+ EnableEndpointDiscovery *bool
+ enableEndpointDiscovery string
+
+ // Specifies the path to the WebIdentity token file the SDK should use to
+ // assume a role.
+ //
+ // AWS_WEB_IDENTITY_TOKEN_FILE=file_path
+ WebIdentityTokenFilePath string
+
+ // Specifies the IAM role ARN to use when assuming a role.
+ //
+ // AWS_ROLE_ARN=role_arn
+ RoleARN string
+
+ // Specifies the IAM role session name to use when assuming a role.
+ //
+ // AWS_ROLE_SESSION_NAME=session_name
+ RoleSessionName string
+
+ // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint
+ // for a service.
+ //
+ // AWS_STS_REGIONAL_ENDPOINTS=regional
+ // This can take the value `regional` or `legacy`.
+ STSRegionalEndpoint endpoints.STSRegionalEndpoint
+}
+
+var (
+ csmEnabledEnvKey = []string{
+ "AWS_CSM_ENABLED",
+ }
+ csmHostEnvKey = []string{
+ "AWS_CSM_HOST",
+ }
+ csmPortEnvKey = []string{
+ "AWS_CSM_PORT",
+ }
+ csmClientIDEnvKey = []string{
+ "AWS_CSM_CLIENT_ID",
+ }
+ credAccessEnvKey = []string{
+ "AWS_ACCESS_KEY_ID",
+ "AWS_ACCESS_KEY",
+ }
+ credSecretEnvKey = []string{
+ "AWS_SECRET_ACCESS_KEY",
+ "AWS_SECRET_KEY",
+ }
+ credSessionEnvKey = []string{
+ "AWS_SESSION_TOKEN",
+ }
+
+ enableEndpointDiscoveryEnvKey = []string{
+ "AWS_ENABLE_ENDPOINT_DISCOVERY",
+ }
+
+ regionEnvKeys = []string{
+ "AWS_REGION",
+ "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
+ }
+ profileEnvKeys = []string{
+ "AWS_PROFILE",
+ "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
+ }
+ sharedCredsFileEnvKey = []string{
+ "AWS_SHARED_CREDENTIALS_FILE",
+ }
+ sharedConfigFileEnvKey = []string{
+ "AWS_CONFIG_FILE",
+ }
+ webIdentityTokenFilePathEnvKey = []string{
+ "AWS_WEB_IDENTITY_TOKEN_FILE",
+ }
+ roleARNEnvKey = []string{
+ "AWS_ROLE_ARN",
+ }
+ roleSessionNameEnvKey = []string{
+ "AWS_ROLE_SESSION_NAME",
+ }
+ stsRegionalEndpointKey = []string{
+ "AWS_STS_REGIONAL_ENDPOINTS",
+ }
+)
+
+// loadEnvConfig retrieves the SDK's environment configuration.
+// See `envConfig` for the values that will be retrieved.
+//
+// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
+// the shared SDK config will be loaded in addition to the SDK's specific
+// configuration values.
+func loadEnvConfig() (envConfig, error) {
+ enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
+ return envConfigLoad(enableSharedConfig)
+}
+
+// loadSharedEnvConfig retrieves the SDK's environment configuration, and the
+// SDK shared config. See `envConfig` for the values that will be retrieved.
+//
+// Loads the shared configuration in addition to the SDK's specific configuration.
+// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
+// environment variable is set.
+func loadSharedEnvConfig() (envConfig, error) {
+ return envConfigLoad(true)
+}
+
+func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
+ cfg := envConfig{}
+
+ cfg.EnableSharedConfig = enableSharedConfig
+
+ // Static environment credentials
+ var creds credentials.Value
+ setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey)
+ setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey)
+ setFromEnvVal(&creds.SessionToken, credSessionEnvKey)
+ if creds.HasKeys() {
+ // Require logical grouping of credentials
+ creds.ProviderName = EnvProviderName
+ cfg.Creds = creds
+ }
+
+ // Role Metadata
+ setFromEnvVal(&cfg.RoleARN, roleARNEnvKey)
+ setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey)
+
+ // Web identity environment variables
+ setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey)
+
+ // CSM environment variables
+ setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey)
+ setFromEnvVal(&cfg.CSMHost, csmHostEnvKey)
+ setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
+ setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
+
+ if len(cfg.csmEnabled) != 0 {
+ v, _ := strconv.ParseBool(cfg.csmEnabled)
+ cfg.CSMEnabled = &v
+ }
+
+ regionKeys := regionEnvKeys
+ profileKeys := profileEnvKeys
+ if !cfg.EnableSharedConfig {
+ regionKeys = regionKeys[:1]
+ profileKeys = profileKeys[:1]
+ }
+
+ setFromEnvVal(&cfg.Region, regionKeys)
+ setFromEnvVal(&cfg.Profile, profileKeys)
+
+ // Endpoint discovery is enabled if the env value is set to anything other
+ // than "false".
+ setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey)
+ if len(cfg.enableEndpointDiscovery) > 0 {
+ cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false")
+ }
+
+ setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
+ setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
+
+ if len(cfg.SharedCredentialsFile) == 0 {
+ cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
+ }
+ if len(cfg.SharedConfigFile) == 0 {
+ cfg.SharedConfigFile = defaults.SharedConfigFilename()
+ }
+
+ cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
+
+ // STS Regional Endpoint variable
+ for _, k := range stsRegionalEndpointKey {
+ if v := os.Getenv(k); len(v) != 0 {
+ STSRegionalEndpoint, err := endpoints.GetSTSRegionalEndpoint(v)
+ if err != nil {
+ return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err)
+ }
+ cfg.STSRegionalEndpoint = STSRegionalEndpoint
+ }
+ }
+
+ return cfg, nil
+}
+
+func setFromEnvVal(dst *string, keys []string) {
+ for _, k := range keys {
+ if v := os.Getenv(k); len(v) != 0 {
+ *dst = v
+ break
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100644
index 000000000..15fa64769
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -0,0 +1,699 @@
+package session
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/csm"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const (
+ // ErrCodeSharedConfig represents an error that occurs in the shared
+ // configuration logic
+ ErrCodeSharedConfig = "SharedConfigErr"
+)
+
+// ErrSharedConfigSourceCollision will be returned if a section contains both
+// source_profile and credential_source
+var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil)
+
+// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
+// variables are empty and Environment was set as the credential source
+var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil)
+
+// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided
+var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the Session concurrently.
+//
+// The Session satisfies the service client's client.ConfigProvider.
+type Session struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// New creates a new instance of the handlers merging in the provided configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify the Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value, the
+// New method could now encounter an error when loading the configuration. When
+// the environment variable is set, and an error occurs, New will return a
+// session that will fail all requests reporting the error that occurred while
+// loading the session. Use NewSession to get the error when creating the
+// session.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded, in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file.
+//
+// Deprecated: Use NewSession functions to create sessions instead. NewSession
+// has the same functionality as New except an error can be returned when the
+// func is called instead of waiting to receive an error until a request is made.
+func New(cfgs ...*aws.Config) *Session {
+ // load initial config from environment
+ envCfg, envErr := loadEnvConfig()
+
+ if envCfg.EnableSharedConfig {
+ var cfg aws.Config
+ cfg.MergeIn(cfgs...)
+ s, err := NewSessionWithOptions(Options{
+ Config: cfg,
+ SharedConfigState: SharedConfigEnable,
+ })
+ if err != nil {
+ // Old session.New expected all errors to be discovered when
+ // a request is made, and would report the errors then. This
+ // needs to be replicated if an error occurs while creating
+ // the session.
+ msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
+ "Use session.NewSession to handle errors occurring during session creation."
+
+ // Session creation failed, need to report the error and prevent
+ // any requests from succeeding.
+ s = &Session{Config: defaults.Config()}
+ s.logDeprecatedNewSessionError(msg, err, cfgs)
+ }
+
+ return s
+ }
+
+ s := deprecatedNewSession(cfgs...)
+ if envErr != nil {
+ msg := "failed to load env config"
+ s.logDeprecatedNewSessionError(msg, envErr, cfgs)
+ }
+
+ if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil {
+ if l := s.Config.Logger; l != nil {
+ l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
+ }
+ } else if csmCfg.Enabled {
+ err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
+ if err != nil {
+ msg := "failed to enable CSM"
+ s.logDeprecatedNewSessionError(msg, err, cfgs)
+ }
+ }
+
+ return s
+}
+
+// NewSession returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. Once the Session is created
+// it can be mutated to modify the Config or Handlers. The Session is safe to
+// be read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// See the NewSessionWithOptions func for information on how to override or
+// control through code how the Session will be created, such as specifying the
+// config profile, and controlling if shared config is enabled or not.
+func NewSession(cfgs ...*aws.Config) (*Session, error) {
+ opts := Options{}
+ opts.Config.MergeIn(cfgs...)
+
+ return NewSessionWithOptions(opts)
+}
+
+// SharedConfigState provides the ability to optionally override the state
+// of the session's creation based on the shared config being enabled or
+// disabled.
+type SharedConfigState int
+
+const (
+ // SharedConfigStateFromEnv does not override any state of the
+ // AWS_SDK_LOAD_CONFIG env var. It is the default value of the
+ // SharedConfigState type.
+ SharedConfigStateFromEnv SharedConfigState = iota
+
+ // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+ // and disables the shared config functionality.
+ SharedConfigDisable
+
+ // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+ // and enables the shared config functionality.
+ SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+type Options struct {
+ // Provides config values for the SDK to use when creating service clients
+ // and making API requests to services. Any value set with this field
+ // will override the associated value provided by the SDK defaults,
+ // environment or config files where relevant.
+ //
+ // If not set, configuration values from SDK defaults, environment,
+ // config will be used.
+ Config aws.Config
+
+ // Overrides the config profile the Session should be created from. If not
+ // set the value of the environment variable will be loaded (AWS_PROFILE,
+ // or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
+ //
+ // If not set and environment variables are not set the "default"
+ // (DefaultSharedConfigProfile) will be used as the profile to load the
+ // session config from.
+ Profile string
+
+ // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+ // environment variable. By default a Session will be created using the
+ // value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+ //
+ // Setting this value to SharedConfigEnable or SharedConfigDisable
+ // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+ // and enable or disable the shared config functionality.
+ SharedConfigState SharedConfigState
+
+ // Ordered list of files the session will load configuration from.
+ // It will override the AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE
+ // environment variables.
+ SharedConfigFiles []string
+
+ // When the SDK's shared config is configured to assume a role with MFA
+ // this option is required in order to provide the mechanism that will
+ // retrieve the MFA token. There is no default value for this field. If
+ // it is not set an error will be returned when creating the session.
+ //
+ // This token provider will be called whenever the assumed role's
+ // credentials need to be refreshed. Within the context of service clients
+ // all sharing the same session the SDK will ensure calls to the token
+ // provider are atomic. When sharing a token provider across multiple
+ // sessions additional synchronization logic is needed to ensure the
+ // token providers do not introduce race conditions. It is recommended to
+ // share the session where possible.
+ //
+ // stscreds.StdinTokenProvider is a basic implementation that will prompt
+ // from stdin for the MFA token code.
+ //
+ // This field is only used if the shared configuration is enabled, and
+ // the config enables assume role with MFA via the mfa_serial field.
+ AssumeRoleTokenProvider func() (string, error)
+
+ // When the SDK's shared config is configured to assume a role this option
+ // may be provided to set the expiry duration of the STS credentials.
+ // Defaults to 15 minutes if not set as documented in the
+ // stscreds.AssumeRoleProvider.
+ AssumeRoleDuration time.Duration
+
+ // Reader for a custom Certificate Authority (CA) bundle in PEM format that
+ // the SDK will use instead of the default system's root CA bundle. Use this
+ // only if you want to replace the CA bundle the SDK uses for TLS requests.
+ //
+ // Enabling this option will attempt to merge the Transport into the SDK's HTTP
+ // client. If the client's Transport is not a http.Transport an error will be
+ // returned. If the Transport's TLS config is set this option will cause the SDK
+ // to overwrite the Transport's TLS config's RootCAs value. If the CA
+ // bundle reader contains multiple certificates all of them will be loaded.
+ //
+ // The Session option CustomCABundle is also available when creating sessions
+ // to enable this feature. The CustomCABundle session option field has priority
+ // over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+ CustomCABundle io.Reader
+
+ // The handlers that the session and all API clients will be created with.
+ // This must be a complete set of handlers. Use the defaults.Handlers()
+ // function to initialize this value before changing the handlers to be
+ // used by the SDK.
+ Handlers request.Handlers
+}
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// // Equivalent to session.New
+// sess := session.Must(session.NewSessionWithOptions(session.Options{}))
+//
+// // Specify profile to load for the session's config
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Profile: "profile_name",
+// }))
+//
+// // Specify profile for config and region for requests
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Config: aws.Config{Region: aws.String("us-east-1")},
+// Profile: "profile_name",
+// }))
+//
+// // Force enable Shared Config support
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// SharedConfigState: session.SharedConfigEnable,
+// }))
+func NewSessionWithOptions(opts Options) (*Session, error) {
+ var envCfg envConfig
+ var err error
+ if opts.SharedConfigState == SharedConfigEnable {
+ envCfg, err = loadSharedEnvConfig()
+ if err != nil {
+ return nil, fmt.Errorf("failed to load shared config, %v", err)
+ }
+ } else {
+ envCfg, err = loadEnvConfig()
+ if err != nil {
+ return nil, fmt.Errorf("failed to load environment config, %v", err)
+ }
+ }
+
+ if len(opts.Profile) != 0 {
+ envCfg.Profile = opts.Profile
+ }
+
+ switch opts.SharedConfigState {
+ case SharedConfigDisable:
+ envCfg.EnableSharedConfig = false
+ case SharedConfigEnable:
+ envCfg.EnableSharedConfig = true
+ }
+
+ // Only use AWS_CA_BUNDLE if session option is not provided.
+ if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
+ f, err := os.Open(envCfg.CustomCABundle)
+ if err != nil {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to open custom CA bundle PEM file", err)
+ }
+ defer f.Close()
+ opts.CustomCABundle = f
+ }
+
+ return newSession(opts, envCfg, &opts.Config)
+}
+
+// Must is a helper function to ensure the Session is valid and there was no
+// error when calling a NewSession function.
+//
+// This helper is intended to be used in variable initialization to load the
+// Session and configuration at startup. Such as:
+//
+// var sess = session.Must(session.NewSession())
+func Must(sess *Session, err error) *Session {
+ if err != nil {
+ panic(err)
+ }
+
+ return sess
+}
+
+func deprecatedNewSession(cfgs ...*aws.Config) *Session {
+ cfg := defaults.Config()
+ handlers := defaults.Handlers()
+
+ // Apply the passed in configs so the configuration can be applied to the
+ // default credential chain
+ cfg.MergeIn(cfgs...)
+ if cfg.EndpointResolver == nil {
+ // An endpoint resolver is required for a session to be able to provide
+ // endpoints for service client configurations.
+ cfg.EndpointResolver = endpoints.DefaultResolver()
+ }
+ cfg.Credentials = defaults.CredChain(cfg, handlers)
+
+ // Reapply any passed in configs to override credentials if set
+ cfg.MergeIn(cfgs...)
+
+ s := &Session{
+ Config: cfg,
+ Handlers: handlers,
+ }
+
+ initHandlers(s)
+ return s
+}
+
+func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error {
+ if logger != nil {
+ logger.Log("Enabling CSM")
+ }
+
+ r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port))
+ if err != nil {
+ return err
+ }
+ r.InjectHandlers(handlers)
+
+ return nil
+}
+
+func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
+ cfg := defaults.Config()
+
+ handlers := opts.Handlers
+ if handlers.IsEmpty() {
+ handlers = defaults.Handlers()
+ }
+
+ // Get a merged version of the user provided config to determine if
+ // credentials were set.
+ userCfg := &aws.Config{}
+ userCfg.MergeIn(cfgs...)
+ cfg.MergeIn(userCfg)
+
+ // Ordered config files will be loaded in, with later files overwriting
+ // previous config file values.
+ var cfgFiles []string
+ if opts.SharedConfigFiles != nil {
+ cfgFiles = opts.SharedConfigFiles
+ } else {
+ cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+ if !envCfg.EnableSharedConfig {
+ // The shared config file (~/.aws/config) is only loaded if instructed
+ // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+ cfgFiles = cfgFiles[1:]
+ }
+ }
+
+ // Load additional config from file(s)
+ sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig)
+ if err != nil {
+ if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) {
+ // Special case where the user has not explicitly specified an AWS_PROFILE,
+ // or session.Options.profile, shared config is not enabled, and the
+ // environment has credentials, allow the shared config file to fail to
+ // load since the user has already provided credentials, and nothing else
+ // is required to be read from the file. Github(aws/aws-sdk-go#2455)
+ } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
+ return nil, err
+ }
+ }
+
+ if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
+ return nil, err
+ }
+
+ s := &Session{
+ Config: cfg,
+ Handlers: handlers,
+ }
+
+ initHandlers(s)
+
+ if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil {
+ if l := s.Config.Logger; l != nil {
+ l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
+ }
+ } else if csmCfg.Enabled {
+ err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Setup HTTP client with custom cert bundle if enabled
+ if opts.CustomCABundle != nil {
+ if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
+ return nil, err
+ }
+ }
+
+ return s, nil
+}
+
+type csmConfig struct {
+ Enabled bool
+ Host string
+ Port string
+ ClientID string
+}
+
+var csmProfileName = "aws_csm"
+
+func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) {
+ if envCfg.CSMEnabled != nil {
+ if *envCfg.CSMEnabled {
+ return csmConfig{
+ Enabled: true,
+ ClientID: envCfg.CSMClientID,
+ Host: envCfg.CSMHost,
+ Port: envCfg.CSMPort,
+ }, nil
+ }
+ return csmConfig{}, nil
+ }
+
+ sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false)
+ if err != nil {
+ if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
+ return csmConfig{}, err
+ }
+ }
+ if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled {
+ return csmConfig{
+ Enabled: true,
+ ClientID: sharedCfg.CSMClientID,
+ Host: sharedCfg.CSMHost,
+ Port: sharedCfg.CSMPort,
+ }, nil
+ }
+
+ return csmConfig{}, nil
+}
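`loadCSMConfig` resolves client-side metrics settings from the environment first, and only then from the dedicated `aws_csm` profile rather than the active credentials profile. A sketch of the shared-config form (host, port, and client ID values are placeholders):

```ini
# ~/.aws/config
[aws_csm]
csm_enabled = true
csm_host = 127.0.0.1
csm_port = 31000
csm_client_id = my-application
```

Setting `AWS_CSM_ENABLED=true` in the environment takes precedence and skips the profile lookup entirely, matching the early return above.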
+
+func loadCustomCABundle(s *Session, bundle io.Reader) error {
+ var t *http.Transport
+ switch v := s.Config.HTTPClient.Transport.(type) {
+ case *http.Transport:
+ t = v
+ default:
+ if s.Config.HTTPClient.Transport != nil {
+ return awserr.New("LoadCustomCABundleError",
+ "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
+ }
+ }
+ if t == nil {
+ // A nil transport implies `http.DefaultTransport` should be used. Since
+ // the SDK cannot modify, nor copy the `DefaultTransport`, specifying
+ // equivalent values on a new transport is the next closest behavior.
+ t = getCABundleTransport()
+ }
+
+ p, err := loadCertPool(bundle)
+ if err != nil {
+ return err
+ }
+ if t.TLSClientConfig == nil {
+ t.TLSClientConfig = &tls.Config{}
+ }
+ t.TLSClientConfig.RootCAs = p
+
+ s.Config.HTTPClient.Transport = t
+
+ return nil
+}
+
+func loadCertPool(r io.Reader) (*x509.CertPool, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to read custom CA bundle PEM file", err)
+ }
+
+ p := x509.NewCertPool()
+ if !p.AppendCertsFromPEM(b) {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to load custom CA bundle PEM file", err)
+ }
+
+ return p, nil
+}
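How these two helpers are reached from the public API, as a sketch; the PEM path is a placeholder:

```go
f, err := os.Open("/etc/pki/custom-ca-bundle.pem") // placeholder path
if err != nil {
	log.Fatal(err)
}
defer f.Close()

// Any io.Reader of PEM data works; the session routes it through
// loadCustomCABundle/loadCertPool above.
sess, err := session.NewSessionWithOptions(session.Options{
	CustomCABundle: f,
})
```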
+
+func mergeConfigSrcs(cfg, userCfg *aws.Config,
+ envCfg envConfig, sharedCfg sharedConfig,
+ handlers request.Handlers,
+ sessOpts Options,
+) error {
+
+ // Region if not already set by user
+ if len(aws.StringValue(cfg.Region)) == 0 {
+ if len(envCfg.Region) > 0 {
+ cfg.WithRegion(envCfg.Region)
+ } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
+ cfg.WithRegion(sharedCfg.Region)
+ }
+ }
+
+ if cfg.EnableEndpointDiscovery == nil {
+ if envCfg.EnableEndpointDiscovery != nil {
+ cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery)
+ } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil {
+ cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery)
+ }
+ }
+
+ // Regional Endpoint flag for STS endpoint resolving
+ mergeSTSRegionalEndpointConfig(cfg, envCfg, sharedCfg)
+
+ // Configure credentials if not already set by the user when creating the
+ // Session.
+ if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
+ creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts)
+ if err != nil {
+ return err
+ }
+ cfg.Credentials = creds
+ }
+
+ return nil
+}
+
+// mergeSTSRegionalEndpointConfig function merges the STSRegionalEndpoint into cfg from
+// envConfig and SharedConfig with envConfig being given precedence over SharedConfig
+func mergeSTSRegionalEndpointConfig(cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error {
+
+ cfg.STSRegionalEndpoint = envCfg.STSRegionalEndpoint
+
+ if cfg.STSRegionalEndpoint == endpoints.UnsetSTSEndpoint {
+ cfg.STSRegionalEndpoint = sharedCfg.STSRegionalEndpoint
+ }
+
+ if cfg.STSRegionalEndpoint == endpoints.UnsetSTSEndpoint {
+ cfg.STSRegionalEndpoint = endpoints.LegacySTSEndpoint
+ }
+ return nil
+}
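The precedence here is environment over shared config, with the legacy global STS endpoint as the final default. Opting in from code looks like this sketch (region is a placeholder; `aws` and `endpoints` imports assumed); the same effect comes from `AWS_STS_REGIONAL_ENDPOINTS=regional` in the environment or `sts_regional_endpoints = regional` in the shared config:

```go
sess := session.Must(session.NewSession(&aws.Config{
	Region:              aws.String("eu-west-1"), // placeholder
	STSRegionalEndpoint: endpoints.RegionalSTSEndpoint,
}))
```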
+
+func initHandlers(s *Session) {
+ // Add the Validate parameter handler if it is not disabled.
+ s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+ if !aws.BoolValue(s.Config.DisableParamValidation) {
+ s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+ }
+}
+
+// Copy creates and returns a copy of the current Session, copying the config
+// and handlers. If any additional configs are provided they will be merged
+// on top of the Session's copied config.
+//
+// // Create a copy of the current Session, configured for the us-west-2 region.
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+func (s *Session) Copy(cfgs ...*aws.Config) *Session {
+ newSession := &Session{
+ Config: s.Config.Copy(cfgs...),
+ Handlers: s.Handlers.Copy(),
+ }
+
+ initHandlers(newSession)
+
+ return newSession
+}
+
+// ClientConfig satisfies the client.ConfigProvider interface and is used to
+// configure the service client instances. Passing the Session to the service
+// client's constructor (New) will use this method to configure the client.
+func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config {
+ s = s.Copy(cfgs...)
+
+ region := aws.StringValue(s.Config.Region)
+ resolved, err := s.resolveEndpoint(service, region, s.Config)
+ if err != nil && s.Config.Logger != nil {
+ s.Config.Logger.Log(fmt.Sprintf(
+ "ERROR: unable to resolve endpoint for service %q, region %q, err: %v",
+ service, region, err))
+ }
+
+ return client.Config{
+ Config: s.Config,
+ Handlers: s.Handlers,
+ Endpoint: resolved.URL,
+ SigningRegion: resolved.SigningRegion,
+ SigningNameDerived: resolved.SigningNameDerived,
+ SigningName: resolved.SigningName,
+ }
+}
+
+func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) {
+
+ if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 {
+ return endpoints.ResolvedEndpoint{
+ URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)),
+ SigningRegion: region,
+ }, nil
+ }
+
+ resolved, err := cfg.EndpointResolver.EndpointFor(service, region,
+ func(opt *endpoints.Options) {
+ opt.DisableSSL = aws.BoolValue(cfg.DisableSSL)
+ opt.UseDualStack = aws.BoolValue(cfg.UseDualStack)
+ // Support for STSRegionalEndpoint where the STSRegionalEndpoint is
+ // provided in envConfig or sharedConfig with envConfig getting
+ // precedence.
+ opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint
+
+ // Support the condition where the service is modeled but its
+ // endpoint metadata is not available.
+ opt.ResolveUnknownService = true
+ },
+ )
+ if err != nil {
+ return endpoints.ResolvedEndpoint{}, err
+ }
+
+ return resolved, nil
+}
+
+// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
+// that the EndpointResolver will not be used to resolve the endpoint. The only
+// endpoint set must come from the aws.Config.Endpoint field.
+func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config {
+ s = s.Copy(cfgs...)
+
+ var resolved endpoints.ResolvedEndpoint
+ if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
+ resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
+ resolved.SigningRegion = aws.StringValue(s.Config.Region)
+ }
+
+ return client.Config{
+ Config: s.Config,
+ Handlers: s.Handlers,
+ Endpoint: resolved.URL,
+ SigningRegion: resolved.SigningRegion,
+ SigningNameDerived: resolved.SigningNameDerived,
+ SigningName: resolved.SigningName,
+ }
+}
+
+// logDeprecatedNewSessionError logs the session creation error and installs a
+// Validate handler that fails all requests made with the Session.
+func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) {
+ // Session creation failed, need to report the error and prevent
+ // any requests from succeeding.
+ s.Config.MergeIn(cfgs...)
+ s.Config.Logger.Log("ERROR:", msg, "Error:", err)
+ s.Handlers.Validate.PushBack(func(r *request.Request) {
+ r.Error = err
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
new file mode 100644
index 000000000..857466896
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -0,0 +1,508 @@
+package session
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/internal/ini"
+)
+
+const (
+ // Static Credentials group
+ accessKeyIDKey = `aws_access_key_id` // group required
+ secretAccessKey = `aws_secret_access_key` // group required
+ sessionTokenKey = `aws_session_token` // optional
+
+ // Assume Role Credentials group
+ roleArnKey = `role_arn` // group required
+ sourceProfileKey = `source_profile` // group required (or credential_source)
+ credentialSourceKey = `credential_source` // group required (or source_profile)
+ externalIDKey = `external_id` // optional
+ mfaSerialKey = `mfa_serial` // optional
+ roleSessionNameKey = `role_session_name` // optional
+
+ // CSM options
+ csmEnabledKey = `csm_enabled`
+ csmHostKey = `csm_host`
+ csmPortKey = `csm_port`
+ csmClientIDKey = `csm_client_id`
+
+ // Additional Config fields
+ regionKey = `region`
+
+ // endpoint discovery group
+ enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
+
+ // External Credential Process
+ credentialProcessKey = `credential_process` // optional
+
+ // Web Identity Token File
+ webIdentityTokenFileKey = `web_identity_token_file` // optional
+
+ // Additional config fields for regional or legacy endpoints
+ stsRegionalEndpointSharedKey = `sts_regional_endpoints`
+
+ // DefaultSharedConfigProfile is the default profile to be used when
+ // loading configuration from the config files if another profile name
+ // is not provided.
+ DefaultSharedConfigProfile = `default`
+)
+
+// sharedConfig represents the configuration fields of the SDK config files.
+type sharedConfig struct {
+ // Credentials values from the config file. Both aws_access_key_id and
+ // aws_secret_access_key must be provided together in the same file to be
+ // considered valid. The values will be ignored if not a complete group.
+ // aws_session_token is an optional field that can be provided if both of
+ // the other two fields are also provided.
+ //
+ // aws_access_key_id
+ // aws_secret_access_key
+ // aws_session_token
+ Creds credentials.Value
+
+ CredentialSource string
+ CredentialProcess string
+ WebIdentityTokenFile string
+
+ RoleARN string
+ RoleSessionName string
+ ExternalID string
+ MFASerial string
+
+ SourceProfileName string
+ SourceProfile *sharedConfig
+
+ // Region is the region the SDK should use for looking up AWS service
+ // endpoints and signing requests.
+ //
+ // region
+ Region string
+
+ // EnableEndpointDiscovery can be enabled in the shared config by setting
+ // endpoint_discovery_enabled to true
+ //
+ // endpoint_discovery_enabled = true
+ EnableEndpointDiscovery *bool
+ // CSM Options
+ CSMEnabled *bool
+ CSMHost string
+ CSMPort string
+ CSMClientID string
+
+ // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service
+ //
+ // sts_regional_endpoints = regional
+ // This can take the values `legacy` or `regional`
+ STSRegionalEndpoint endpoints.STSRegionalEndpoint
+}
+
+type sharedConfigFile struct {
+ Filename string
+ IniData ini.Sections
+}
+
+// loadSharedConfig retrieves the configuration from the list of files using
+// the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B. Both define credentials. If the order
+// of the files are A then B, B's credential values will be used instead of
+// A's.
+//
+// See sharedConfig.setFromIniFile for information on how the config files
+// will be loaded.
+func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) {
+ if len(profile) == 0 {
+ profile = DefaultSharedConfigProfile
+ }
+
+ files, err := loadSharedConfigIniFiles(filenames)
+ if err != nil {
+ return sharedConfig{}, err
+ }
+
+ cfg := sharedConfig{}
+ profiles := map[string]struct{}{}
+ if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil {
+ return sharedConfig{}, err
+ }
+
+ return cfg, nil
+}
+
+func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
+ files := make([]sharedConfigFile, 0, len(filenames))
+
+ for _, filename := range filenames {
+ sections, err := ini.OpenFile(filename)
+ if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile {
+ // Skip files which can't be opened and read for whatever reason
+ continue
+ } else if err != nil {
+ return nil, SharedConfigLoadError{Filename: filename, Err: err}
+ }
+
+ files = append(files, sharedConfigFile{
+ Filename: filename, IniData: sections,
+ })
+ }
+
+ return files, nil
+}
+
+func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error {
+ // Track files skipped because the profile was not found in them.
+ var skippedFiles int
+ var profileNotFoundErr error
+ for _, f := range files {
+ if err := cfg.setFromIniFile(profile, f, exOpts); err != nil {
+ if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+ // Ignore profiles not defined in individual files.
+ profileNotFoundErr = err
+ skippedFiles++
+ continue
+ }
+ return err
+ }
+ }
+ if skippedFiles == len(files) {
+ // If all files were skipped because the profile is not found, return
+ // the original profile not found error.
+ return profileNotFoundErr
+ }
+
+ if _, ok := profiles[profile]; ok {
+ // If this is the second instance of the profile the Assume Role
+ // options must be cleared because they are only valid for the
+ // first reference of a profile. The self linked instance of the
+ // profile only has credential provider options.
+ cfg.clearAssumeRoleOptions()
+ } else {
+ // The first time a profile is seen it must either be an assume role
+ // or credentials profile. Assert that if the credential type requires
+ // a role ARN, the ARN is also set.
+ if err := cfg.validateCredentialsRequireARN(profile); err != nil {
+ return err
+ }
+ }
+ profiles[profile] = struct{}{}
+
+ if err := cfg.validateCredentialType(); err != nil {
+ return err
+ }
+
+ // Link source profiles for assume roles
+ if len(cfg.SourceProfileName) != 0 {
+ // Linked profile via source_profile ignore credential provider
+ // options, the source profile must provide the credentials.
+ cfg.clearCredentialOptions()
+
+ srcCfg := &sharedConfig{}
+ err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts)
+ if err != nil {
+ // SourceProfile that doesn't exist is an error in configuration.
+ if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+ err = SharedConfigAssumeRoleError{
+ RoleARN: cfg.RoleARN,
+ SourceProfile: cfg.SourceProfileName,
+ }
+ }
+ return err
+ }
+
+ if !srcCfg.hasCredentials() {
+ return SharedConfigAssumeRoleError{
+ RoleARN: cfg.RoleARN,
+ SourceProfile: cfg.SourceProfileName,
+ }
+ }
+
+ cfg.SourceProfile = srcCfg
+ }
+
+ return nil
+}
+
+// setFromIniFile loads the configuration from the file using the profile
+// provided. A sharedConfig pointer type value is used so that multiple config
+// file loadings can be chained.
+//
+// Only loads complete logically grouped values, and will not set fields in cfg
+// for incomplete grouped values in the config, such as credentials. For
+// example, if a config file only includes aws_access_key_id but no
+// aws_secret_access_key, the aws_access_key_id will be ignored.
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error {
+ section, ok := file.IniData.GetSection(profile)
+ if !ok {
+ // Fall back to the alternate profile name form: "profile <name>"
+ section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+ if !ok {
+ return SharedConfigProfileNotExistsError{Profile: profile, Err: nil}
+ }
+ }
+
+ if exOpts {
+ // Assume Role Parameters
+ updateString(&cfg.RoleARN, section, roleArnKey)
+ updateString(&cfg.ExternalID, section, externalIDKey)
+ updateString(&cfg.MFASerial, section, mfaSerialKey)
+ updateString(&cfg.RoleSessionName, section, roleSessionNameKey)
+ updateString(&cfg.SourceProfileName, section, sourceProfileKey)
+ updateString(&cfg.CredentialSource, section, credentialSourceKey)
+ updateString(&cfg.Region, section, regionKey)
+
+ if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 {
+ sre, err := endpoints.GetSTSRegionalEndpoint(v)
+ if err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %s, %v",
+ stsRegionalEndpointSharedKey, file.Filename, err)
+ }
+ cfg.STSRegionalEndpoint = sre
+ }
+ }
+
+ updateString(&cfg.CredentialProcess, section, credentialProcessKey)
+ updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey)
+
+ // Shared Credentials
+ creds := credentials.Value{
+ AccessKeyID: section.String(accessKeyIDKey),
+ SecretAccessKey: section.String(secretAccessKey),
+ SessionToken: section.String(sessionTokenKey),
+ ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
+ }
+ if creds.HasKeys() {
+ cfg.Creds = creds
+ }
+
+ // Endpoint discovery
+ updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
+
+ // CSM options
+ updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey)
+ updateString(&cfg.CSMHost, section, csmHostKey)
+ updateString(&cfg.CSMPort, section, csmPortKey)
+ updateString(&cfg.CSMClientID, section, csmClientIDKey)
+
+ return nil
+}
+
+func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error {
+ var credSource string
+
+ switch {
+ case len(cfg.SourceProfileName) != 0:
+ credSource = sourceProfileKey
+ case len(cfg.CredentialSource) != 0:
+ credSource = credentialSourceKey
+ case len(cfg.WebIdentityTokenFile) != 0:
+ credSource = webIdentityTokenFileKey
+ }
+
+ if len(credSource) != 0 && len(cfg.RoleARN) == 0 {
+ return CredentialRequiresARNError{
+ Type: credSource,
+ Profile: profile,
+ }
+ }
+
+ return nil
+}
+
+func (cfg *sharedConfig) validateCredentialType() error {
+ // Only one or no credential type can be defined.
+ if !oneOrNone(
+ len(cfg.SourceProfileName) != 0,
+ len(cfg.CredentialSource) != 0,
+ len(cfg.CredentialProcess) != 0,
+ len(cfg.WebIdentityTokenFile) != 0,
+ ) {
+ return ErrSharedConfigSourceCollision
+ }
+
+ return nil
+}
+
+func (cfg *sharedConfig) hasCredentials() bool {
+ switch {
+ case len(cfg.SourceProfileName) != 0:
+ case len(cfg.CredentialSource) != 0:
+ case len(cfg.CredentialProcess) != 0:
+ case len(cfg.WebIdentityTokenFile) != 0:
+ case cfg.Creds.HasKeys():
+ default:
+ return false
+ }
+
+ return true
+}
+
+func (cfg *sharedConfig) clearCredentialOptions() {
+ cfg.CredentialSource = ""
+ cfg.CredentialProcess = ""
+ cfg.WebIdentityTokenFile = ""
+ cfg.Creds = credentials.Value{}
+}
+
+func (cfg *sharedConfig) clearAssumeRoleOptions() {
+ cfg.RoleARN = ""
+ cfg.ExternalID = ""
+ cfg.MFASerial = ""
+ cfg.RoleSessionName = ""
+ cfg.SourceProfileName = ""
+}
+
+func oneOrNone(bs ...bool) bool {
+ var count int
+
+ for _, b := range bs {
+ if b {
+ count++
+ if count > 1 {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// updateString will only update the dst with the value in the section key, if
+// the key is present in the section.
+func updateString(dst *string, section ini.Section, key string) {
+ if !section.Has(key) {
+ return
+ }
+ *dst = section.String(key)
+}
+
+// updateBoolPtr will only update the dst with the value in the section key,
+// if the key is present in the section.
+func updateBoolPtr(dst **bool, section ini.Section, key string) {
+ if !section.Has(key) {
+ return
+ }
+ *dst = new(bool)
+ **dst = section.Bool(key)
+}
+
+// SharedConfigLoadError is an error for when the shared config file fails to load.
+type SharedConfigLoadError struct {
+ Filename string
+ Err error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigLoadError) Code() string {
+ return "SharedConfigLoadError"
+}
+
+// Message is the description of the error
+func (e SharedConfigLoadError) Message() string {
+ return fmt.Sprintf("failed to load config file, %s", e.Filename)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigLoadError) OrigErr() error {
+ return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigLoadError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigProfileNotExistsError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistsError struct {
+ Profile string
+ Err error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigProfileNotExistsError) Code() string {
+ return "SharedConfigProfileNotExistsError"
+}
+
+// Message is the description of the error
+func (e SharedConfigProfileNotExistsError) Message() string {
+ return fmt.Sprintf("failed to get profile, %s", e.Profile)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistsError) OrigErr() error {
+ return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigProfileNotExistsError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+ RoleARN string
+ SourceProfile string
+}
+
+// Code is the short id of the error.
+func (e SharedConfigAssumeRoleError) Code() string {
+ return "SharedConfigAssumeRoleError"
+}
+
+// Message is the description of the error
+func (e SharedConfigAssumeRoleError) Message() string {
+ return fmt.Sprintf(
+ "failed to load assume role for %s, source profile %s has no shared credentials",
+ e.RoleARN, e.SourceProfile,
+ )
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) OrigErr() error {
+ return nil
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigAssumeRoleError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+// CredentialRequiresARNError provides the error for shared config credentials
+// that are incorrectly configured in the shared config or credentials file.
+type CredentialRequiresARNError struct {
+ // type of credentials that were configured.
+ Type string
+
+ // Profile name the credentials were in.
+ Profile string
+}
+
+// Code is the short id of the error.
+func (e CredentialRequiresARNError) Code() string {
+ return "CredentialRequiresARNError"
+}
+
+// Message is the description of the error
+func (e CredentialRequiresARNError) Message() string {
+ return fmt.Sprintf(
+ "credential type %s requires role_arn, profile %s",
+ e.Type, e.Profile,
+ )
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e CredentialRequiresARNError) OrigErr() error {
+ return nil
+}
+
+// Error satisfies the error interface.
+func (e CredentialRequiresARNError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
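To make the profile-linking rules above concrete, a sketch of a config file exercising the assume-role path (ARNs, keys, and profile names are placeholders), and how it is loaded:

```ini
# ~/.aws/config
[profile ci]
role_arn       = arn:aws:iam::123456789012:role/ci-role
source_profile = base
region         = us-east-1

[profile base]
aws_access_key_id     = AKIDEXAMPLE
aws_secret_access_key = EXAMPLESECRET
```

```go
// SharedConfigEnable has the same effect as AWS_SDK_LOAD_CONFIG=1.
sess, err := session.NewSessionWithOptions(session.Options{
	Profile:           "ci",
	SharedConfigState: session.SharedConfigEnable,
})
```

setFromIniFiles links `ci` to `base` via source_profile, and returns SharedConfigAssumeRoleError if `base` provides no credentials.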
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644
index 000000000..244c86da0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+ "net/http"
+ "strings"
+)
+
+// rules houses a set of rule implementations needed for validation of a
+// string value
+type rules []rule
+
+// rule allows for flexible rule composition, and simply checks
+// whether or not a value adheres to that rule
+type rule interface {
+ IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and report whether any rule
+// applies to the value. Nested rules are supported.
+func (r rules) IsValid(value string) bool {
+ for _, rule := range r {
+ if rule.IsValid(value) {
+ return true
+ }
+ }
+ return false
+}
+
+// mapRule generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule satisfies whether it exists in the map
+func (m mapRule) IsValid(value string) bool {
+ _, ok := m[value]
+ return ok
+}
+
+// whitelist is a generic rule for whitelisting
+type whitelist struct {
+ rule
+}
+
+// IsValid for whitelist checks if the value is within the whitelist
+func (w whitelist) IsValid(value string) bool {
+ return w.rule.IsValid(value)
+}
+
+// blacklist is a generic rule for blacklisting
+type blacklist struct {
+ rule
+}
+
+// IsValid for blacklist checks that the value is not matched by the wrapped rule
+func (b blacklist) IsValid(value string) bool {
+ return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns if a match has
+// been found
+func (p patterns) IsValid(value string) bool {
+ for _, pattern := range p {
+ if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
+ return true
+ }
+ }
+ return false
+}
+
+// inclusiveRules rules allow for rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+ for _, rule := range r {
+ if !rule.IsValid(value) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
new file mode 100644
index 000000000..6aa2ed241
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
@@ -0,0 +1,7 @@
+package v4
+
+// WithUnsignedPayload will set the UnsignedPayload field of the signer
+// to true.
+func WithUnsignedPayload(v4 *Signer) {
+ v4.UnsignedPayload = true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
new file mode 100644
index 000000000..bd082e9d1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
@@ -0,0 +1,24 @@
+// +build go1.5
+
+package v4
+
+import (
+ "net/url"
+ "strings"
+)
+
+func getURIPath(u *url.URL) string {
+ var uri string
+
+ if len(u.Opaque) > 0 {
+ uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+ } else {
+ uri = u.EscapedPath()
+ }
+
+ if len(uri) == 0 {
+ uri = "/"
+ }
+
+ return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
new file mode 100644
index 000000000..8104793aa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -0,0 +1,806 @@
+// Package v4 implements signing for the AWS V4 signer.
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use the URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+// "///"
+//
+// // e.g.
+// "//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method and use the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.4 the signer will fall back to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query the signature was generated for.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, when using the signer outside of the SDK it is recommended
+// to explicitly escape the request prior to signing, which will help prevent
+// signature validation errors. This can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// If signing a request intended for an HTTP2 server, and you're using Go 1.6.2
+// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
+// message. URL.Opaque generally will force Go to make requests with absolute URL.
+// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
+// or url.EscapedPath will ignore the RawPath escaping.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
+package v4
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+ authHeaderPrefix = "AWS4-HMAC-SHA256"
+ timeFormat = "20060102T150405Z"
+ shortTimeFormat = "20060102"
+
+ // emptyStringSHA256 is a SHA256 of an empty string
+ emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+)
+
+var ignoredHeaders = rules{
+ blacklist{
+ mapRule{
+ "Authorization": struct{}{},
+ "User-Agent": struct{}{},
+ "X-Amzn-Trace-Id": struct{}{},
+ },
+ },
+}
+
+// requiredSignedHeaders is a whitelist for building canonical headers.
+var requiredSignedHeaders = rules{
+ whitelist{
+ mapRule{
+ "Cache-Control": struct{}{},
+ "Content-Disposition": struct{}{},
+ "Content-Encoding": struct{}{},
+ "Content-Language": struct{}{},
+ "Content-Md5": struct{}{},
+ "Content-Type": struct{}{},
+ "Expires": struct{}{},
+ "If-Match": struct{}{},
+ "If-Modified-Since": struct{}{},
+ "If-None-Match": struct{}{},
+ "If-Unmodified-Since": struct{}{},
+ "Range": struct{}{},
+ "X-Amz-Acl": struct{}{},
+ "X-Amz-Copy-Source": struct{}{},
+ "X-Amz-Copy-Source-If-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+ "X-Amz-Copy-Source-If-None-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+ "X-Amz-Copy-Source-Range": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Grant-Full-control": struct{}{},
+ "X-Amz-Grant-Read": struct{}{},
+ "X-Amz-Grant-Read-Acp": struct{}{},
+ "X-Amz-Grant-Write": struct{}{},
+ "X-Amz-Grant-Write-Acp": struct{}{},
+ "X-Amz-Metadata-Directive": struct{}{},
+ "X-Amz-Mfa": struct{}{},
+ "X-Amz-Request-Payer": struct{}{},
+ "X-Amz-Server-Side-Encryption": struct{}{},
+ "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Storage-Class": struct{}{},
+ "X-Amz-Tagging": struct{}{},
+ "X-Amz-Website-Redirect-Location": struct{}{},
+ "X-Amz-Content-Sha256": struct{}{},
+ },
+ },
+ patterns{"X-Amz-Meta-"},
+}
+
+// allowedQueryHoisting is a whitelist of headers allowed to be hoisted from
+// the HTTP request's headers into a presigned request's query string.
+var allowedQueryHoisting = inclusiveRules{
+ blacklist{requiredSignedHeaders},
+ patterns{"X-Amz-"},
+}
+
+// Signer applies AWS v4 signing to given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+ // The authentication credentials the request will be signed against.
+ // This value must be set to sign requests.
+ Credentials *credentials.Credentials
+
+ // Sets the log level the signer should use when reporting information to
+ // the logger. If the logger is nil nothing will be logged. See
+ // aws.LogLevelType for more information on available logging levels
+ //
+ // By default nothing will be logged.
+ Debug aws.LogLevelType
+
+ // The logger that logging information will be written to. If the logger
+ // is nil, nothing will be logged.
+ Logger aws.Logger
+
+ // Disables the Signer's moving HTTP header key/value pairs from the HTTP
+ // request header to the request's query string. This is most commonly used
+ // with pre-signed requests preventing headers from being added to the
+ // request's query string.
+ DisableHeaderHoisting bool
+
+ // Disables the automatic escaping of the URI path of the request for the
+ // signature's canonical string's path. For services that do not need
+ // additional escaping, use this to disable the signer escaping the path.
+ //
+ // S3 is an example of a service that does not need additional escaping.
+ //
+ // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+ DisableURIPathEscaping bool
+
+ // Disables the automatic setting of the HTTP request's Body field with the
+ // io.ReadSeeker passed in to the signer. This is useful if you're using a
+ // custom wrapper around the body for the io.ReadSeeker and want to preserve
+ // the Body value on the Request.Body.
+ //
+ // This does run the risk of signing a request with a body that will not be
+ // sent in the request. You need to ensure that the underlying data of the
+ // Body values are the same.
+ DisableRequestBodyOverwrite bool
+
+ // currentTimeFn returns the time value which represents the current time.
+ // This value should only be used for testing. If it is nil the default
+ // time.Now will be used.
+ currentTimeFn func() time.Time
+
+ // UnsignedPayload will prevent signing of the payload. This will only
+ // work for services that have support for this.
+ UnsignedPayload bool
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and optional
+// option values provided. If no options are provided the Signer will use its
+// default configuration.
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+ v4 := &Signer{
+ Credentials: credentials,
+ }
+
+ for _, option := range options {
+ option(v4)
+ }
+
+ return v4
+}
+
+type signingCtx struct {
+ ServiceName string
+ Region string
+ Request *http.Request
+ Body io.ReadSeeker
+ Query url.Values
+ Time time.Time
+ ExpireTime time.Duration
+ SignedHeaderVals http.Header
+
+ DisableURIPathEscaping bool
+
+ credValues credentials.Value
+ isPresign bool
+ formattedTime string
+ formattedShortTime string
+ unsignedPayload bool
+
+ bodyDigest string
+ signedHeaders string
+ canonicalHeaders string
+ canonicalString string
+ credentialString string
+ stringToSign string
+ signature string
+ authorization string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one. If
+// a `nil` body parameter is passed to Sign, the request's Body field will
+// also be set to nil. It's important to note that this functionality will
+// not change the request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+ return v4.signWithBody(r, body, service, region, 0, false, signTime)
+}
+
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
+//
+// Presign also takes an exp value which is the duration the
+// signed request will be valid after the signing time. This allows you to
+// set when the request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is because the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+ return v4.signWithBody(r, body, service, region, exp, true, signTime)
+}
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
+ currentTimeFn := v4.currentTimeFn
+ if currentTimeFn == nil {
+ currentTimeFn = time.Now
+ }
+
+ ctx := &signingCtx{
+ Request: r,
+ Body: body,
+ Query: r.URL.Query(),
+ Time: signTime,
+ ExpireTime: exp,
+ isPresign: isPresign,
+ ServiceName: service,
+ Region: region,
+ DisableURIPathEscaping: v4.DisableURIPathEscaping,
+ unsignedPayload: v4.UnsignedPayload,
+ }
+
+ for key := range ctx.Query {
+ sort.Strings(ctx.Query[key])
+ }
+
+ if ctx.isRequestSigned() {
+ ctx.Time = currentTimeFn()
+ ctx.handlePresignRemoval()
+ }
+
+ var err error
+ ctx.credValues, err = v4.Credentials.Get()
+ if err != nil {
+ return http.Header{}, err
+ }
+
+ ctx.sanitizeHostForHeader()
+ ctx.assignAmzQueryValues()
+ if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
+ return nil, err
+ }
+
+ // If the request is not presigned the body should be attached to it. This
+ // prevents the confusion of wanting to send a signed request without
+ // the body the request was signed for attached.
+ if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
+ var reader io.ReadCloser
+ if body != nil {
+ var ok bool
+ if reader, ok = body.(io.ReadCloser); !ok {
+ reader = ioutil.NopCloser(body)
+ }
+ }
+ r.Body = reader
+ }
+
+ if v4.Debug.Matches(aws.LogDebugWithSigning) {
+ v4.logSigningInfo(ctx)
+ }
+
+ return ctx.SignedHeaderVals, nil
+}
+
+func (ctx *signingCtx) sanitizeHostForHeader() {
+ request.SanitizeHostForHeader(ctx.Request)
+}
+
+func (ctx *signingCtx) handlePresignRemoval() {
+ if !ctx.isPresign {
+ return
+ }
+
+ // The request was already signed, most likely because its credentials
+ // have expired. The current signature is invalid and must be removed
+ // so the request can be re-signed, otherwise the request will fail.
+ ctx.removePresign()
+
+ // Update the request's query string to ensure the values stay in
+ // sync in the case retrieving the new credentials fails.
+ ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+ if ctx.credValues.SessionToken != "" {
+ ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+ } else {
+ ctx.Query.Del("X-Amz-Security-Token")
+ }
+
+ return
+ }
+
+ if ctx.credValues.SessionToken != "" {
+ ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+ }
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests with the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+ Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature. This
+// request handler should only be used with the SDK's built-in service clients'
+// API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
+func SignSDKRequest(req *request.Request) {
+ SignSDKRequestWithCurrentTime(req, time.Now)
+}
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+ return request.NamedHandler{
+ Name: name,
+ Fn: func(req *request.Request) {
+ SignSDKRequestWithCurrentTime(req, time.Now, opts...)
+ },
+ }
+}
+
+// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
+// function passed in. Behaves the same as SignSDKRequest with the exception
+// the request is signed with the value returned by the current time function.
+func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
+ // If the AnonymousCredentials object is used the request does not need
+ // to be signed, so skip signing entirely.
+ if req.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ region := req.ClientInfo.SigningRegion
+ if region == "" {
+ region = aws.StringValue(req.Config.Region)
+ }
+
+ name := req.ClientInfo.SigningName
+ if name == "" {
+ name = req.ClientInfo.ServiceName
+ }
+
+ v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
+ v4.Debug = req.Config.LogLevel.Value()
+ v4.Logger = req.Config.Logger
+ v4.DisableHeaderHoisting = req.NotHoist
+ v4.currentTimeFn = curTimeFn
+ if name == "s3" {
+ // S3 service should not have any escaping applied
+ v4.DisableURIPathEscaping = true
+ }
+ // Prevents setting the HTTPRequest's Body. The Body could be
+ // wrapped in a custom io.Closer that we do not want to be stomped
+ // on by the signer.
+ v4.DisableRequestBodyOverwrite = true
+ })
+
+ for _, opt := range opts {
+ opt(v4)
+ }
+
+ curTime := curTimeFn()
+ signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
+ name, region, req.ExpireTime, req.ExpireTime > 0, curTime,
+ )
+ if err != nil {
+ req.Error = err
+ req.SignedHeaderVals = nil
+ return
+ }
+
+ req.SignedHeaderVals = signedHeaders
+ req.LastSignedAt = curTime
+}
+
+const logSignInfoMsg = `DEBUG: Request Signature:
+---[ CANONICAL STRING ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
+
+func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
+ signedURLMsg := ""
+ if ctx.isPresign {
+ signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
+ }
+ msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
+ v4.Logger.Log(msg)
+}
+
+func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
+ ctx.buildTime() // no depends
+ ctx.buildCredentialString() // no depends
+
+ if err := ctx.buildBodyDigest(); err != nil {
+ return err
+ }
+
+ unsignedHeaders := ctx.Request.Header
+ if ctx.isPresign {
+ if !disableHeaderHoisting {
+ urlValues := url.Values{}
+ urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
+ for k := range urlValues {
+ ctx.Query[k] = urlValues[k]
+ }
+ }
+ }
+
+ ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
+ ctx.buildCanonicalString() // depends on canon headers / signed headers
+ ctx.buildStringToSign() // depends on canon string
+ ctx.buildSignature() // depends on string to sign
+
+ if ctx.isPresign {
+ ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
+ } else {
+ parts := []string{
+ authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
+ "SignedHeaders=" + ctx.signedHeaders,
+ "Signature=" + ctx.signature,
+ }
+ ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
+ }
+
+ return nil
+}
+
+func (ctx *signingCtx) buildTime() {
+ ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
+ ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
+
+ if ctx.isPresign {
+ duration := int64(ctx.ExpireTime / time.Second)
+ ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
+ ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
+ } else {
+ ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
+ }
+}
+
+func (ctx *signingCtx) buildCredentialString() {
+ ctx.credentialString = strings.Join([]string{
+ ctx.formattedShortTime,
+ ctx.Region,
+ ctx.ServiceName,
+ "aws4_request",
+ }, "/")
+
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
+ }
+}
+
+func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
+ query := url.Values{}
+ unsignedHeaders := http.Header{}
+ for k, h := range header {
+ if r.IsValid(k) {
+ query[k] = h
+ } else {
+ unsignedHeaders[k] = h
+ }
+ }
+
+ return query, unsignedHeaders
+}
+func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
+ var headers []string
+ headers = append(headers, "host")
+ for k, v := range header {
+ canonicalKey := http.CanonicalHeaderKey(k)
+ if !r.IsValid(canonicalKey) {
+ continue // ignored header
+ }
+ if ctx.SignedHeaderVals == nil {
+ ctx.SignedHeaderVals = make(http.Header)
+ }
+
+ lowerCaseKey := strings.ToLower(k)
+ if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
+ // include additional values
+ ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
+ continue
+ }
+
+ headers = append(headers, lowerCaseKey)
+ ctx.SignedHeaderVals[lowerCaseKey] = v
+ }
+ sort.Strings(headers)
+
+ ctx.signedHeaders = strings.Join(headers, ";")
+
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
+ }
+
+ headerValues := make([]string, len(headers))
+ for i, k := range headers {
+ if k == "host" {
+ if ctx.Request.Host != "" {
+ headerValues[i] = "host:" + ctx.Request.Host
+ } else {
+ headerValues[i] = "host:" + ctx.Request.URL.Host
+ }
+ } else {
+ headerValues[i] = k + ":" +
+ strings.Join(ctx.SignedHeaderVals[k], ",")
+ }
+ }
+ stripExcessSpaces(headerValues)
+ ctx.canonicalHeaders = strings.Join(headerValues, "\n")
+}
+
+func (ctx *signingCtx) buildCanonicalString() {
+ ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
+
+ uri := getURIPath(ctx.Request.URL)
+
+ if !ctx.DisableURIPathEscaping {
+ uri = rest.EscapePath(uri, false)
+ }
+
+ ctx.canonicalString = strings.Join([]string{
+ ctx.Request.Method,
+ uri,
+ ctx.Request.URL.RawQuery,
+ ctx.canonicalHeaders + "\n",
+ ctx.signedHeaders,
+ ctx.bodyDigest,
+ }, "\n")
+}
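For reference, the joined pieces reproduce the canonical request format from the AWS documentation linked in the package comment; for a query-API GET with an empty body it looks like the following (the final line is the emptyStringSHA256 constant defined above):

```
GET
/
Action=ListUsers&Version=2010-05-08
content-type:application/x-www-form-urlencoded; charset=utf-8
host:iam.amazonaws.com
x-amz-date:20150830T123600Z

content-type;host;x-amz-date
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
```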
+
+func (ctx *signingCtx) buildStringToSign() {
+ ctx.stringToSign = strings.Join([]string{
+ authHeaderPrefix,
+ ctx.formattedTime,
+ ctx.credentialString,
+ hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
+ }, "\n")
+}
+
+func (ctx *signingCtx) buildSignature() {
+ secret := ctx.credValues.SecretAccessKey
+ date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
+ region := makeHmac(date, []byte(ctx.Region))
+ service := makeHmac(region, []byte(ctx.ServiceName))
+ credentials := makeHmac(service, []byte("aws4_request"))
+ signature := makeHmac(credentials, []byte(ctx.stringToSign))
+ ctx.signature = hex.EncodeToString(signature)
+}
+
+func (ctx *signingCtx) buildBodyDigest() error {
+ hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
+ if hash == "" {
+ includeSHA256Header := ctx.unsignedPayload ||
+ ctx.ServiceName == "s3" ||
+ ctx.ServiceName == "glacier"
+
+ s3Presign := ctx.isPresign && ctx.ServiceName == "s3"
+
+ if ctx.unsignedPayload || s3Presign {
+ hash = "UNSIGNED-PAYLOAD"
+ includeSHA256Header = !s3Presign
+ } else if ctx.Body == nil {
+ hash = emptyStringSHA256
+ } else {
+ if !aws.IsReaderSeekable(ctx.Body) {
+ return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
+ }
+ hashBytes, err := makeSha256Reader(ctx.Body)
+ if err != nil {
+ return err
+ }
+ hash = hex.EncodeToString(hashBytes)
+ }
+
+ if includeSHA256Header {
+ ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
+ }
+ }
+ ctx.bodyDigest = hash
+
+ return nil
+}
+
+// isRequestSigned returns if the request is currently signed or presigned
+func (ctx *signingCtx) isRequestSigned() bool {
+ if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
+ return true
+ }
+ if ctx.Request.Header.Get("Authorization") != "" {
+ return true
+ }
+
+ return false
+}
+
+// removePresign removes the query string values added by a previous presign.
+func (ctx *signingCtx) removePresign() {
+ ctx.Query.Del("X-Amz-Algorithm")
+ ctx.Query.Del("X-Amz-Signature")
+ ctx.Query.Del("X-Amz-Security-Token")
+ ctx.Query.Del("X-Amz-Date")
+ ctx.Query.Del("X-Amz-Expires")
+ ctx.Query.Del("X-Amz-Credential")
+ ctx.Query.Del("X-Amz-SignedHeaders")
+}
+
+func makeHmac(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+func makeSha256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) {
+ hash := sha256.New()
+ start, err := reader.Seek(0, sdkio.SeekCurrent)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ // ensure an error is returned if unable to seek back to the start of the payload.
+ _, err = reader.Seek(start, sdkio.SeekStart)
+ }()
+
+ // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
+ // smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
+ size, err := aws.SeekerLen(reader)
+ if err != nil {
+ io.Copy(hash, reader)
+ } else {
+ io.CopyN(hash, reader, size)
+ }
+
+ return hash.Sum(nil), nil
+}
+
+const doubleSpace = " "
+
+// stripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
+func stripExcessSpaces(vals []string) {
+ var j, k, l, m, spaces int
+ for i, str := range vals {
+ // Trim trailing spaces
+ for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+ }
+
+ // Trim leading spaces
+ for k = 0; k < j && str[k] == ' '; k++ {
+ }
+ str = str[k : j+1]
+
+ // Strip multiple spaces.
+ j = strings.Index(str, doubleSpace)
+ if j < 0 {
+ vals[i] = str
+ continue
+ }
+
+ buf := []byte(str)
+ for k, m, l = j, j, len(buf); k < l; k++ {
+ if buf[k] == ' ' {
+ if spaces == 0 {
+ // First space.
+ buf[m] = buf[k]
+ m++
+ }
+ spaces++
+ } else {
+ // End of multiple spaces.
+ spaces = 0
+ buf[m] = buf[k]
+ m++
+ }
+ }
+
+ vals[i] = string(buf[:m])
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 000000000..455091540
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,207 @@
+package aws
+
+import (
+ "io"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. Allows the
+// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
+// streaming payload API operations.
+//
+// A ReadSeekCloser wrapping a nonseekable io.Reader used in an API
+// operation's input will prevent that operation being retried in the case of
+// network errors, and cause operation requests to fail if the operation
+// requires payload signing.
+//
+// Note: If using S3 PutObject to stream an object upload, the SDK's S3
+// Upload manager (s3manager.Uploader) provides support for streaming with the
+// ability to retry network errors.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+ return ReaderSeekerCloser{r}
+}
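A sketch of the intended use (os.Stdin stands in for any nonseekable stream; `aws` and `os` imports assumed):

```go
// stream satisfies io.ReadSeeker for API input structs, but Seek is a
// no-op and IsReaderSeekable(stream) reports false, so retries and
// payload signing are unavailable for the operation.
stream := aws.ReadSeekCloser(os.Stdin)
_ = stream
```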
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+ r io.Reader
+}
+
+// IsReaderSeekable returns whether the underlying reader can be seeked. An
+// io.Reader might not actually be seekable if it is the ReaderSeekerCloser
+// type.
+func IsReaderSeekable(r io.Reader) bool {
+ switch v := r.(type) {
+ case ReaderSeekerCloser:
+ return v.IsSeeker()
+ case *ReaderSeekerCloser:
+ return v.IsSeeker()
+ case io.ReadSeeker:
+ return true
+ default:
+ return false
+ }
+}
+
+// Read reads from the reader up to the size of p. The number of bytes read,
+// and an error if one occurred, will be returned.
+//
+// If the underlying reader is not an io.Reader, zero bytes read and a nil
+// error will be returned.
+//
+// Performs the same functionality as io.Reader's Read.
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+ switch t := r.r.(type) {
+ case io.Reader:
+ return t.Read(p)
+ }
+ return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+ switch t := r.r.(type) {
+ case io.Seeker:
+ return t.Seek(offset, whence)
+ }
+ return int64(0), nil
+}
+
+// IsSeeker returns if the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+ _, ok := r.r.(io.Seeker)
+ return ok
+}
+
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+func (r ReaderSeekerCloser) HasLen() (int, bool) {
+ type lenner interface {
+ Len() int
+ }
+
+ if lr, ok := r.r.(lenner); ok {
+ return lr.Len(), true
+ }
+
+ return 0, false
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r ReaderSeekerCloser) GetLen() (int64, error) {
+ if l, ok := r.HasLen(); ok {
+ return int64(l), nil
+ }
+
+ if s, ok := r.r.(io.Seeker); ok {
+ return seekerLen(s)
+ }
+
+ return -1, nil
+}
+
+// SeekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining, or an error.
+func SeekerLen(s io.Seeker) (int64, error) {
+ // Determine if the seeker is actually seekable. ReaderSeekerCloser
+ // hides the fact that an io.Reader might not actually be seekable.
+ switch v := s.(type) {
+ case ReaderSeekerCloser:
+ return v.GetLen()
+ case *ReaderSeekerCloser:
+ return v.GetLen()
+ }
+
+ return seekerLen(s)
+}
+
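+// seekerLen computes the bytes remaining by seeking to the end and then
+// restoring the original offset: remaining = end - current.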
+func seekerLen(s io.Seeker) (int64, error) {
+ curOffset, err := s.Seek(0, sdkio.SeekCurrent)
+ if err != nil {
+ return 0, err
+ }
+
+ endOffset, err := s.Seek(0, sdkio.SeekEnd)
+ if err != nil {
+ return 0, err
+ }
+
+ _, err = s.Seek(curOffset, sdkio.SeekStart)
+ if err != nil {
+ return 0, err
+ }
+
+ return endOffset - curOffset, nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the underlying reader is not an io.Closer, nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+ switch t := r.r.(type) {
+ case io.Closer:
+ return t.Close()
+ }
+ return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
+// interface. It can be used with the s3manager.Downloader to download content
+// to a buffer in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+ buf []byte
+ m sync.Mutex
+
+ // GrowthCoeff defines the growth rate of the internal buffer. By
+ // default, the growth rate is 1, where expanding the internal
+ // buffer will allocate only enough capacity to fit the new expected
+ // length.
+ GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+ return &WriteAtBuffer{buf: buf}
+}
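+
+// A minimal usage sketch (illustrative only); out-of-order writes land at
+// their offsets, and setting GrowthCoeff > 1 over-allocates to amortize
+// repeated growth:
+//
+//   buf := aws.NewWriteAtBuffer(nil)
+//   buf.WriteAt([]byte("world"), 5)
+//   buf.WriteAt([]byte("hello"), 0)
+//   // string(buf.Bytes()) == "helloworld"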
+
+// WriteAt writes a slice of bytes to the buffer starting at the position
+// provided. The number of bytes written, or an error, will be returned.
+// Previously written slices can be overwritten if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+ pLen := len(p)
+ expLen := pos + int64(pLen)
+ b.m.Lock()
+ defer b.m.Unlock()
+ if int64(len(b.buf)) < expLen {
+ if int64(cap(b.buf)) < expLen {
+ if b.GrowthCoeff < 1 {
+ b.GrowthCoeff = 1
+ }
+ newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+ copy(newBuf, b.buf)
+ b.buf = newBuf
+ }
+ b.buf = b.buf[:expLen]
+ }
+ copy(b.buf[pos:], p)
+ return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.buf
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go
new file mode 100644
index 000000000..6192b2455
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go
@@ -0,0 +1,12 @@
+// +build go1.8
+
+package aws
+
+import "net/url"
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
+func URLHostname(url *url.URL) string {
+ return url.Hostname()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
new file mode 100644
index 000000000..0210d2720
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
@@ -0,0 +1,29 @@
+// +build !go1.8
+
+package aws
+
+import (
+ "net/url"
+ "strings"
+)
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Copy of Go 1.8's net/url#URL.Hostname functionality.
+func URLHostname(url *url.URL) string {
+ return stripPort(url.Host)
+}
+
+// stripPort is a copy of Go 1.8's url#URL.Hostname functionality.
+// https://golang.org/src/net/url/url.go
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
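+
+// For example (illustrative):
+//
+//   stripPort("example.com:443") // "example.com"
+//   stripPort("[::1]:8080")      // "::1"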
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 000000000..120657a0b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.25.25"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
new file mode 100644
index 000000000..e83a99886
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
@@ -0,0 +1,120 @@
+package ini
+
+// ASTKind represents different states in the parse table
+// and the type of AST that is being constructed
+type ASTKind int
+
+// ASTKind* is used in the parse table to transition between
+// the different states
+const (
+ ASTKindNone = ASTKind(iota)
+ ASTKindStart
+ ASTKindExpr
+ ASTKindEqualExpr
+ ASTKindStatement
+ ASTKindSkipStatement
+ ASTKindExprStatement
+ ASTKindSectionStatement
+ ASTKindNestedSectionStatement
+ ASTKindCompletedNestedSectionStatement
+ ASTKindCommentStatement
+ ASTKindCompletedSectionStatement
+)
+
+func (k ASTKind) String() string {
+ switch k {
+ case ASTKindNone:
+ return "none"
+ case ASTKindStart:
+ return "start"
+ case ASTKindExpr:
+ return "expr"
+ case ASTKindStatement:
+ return "stmt"
+ case ASTKindSectionStatement:
+ return "section_stmt"
+ case ASTKindExprStatement:
+ return "expr_stmt"
+ case ASTKindCommentStatement:
+ return "comment"
+ case ASTKindNestedSectionStatement:
+ return "nested_section_stmt"
+ case ASTKindCompletedSectionStatement:
+ return "completed_stmt"
+ case ASTKindSkipStatement:
+ return "skip"
+ default:
+ return ""
+ }
+}
+
+// AST allows us to determine what kind of node we are on without
+// requiring a type assertion.
+//
+// The root is always the first node in Children.
+type AST struct {
+ Kind ASTKind
+ Root Token
+ RootToken bool
+ Children []AST
+}
+
+func newAST(kind ASTKind, root AST, children ...AST) AST {
+ return AST{
+ Kind: kind,
+ Children: append([]AST{root}, children...),
+ }
+}
+
+func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST {
+ return AST{
+ Kind: kind,
+ Root: root,
+ RootToken: true,
+ Children: children,
+ }
+}
+
+// AppendChild will append to the list of children an AST has.
+func (a *AST) AppendChild(child AST) {
+ a.Children = append(a.Children, child)
+}
+
+// GetRoot will return the root AST which can be the first entry
+// in the children list or a token.
+func (a *AST) GetRoot() AST {
+ if a.RootToken {
+ return *a
+ }
+
+ if len(a.Children) == 0 {
+ return AST{}
+ }
+
+ return a.Children[0]
+}
+
+// GetChildren will return the current AST's list of children
+func (a *AST) GetChildren() []AST {
+ if len(a.Children) == 0 {
+ return []AST{}
+ }
+
+ if a.RootToken {
+ return a.Children
+ }
+
+ return a.Children[1:]
+}
+
+// SetChildren will set and override all children of the AST.
+func (a *AST) SetChildren(children []AST) {
+ if a.RootToken {
+ a.Children = children
+ } else {
+ a.Children = append(a.Children[:1], children...)
+ }
+}
+
+// Start is used to indicate the starting state of the parse table.
+var Start = newAST(ASTKindStart, AST{})
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
new file mode 100644
index 000000000..0895d53cb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
@@ -0,0 +1,11 @@
+package ini
+
+var commaRunes = []rune(",")
+
+func isComma(b rune) bool {
+ return b == ','
+}
+
+func newCommaToken() Token {
+ return newToken(TokenComma, commaRunes, NoneType)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
new file mode 100644
index 000000000..0b76999ba
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
@@ -0,0 +1,35 @@
+package ini
+
+// isComment will return whether or not the next rune(s) begin a
+// comment.
+func isComment(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case ';':
+ return true
+ case '#':
+ return true
+ }
+
+ return false
+}
+
+// newCommentToken will create a comment token and
+// return how many runes were read.
+func newCommentToken(b []rune) (Token, int, error) {
+ i := 0
+ for ; i < len(b); i++ {
+ if b[i] == '\n' {
+ break
+ }
+
+ if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' {
+ break
+ }
+ }
+
+ return newToken(TokenComment, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
new file mode 100644
index 000000000..25ce0fe13
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
@@ -0,0 +1,29 @@
+// Package ini is an LL(1) parser for configuration files.
+//
+// Example:
+// sections, err := ini.OpenFile("/path/to/file")
+// if err != nil {
+// panic(err)
+// }
+//
+// profile := "foo"
+// section, ok := sections.GetSection(profile)
+// if !ok {
+// fmt.Printf("section %q could not be found", profile)
+// }
+//
+// Below is the BNF that describes this parser
+// Grammar:
+// stmt -> value stmt'
+// stmt' -> epsilon | op stmt
+// value -> number | string | boolean | quoted_string
+//
+// section -> [ section'
+// section' -> value section_close
+// section_close -> ]
+//
+// SkipState will skip (NL WS)+
+//
+// comment -> # comment' | ; comment'
+// comment' -> epsilon | value
+package ini
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
new file mode 100644
index 000000000..04345a54c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
@@ -0,0 +1,4 @@
+package ini
+
+// emptyToken is a zero-value Token used as a placeholder
+var emptyToken = newToken(TokenNone, []rune{}, NoneType)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
new file mode 100644
index 000000000..91ba2a59d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
@@ -0,0 +1,24 @@
+package ini
+
+// newExpression will return an expression AST.
+// Expr represents an expression
+//
+// grammar:
+// expr -> string | number
+func newExpression(tok Token) AST {
+ return newASTWithRootToken(ASTKindExpr, tok)
+}
+
+func newEqualExpr(left AST, tok Token) AST {
+ return newASTWithRootToken(ASTKindEqualExpr, tok, left)
+}
+
+// EqualExprKey will return the LHS value of the equal expression.
+func EqualExprKey(ast AST) string {
+ children := ast.GetChildren()
+ if len(children) == 0 || ast.Kind != ASTKindEqualExpr {
+ return ""
+ }
+
+ return string(children[0].Root.Raw())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
new file mode 100644
index 000000000..8d462f77e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
@@ -0,0 +1,17 @@
+// +build gofuzz
+
+package ini
+
+import (
+ "bytes"
+)
+
+func Fuzz(data []byte) int {
+ b := bytes.NewReader(data)
+
+ if _, err := Parse(b); err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
new file mode 100644
index 000000000..3b0ca7afe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
@@ -0,0 +1,51 @@
+package ini
+
+import (
+ "io"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// OpenFile takes a path to a given file, and will open and parse
+// that file.
+func OpenFile(path string) (Sections, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err)
+ }
+ defer f.Close()
+
+ return Parse(f)
+}
+
+// Parse will parse the given file using the shared config
+// visitor.
+func Parse(f io.Reader) (Sections, error) {
+ tree, err := ParseAST(f)
+ if err != nil {
+ return Sections{}, err
+ }
+
+ v := NewDefaultVisitor()
+ if err = Walk(tree, v); err != nil {
+ return Sections{}, err
+ }
+
+ return v.Sections, nil
+}
+
+// ParseBytes will parse the given bytes and return the parsed sections.
+func ParseBytes(b []byte) (Sections, error) {
+ tree, err := ParseASTBytes(b)
+ if err != nil {
+ return Sections{}, err
+ }
+
+ v := NewDefaultVisitor()
+ if err = Walk(tree, v); err != nil {
+ return Sections{}, err
+ }
+
+ return v.Sections, nil
+}
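+
+// A minimal usage sketch (illustrative only):
+//
+//   sections, err := ini.ParseBytes([]byte("[default]\nregion = us-west-2\n"))
+//   if err != nil {
+//       // handle the parse error
+//   }
+//   if section, ok := sections.GetSection("default"); ok {
+//       _ = section.String("region") // "us-west-2"
+//   }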
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
new file mode 100644
index 000000000..582c024ad
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
@@ -0,0 +1,165 @@
+package ini
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+ // ErrCodeUnableToReadFile is used when a file fails to be
+ // opened or read from.
+ ErrCodeUnableToReadFile = "FailedRead"
+)
+
+// TokenType represents the various token types.
+type TokenType int
+
+func (t TokenType) String() string {
+ switch t {
+ case TokenNone:
+ return "none"
+ case TokenLit:
+ return "literal"
+ case TokenSep:
+ return "sep"
+ case TokenOp:
+ return "op"
+ case TokenWS:
+ return "ws"
+ case TokenNL:
+ return "newline"
+ case TokenComment:
+ return "comment"
+ case TokenComma:
+ return "comma"
+ default:
+ return ""
+ }
+}
+
+// TokenType enums
+const (
+ TokenNone = TokenType(iota)
+ TokenLit
+ TokenSep
+ TokenComma
+ TokenOp
+ TokenWS
+ TokenNL
+ TokenComment
+)
+
+type iniLexer struct{}
+
+// Tokenize will return a list of tokens during lexical analysis of the
+// io.Reader.
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err)
+ }
+
+ return l.tokenize(b)
+}
+
+func (l *iniLexer) tokenize(b []byte) ([]Token, error) {
+ runes := bytes.Runes(b)
+ var err error
+ n := 0
+ tokenAmount := countTokens(runes)
+ tokens := make([]Token, tokenAmount)
+ count := 0
+
+ for len(runes) > 0 && count < tokenAmount {
+ switch {
+ case isWhitespace(runes[0]):
+ tokens[count], n, err = newWSToken(runes)
+ case isComma(runes[0]):
+ tokens[count], n = newCommaToken(), 1
+ case isComment(runes):
+ tokens[count], n, err = newCommentToken(runes)
+ case isNewline(runes):
+ tokens[count], n, err = newNewlineToken(runes)
+ case isSep(runes):
+ tokens[count], n, err = newSepToken(runes)
+ case isOp(runes):
+ tokens[count], n, err = newOpToken(runes)
+ default:
+ tokens[count], n, err = newLitToken(runes)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ count++
+
+ runes = runes[n:]
+ }
+
+ return tokens[:count], nil
+}
+
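+// countTokens pre-scans the runes with the same rules as tokenize so the
+// token slice can be sized up front in a single allocation (with one spare
+// slot).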
+func countTokens(runes []rune) int {
+ count, n := 0, 0
+ var err error
+
+ for len(runes) > 0 {
+ switch {
+ case isWhitespace(runes[0]):
+ _, n, err = newWSToken(runes)
+ case isComma(runes[0]):
+ _, n = newCommaToken(), 1
+ case isComment(runes):
+ _, n, err = newCommentToken(runes)
+ case isNewline(runes):
+ _, n, err = newNewlineToken(runes)
+ case isSep(runes):
+ _, n, err = newSepToken(runes)
+ case isOp(runes):
+ _, n, err = newOpToken(runes)
+ default:
+ _, n, err = newLitToken(runes)
+ }
+
+ if err != nil {
+ return 0
+ }
+
+ count++
+ runes = runes[n:]
+ }
+
+ return count + 1
+}
+
+// Token carries metadata about a given value.
+type Token struct {
+ t TokenType
+ ValueType ValueType
+ base int
+ raw []rune
+}
+
+var emptyValue = Value{}
+
+func newToken(t TokenType, raw []rune, v ValueType) Token {
+ return Token{
+ t: t,
+ raw: raw,
+ ValueType: v,
+ }
+}
+
+// Raw returns the raw runes that were consumed.
+func (tok Token) Raw() []rune {
+ return tok.raw
+}
+
+// Type returns the token type
+func (tok Token) Type() TokenType {
+ return tok.t
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
new file mode 100644
index 000000000..cf9fad81e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
@@ -0,0 +1,356 @@
+package ini
+
+import (
+ "fmt"
+ "io"
+)
+
+// State enums for the parse table
+const (
+ InvalidState = iota
+ // stmt -> value stmt'
+ StatementState
+ // stmt' -> MarkComplete | op stmt
+ StatementPrimeState
+ // value -> number | string | boolean | quoted_string
+ ValueState
+ // section -> [ section'
+ OpenScopeState
+ // section' -> value section_close
+ SectionState
+ // section_close -> ]
+ CloseScopeState
+ // SkipState will skip (NL WS)+
+ SkipState
+ // SkipTokenState will skip any token and push the previous
+ // state onto the stack.
+ SkipTokenState
+ // comment -> # comment' | ; comment'
+ // comment' -> MarkComplete | value
+ CommentState
+ // MarkComplete state will complete statements and move that
+ // to the completed AST list
+ MarkCompleteState
+ // TerminalState signifies that the tokens have been fully parsed
+ TerminalState
+)
+
+// parseTable is a state machine to dictate the grammar above.
+var parseTable = map[ASTKind]map[TokenType]int{
+ ASTKindStart: map[TokenType]int{
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ },
+ ASTKindCommentStatement: map[TokenType]int{
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindExpr: map[TokenType]int{
+ TokenOp: StatementPrimeState,
+ TokenLit: ValueState,
+ TokenSep: OpenScopeState,
+ TokenWS: ValueState,
+ TokenNL: SkipState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindEqualExpr: map[TokenType]int{
+ TokenLit: ValueState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipState,
+ },
+ ASTKindStatement: map[TokenType]int{
+ TokenLit: SectionState,
+ TokenSep: CloseScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindExprStatement: map[TokenType]int{
+ TokenLit: ValueState,
+ TokenSep: OpenScopeState,
+ TokenOp: ValueState,
+ TokenWS: ValueState,
+ TokenNL: MarkCompleteState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ TokenComma: SkipState,
+ },
+ ASTKindSectionStatement: map[TokenType]int{
+ TokenLit: SectionState,
+ TokenOp: SectionState,
+ TokenSep: CloseScopeState,
+ TokenWS: SectionState,
+ TokenNL: SkipTokenState,
+ },
+ ASTKindCompletedSectionStatement: map[TokenType]int{
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindSkipStatement: map[TokenType]int{
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ },
+}
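+
+// As a rough sketch (illustrative only), the tokens for "key = value" walk
+// the table as follows (whitespace transitions elided):
+//
+//   Start + lit          -> StatementState      (push Expr("key"))
+//   Expr + op            -> StatementPrimeState (push EqualExpr)
+//   EqualExpr + lit      -> ValueState          (push ExprStatement)
+//   ExprStatement + none -> TerminalState       (statement marked complete)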
+
+// ParseAST will parse input from an io.Reader using
+// an LL(1) parser.
+func ParseAST(r io.Reader) ([]AST, error) {
+ lexer := iniLexer{}
+ tokens, err := lexer.Tokenize(r)
+ if err != nil {
+ return []AST{}, err
+ }
+
+ return parse(tokens)
+}
+
+// ParseASTBytes will parse input from a byte slice using
+// an LL(1) parser.
+func ParseASTBytes(b []byte) ([]AST, error) {
+ lexer := iniLexer{}
+ tokens, err := lexer.tokenize(b)
+ if err != nil {
+ return []AST{}, err
+ }
+
+ return parse(tokens)
+}
+
+func parse(tokens []Token) ([]AST, error) {
+ start := Start
+ stack := newParseStack(3, len(tokens))
+
+ stack.Push(start)
+ s := newSkipper()
+
+loop:
+ for stack.Len() > 0 {
+ k := stack.Pop()
+
+ var tok Token
+ if len(tokens) == 0 {
+ // this occurs when all the tokens have been processed
+ // but reduction of what's left on the stack needs to
+ // occur.
+ tok = emptyToken
+ } else {
+ tok = tokens[0]
+ }
+
+ step := parseTable[k.Kind][tok.Type()]
+ if s.ShouldSkip(tok) {
+ // being in a skip state with no tokens will break out of
+ // the parse loop since there is nothing left to process.
+ if len(tokens) == 0 {
+ break loop
+ }
+ // if should skip is true, we skip the tokens until should skip is set to false.
+ step = SkipTokenState
+ }
+
+ switch step {
+ case TerminalState:
+ // Finished parsing. Push what should be the last
+ // statement to the stack. If there is anything left
+ // on the stack, an error in parsing has occurred.
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+ break loop
+ case SkipTokenState:
+ // When skipping a token, the previous state was popped off the stack.
+ // To maintain the correct state, the previous state will be pushed
+ // onto the stack.
+ stack.Push(k)
+ case StatementState:
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+ expr := newExpression(tok)
+ stack.Push(expr)
+ case StatementPrimeState:
+ if tok.Type() != TokenOp {
+ stack.MarkComplete(k)
+ continue
+ }
+
+ if k.Kind != ASTKindExpr {
+ return nil, NewParseError(
+ fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k),
+ )
+ }
+
+ k = trimSpaces(k)
+ expr := newEqualExpr(k, tok)
+ stack.Push(expr)
+ case ValueState:
+ // ValueState requires the previous state to either be an equal expression
+ // or an expression statement.
+ //
+ // This grammar occurs when the RHS is a number, word, or quoted string.
+ // equal_expr -> lit op equal_expr'
+ // equal_expr' -> number | string | quoted_string
+ // quoted_string -> " quoted_string'
+ // quoted_string' -> string quoted_string_end
+ // quoted_string_end -> "
+ //
+ // otherwise
+ // expr_stmt -> equal_expr (expr_stmt')*
+ // expr_stmt' -> ws S | op S | MarkComplete
+ // S -> equal_expr' expr_stmt'
+ switch k.Kind {
+ case ASTKindEqualExpr:
+ // assigning a value to some key
+ k.AppendChild(newExpression(tok))
+ stack.Push(newExprStatement(k))
+ case ASTKindExpr:
+ k.Root.raw = append(k.Root.raw, tok.Raw()...)
+ stack.Push(k)
+ case ASTKindExprStatement:
+ root := k.GetRoot()
+ children := root.GetChildren()
+ if len(children) == 0 {
+ return nil, NewParseError(
+ fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind),
+ )
+ }
+
+ rhs := children[len(children)-1]
+
+ if rhs.Root.ValueType != QuotedStringType {
+ rhs.Root.ValueType = StringType
+ rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...)
+
+ }
+
+ children[len(children)-1] = rhs
+ k.SetChildren(children)
+
+ stack.Push(k)
+ }
+ case OpenScopeState:
+ if !runeCompare(tok.Raw(), openBrace) {
+ return nil, NewParseError("expected '['")
+ }
+ // If OpenScopeState is not at the start, we must mark the previous ast as complete
+ //
+ // for example: if previous ast was a skip statement;
+ // we should mark it as complete before we create a new statement
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+
+ stmt := newStatement()
+ stack.Push(stmt)
+ case CloseScopeState:
+ if !runeCompare(tok.Raw(), closeBrace) {
+ return nil, NewParseError("expected ']'")
+ }
+
+ k = trimSpaces(k)
+ stack.Push(newCompletedSectionStatement(k))
+ case SectionState:
+ var stmt AST
+
+ switch k.Kind {
+ case ASTKindStatement:
+ // If there are multiple literals inside of a scope declaration,
+ // then the current token's raw value will be appended to the Name.
+ //
+ // This handles cases like [ profile default ]
+ //
+ // k will represent a SectionStatement with the children representing
+ // the label of the section
+ stmt = newSectionStatement(tok)
+ case ASTKindSectionStatement:
+ k.Root.raw = append(k.Root.raw, tok.Raw()...)
+ stmt = k
+ default:
+ return nil, NewParseError(
+ fmt.Sprintf("invalid statement: expected statement: %v", k.Kind),
+ )
+ }
+
+ stack.Push(stmt)
+ case MarkCompleteState:
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+
+ if stack.Len() == 0 {
+ stack.Push(start)
+ }
+ case SkipState:
+ stack.Push(newSkipStatement(k))
+ s.Skip()
+ case CommentState:
+ if k.Kind == ASTKindStart {
+ stack.Push(k)
+ } else {
+ stack.MarkComplete(k)
+ }
+
+ stmt := newCommentStatement(tok)
+ stack.Push(stmt)
+ default:
+ return nil, NewParseError(
+ fmt.Sprintf("invalid state with ASTKind %v and TokenType %v",
+ k, tok.Type()))
+ }
+
+ if len(tokens) > 0 {
+ tokens = tokens[1:]
+ }
+ }
+
+ // this occurs when a statement has not been completed
+ if stack.top > 1 {
+ return nil, NewParseError("incomplete ini expression")
+ }
+
+ // returns a sublist which excludes the start symbol
+ return stack.List(), nil
+}
+
+// trimSpaces will trim spaces on the left and right hand side of
+// the literal.
+func trimSpaces(k AST) AST {
+ // trim left hand side of spaces
+ for i := 0; i < len(k.Root.raw); i++ {
+ if !isWhitespace(k.Root.raw[i]) {
+ break
+ }
+
+ k.Root.raw = k.Root.raw[1:]
+ i--
+ }
+
+ // trim right hand side of spaces
+ for i := len(k.Root.raw) - 1; i >= 0; i-- {
+ if !isWhitespace(k.Root.raw[i]) {
+ break
+ }
+
+ k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
+ }
+
+ return k
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
new file mode 100644
index 000000000..24df543d3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
@@ -0,0 +1,324 @@
+package ini
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var (
+ runesTrue = []rune("true")
+ runesFalse = []rune("false")
+)
+
+var literalValues = [][]rune{
+ runesTrue,
+ runesFalse,
+}
+
+func isBoolValue(b []rune) bool {
+ for _, lv := range literalValues {
+ if isLitValue(lv, b) {
+ return true
+ }
+ }
+ return false
+}
+
+func isLitValue(want, have []rune) bool {
+ if len(have) < len(want) {
+ return false
+ }
+
+ for i := 0; i < len(want); i++ {
+ if want[i] != have[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+// isNumberValue will return whether or not the leading characters in
+// a rune slice are a number. A number is delimited by whitespace or
+// the newline token.
+//
+// A number is defined to be in binary, octal, decimal (int | float), or
+// hex format, or in scientific notation.
+func isNumberValue(b []rune) bool {
+ negativeIndex := 0
+ helper := numberHelper{}
+ needDigit := false
+
+ for i := 0; i < len(b); i++ {
+ negativeIndex++
+
+ switch b[i] {
+ case '-':
+ if helper.IsNegative() || negativeIndex != 1 {
+ return false
+ }
+ helper.Determine(b[i])
+ needDigit = true
+ continue
+ case 'e', 'E':
+ if err := helper.Determine(b[i]); err != nil {
+ return false
+ }
+ negativeIndex = 0
+ needDigit = true
+ continue
+ case 'b':
+ if helper.numberFormat == hex {
+ break
+ }
+ fallthrough
+ case 'o', 'x':
+ needDigit = true
+ if i == 0 {
+ return false
+ }
+
+ fallthrough
+ case '.':
+ if err := helper.Determine(b[i]); err != nil {
+ return false
+ }
+ needDigit = true
+ continue
+ }
+
+ if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) {
+ return !needDigit
+ }
+
+ if !helper.CorrectByte(b[i]) {
+ return false
+ }
+ needDigit = false
+ }
+
+ return !needDigit
+}
+
+func isValid(b []rune) (bool, int, error) {
+ if len(b) == 0 {
+ // TODO: should probably return an error
+ return false, 0, nil
+ }
+
+ return isValidRune(b[0]), 1, nil
+}
+
+func isValidRune(r rune) bool {
+ return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n'
+}
+
+// ValueType is an enum that will signify what type
+// the Value is
+type ValueType int
+
+func (v ValueType) String() string {
+ switch v {
+ case NoneType:
+ return "NONE"
+ case DecimalType:
+ return "FLOAT"
+ case IntegerType:
+ return "INT"
+ case StringType:
+ return "STRING"
+ case BoolType:
+ return "BOOL"
+ }
+
+ return ""
+}
+
+// ValueType enums
+const (
+ NoneType = ValueType(iota)
+ DecimalType
+ IntegerType
+ StringType
+ QuotedStringType
+ BoolType
+)
+
+// Value is a union container
+type Value struct {
+ Type ValueType
+ raw []rune
+
+ integer int64
+ decimal float64
+ boolean bool
+ str string
+}
+
+func newValue(t ValueType, base int, raw []rune) (Value, error) {
+ v := Value{
+ Type: t,
+ raw: raw,
+ }
+ var err error
+
+ switch t {
+ case DecimalType:
+ v.decimal, err = strconv.ParseFloat(string(raw), 64)
+ case IntegerType:
+ if base != 10 {
+ raw = raw[2:]
+ }
+
+ v.integer, err = strconv.ParseInt(string(raw), base, 64)
+ case StringType:
+ v.str = string(raw)
+ case QuotedStringType:
+ v.str = string(raw[1 : len(raw)-1])
+ case BoolType:
+ v.boolean = runeCompare(v.raw, runesTrue)
+ }
+
+ // issue 2253
+ //
+ // if the value trying to be parsed is too large, then we will use
+ // the 'StringType' and raw value instead.
+ if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange {
+ v.Type = StringType
+ v.str = string(raw)
+ err = nil
+ }
+
+ return v, err
+}
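+
+// For example (illustrative): newValue(IntegerType, 16, []rune("0x2A"))
+// strips the "0x" prefix and parses the remainder in base 16, yielding an
+// integer value of 42.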
+
+// Append will append values and change the type to a string
+// type.
+func (v *Value) Append(tok Token) {
+ r := tok.Raw()
+ if v.Type != QuotedStringType {
+ v.Type = StringType
+ r = tok.raw[1 : len(tok.raw)-1]
+ }
+ if tok.Type() != TokenLit {
+ v.raw = append(v.raw, tok.Raw()...)
+ } else {
+ v.raw = append(v.raw, r...)
+ }
+}
+
+func (v Value) String() string {
+ switch v.Type {
+ case DecimalType:
+ return fmt.Sprintf("decimal: %f", v.decimal)
+ case IntegerType:
+ return fmt.Sprintf("integer: %d", v.integer)
+ case StringType:
+ return fmt.Sprintf("string: %s", string(v.raw))
+ case QuotedStringType:
+ return fmt.Sprintf("quoted string: %s", string(v.raw))
+ case BoolType:
+ return fmt.Sprintf("bool: %t", v.boolean)
+ default:
+ return "union not set"
+ }
+}
+
+func newLitToken(b []rune) (Token, int, error) {
+ n := 0
+ var err error
+
+ token := Token{}
+ if b[0] == '"' {
+ n, err = getStringValue(b)
+ if err != nil {
+ return token, n, err
+ }
+
+ token = newToken(TokenLit, b[:n], QuotedStringType)
+ } else if isNumberValue(b) {
+ var base int
+ base, n, err = getNumericalValue(b)
+ if err != nil {
+ return token, 0, err
+ }
+
+ value := b[:n]
+ vType := IntegerType
+ if contains(value, '.') || hasExponent(value) {
+ vType = DecimalType
+ }
+ token = newToken(TokenLit, value, vType)
+ token.base = base
+ } else if isBoolValue(b) {
+ n, err = getBoolValue(b)
+
+ token = newToken(TokenLit, b[:n], BoolType)
+ } else {
+ n, err = getValue(b)
+ token = newToken(TokenLit, b[:n], StringType)
+ }
+
+ return token, n, err
+}
+
+// IntValue returns an integer value
+func (v Value) IntValue() int64 {
+ return v.integer
+}
+
+// FloatValue returns a float value
+func (v Value) FloatValue() float64 {
+ return v.decimal
+}
+
+// BoolValue returns a bool value
+func (v Value) BoolValue() bool {
+ return v.boolean
+}
+
+func isTrimmable(r rune) bool {
+ switch r {
+ case '\n', ' ':
+ return true
+ }
+ return false
+}
+
+// StringValue returns the string value
+func (v Value) StringValue() string {
+ switch v.Type {
+ case StringType:
+ return strings.TrimFunc(string(v.raw), isTrimmable)
+ case QuotedStringType:
+ // preserve all characters in the quotes
+ return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1]))
+ default:
+ return strings.TrimFunc(string(v.raw), isTrimmable)
+ }
+}
+
+func contains(runes []rune, c rune) bool {
+ for i := 0; i < len(runes); i++ {
+ if runes[i] == c {
+ return true
+ }
+ }
+
+ return false
+}
+
+func runeCompare(v1 []rune, v2 []rune) bool {
+ if len(v1) != len(v2) {
+ return false
+ }
+
+ for i := 0; i < len(v1); i++ {
+ if v1[i] != v2[i] {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
new file mode 100644
index 000000000..e52ac399f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
@@ -0,0 +1,30 @@
+package ini
+
+func isNewline(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ if b[0] == '\n' {
+ return true
+ }
+
+ if len(b) < 2 {
+ return false
+ }
+
+ return b[0] == '\r' && b[1] == '\n'
+}
+
+func newNewlineToken(b []rune) (Token, int, error) {
+ i := 1
+ if b[0] == '\r' && isNewline(b[1:]) {
+ i++
+ }
+
+ if !isNewline([]rune(b[:i])) {
+ return emptyToken, 0, NewParseError("invalid new line token")
+ }
+
+ return newToken(TokenNL, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
new file mode 100644
index 000000000..a45c0bc56
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
@@ -0,0 +1,152 @@
+package ini
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+)
+
+const (
+ none = numberFormat(iota)
+ binary
+ octal
+ decimal
+ hex
+ exponent
+)
+
+type numberFormat int
+
+// numberHelper is used to dictate what format a number is in
+// and what to do for negative values. Since -1e-4 is a valid
+// number, we cannot just simply check for duplicate negatives.
+type numberHelper struct {
+ numberFormat numberFormat
+
+ negative bool
+ negativeExponent bool
+}
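+
+// As a sketch, scanning "0x2A" drives the helper as follows: the 'x' sets
+// the format to hex via Determine, after which CorrectByte accepts the hex
+// digits '2' and 'A', and Base() reports 16.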
+
+func (b numberHelper) Exists() bool {
+ return b.numberFormat != none
+}
+
+func (b numberHelper) IsNegative() bool {
+ return b.negative || b.negativeExponent
+}
+
+func (b *numberHelper) Determine(c rune) error {
+ if b.Exists() {
+ return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c)))
+ }
+
+ switch c {
+ case 'b':
+ b.numberFormat = binary
+ case 'o':
+ b.numberFormat = octal
+ case 'x':
+ b.numberFormat = hex
+ case 'e', 'E':
+ b.numberFormat = exponent
+ case '-':
+ if b.numberFormat != exponent {
+ b.negative = true
+ } else {
+ b.negativeExponent = true
+ }
+ case '.':
+ b.numberFormat = decimal
+ default:
+ return NewParseError(fmt.Sprintf("invalid number character: %v", string(c)))
+ }
+
+ return nil
+}
+
+func (b numberHelper) CorrectByte(c rune) bool {
+ switch {
+ case b.numberFormat == binary:
+ if !isBinaryByte(c) {
+ return false
+ }
+ case b.numberFormat == octal:
+ if !isOctalByte(c) {
+ return false
+ }
+ case b.numberFormat == hex:
+ if !isHexByte(c) {
+ return false
+ }
+ case b.numberFormat == decimal:
+ if !isDigit(c) {
+ return false
+ }
+ case b.numberFormat == exponent:
+ if !isDigit(c) {
+ return false
+ }
+ case b.negativeExponent:
+ if !isDigit(c) {
+ return false
+ }
+ case b.negative:
+ if !isDigit(c) {
+ return false
+ }
+ default:
+ if !isDigit(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b numberHelper) Base() int {
+ switch b.numberFormat {
+ case binary:
+ return 2
+ case octal:
+ return 8
+ case hex:
+ return 16
+ default:
+ return 10
+ }
+}
+
+func (b numberHelper) String() string {
+ buf := bytes.Buffer{}
+ i := 0
+
+ switch b.numberFormat {
+ case binary:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": binary format\n")
+ case octal:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": octal format\n")
+ case hex:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": hex format\n")
+ case exponent:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": exponent format\n")
+ default:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": integer format\n")
+ }
+
+ if b.negative {
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": negative format\n")
+ }
+
+ if b.negativeExponent {
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n")
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go
new file mode 100644
index 000000000..8a84c7cbe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go
@@ -0,0 +1,39 @@
+package ini
+
+import (
+ "fmt"
+)
+
+var (
+ equalOp = []rune("=")
+ equalColonOp = []rune(":")
+)
+
+func isOp(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case '=':
+ return true
+ case ':':
+ return true
+ default:
+ return false
+ }
+}
+
+func newOpToken(b []rune) (Token, int, error) {
+ tok := Token{}
+
+ switch b[0] {
+ case '=':
+ tok = newToken(TokenOp, equalOp, NoneType)
+ case ':':
+ tok = newToken(TokenOp, equalColonOp, NoneType)
+ default:
+ return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0]))
+ }
+ return tok, 1, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
new file mode 100644
index 000000000..457287019
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
@@ -0,0 +1,43 @@
+package ini
+
+import "fmt"
+
+const (
+ // ErrCodeParseError is returned when a parsing error
+ // has occurred.
+ ErrCodeParseError = "INIParseError"
+)
+
+// ParseError is an error which is returned during any part of
+// the parsing process.
+type ParseError struct {
+ msg string
+}
+
+// NewParseError will return a new ParseError where message
+// is the description of the error.
+func NewParseError(message string) *ParseError {
+ return &ParseError{
+ msg: message,
+ }
+}
+
+// Code will return the ErrCodeParseError
+func (err *ParseError) Code() string {
+ return ErrCodeParseError
+}
+
+// Message returns the error's message
+func (err *ParseError) Message() string {
+ return err.msg
+}
+
+// OrigError returns nil since there will never be any
+// original error.
+func (err *ParseError) OrigError() error {
+ return nil
+}
+
+func (err *ParseError) Error() string {
+ return fmt.Sprintf("%s: %s", err.Code(), err.Message())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
new file mode 100644
index 000000000..7f01cf7c7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
@@ -0,0 +1,60 @@
+package ini
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// ParseStack is a stack that contains a container, the stack portion,
+// and the list which is the list of ASTs that have been successfully
+// parsed.
+type ParseStack struct {
+ top int
+ container []AST
+ list []AST
+ index int
+}
+
+func newParseStack(sizeContainer, sizeList int) ParseStack {
+ return ParseStack{
+ container: make([]AST, sizeContainer),
+ list: make([]AST, sizeList),
+ }
+}
+
+// Pop will return and truncate the last container element.
+func (s *ParseStack) Pop() AST {
+ s.top--
+ return s.container[s.top]
+}
+
+// Push will add the new AST to the container
+func (s *ParseStack) Push(ast AST) {
+ s.container[s.top] = ast
+ s.top++
+}
+
+// MarkComplete will append the AST to the list of completed statements
+func (s *ParseStack) MarkComplete(ast AST) {
+ s.list[s.index] = ast
+ s.index++
+}
+
+// List will return the completed statements
+func (s ParseStack) List() []AST {
+ return s.list[:s.index]
+}
+
+// Len will return the length of the container
+func (s *ParseStack) Len() int {
+ return s.top
+}
+
+func (s ParseStack) String() string {
+ buf := bytes.Buffer{}
+ for i, node := range s.list {
+ buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node))
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
new file mode 100644
index 000000000..f82095ba2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
@@ -0,0 +1,41 @@
+package ini
+
+import (
+ "fmt"
+)
+
+var (
+ emptyRunes = []rune{}
+)
+
+func isSep(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case '[', ']':
+ return true
+ default:
+ return false
+ }
+}
+
+var (
+ openBrace = []rune("[")
+ closeBrace = []rune("]")
+)
+
+func newSepToken(b []rune) (Token, int, error) {
+ tok := Token{}
+
+ switch b[0] {
+ case '[':
+ tok = newToken(TokenSep, openBrace, NoneType)
+ case ']':
+ tok = newToken(TokenSep, closeBrace, NoneType)
+ default:
+ return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0]))
+ }
+ return tok, 1, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
new file mode 100644
index 000000000..da7a4049c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
@@ -0,0 +1,45 @@
+package ini
+
+// skipper is used to skip certain blocks of an ini file.
+// Currently skipper is used to skip nested blocks of ini
+// files. See example below
+//
+// [ foo ]
+// nested = ; this section will be skipped
+// a=b
+// c=d
+// bar=baz ; this will be included
+type skipper struct {
+ shouldSkip bool
+ TokenSet bool
+ prevTok Token
+}
+
+func newSkipper() skipper {
+ return skipper{
+ prevTok: emptyToken,
+ }
+}
+
+func (s *skipper) ShouldSkip(tok Token) bool {
+ // the skip state is cleared only if the previous token was a newline (NL)
+ // and the current token is not whitespace (WS).
+ if s.shouldSkip &&
+ s.prevTok.Type() == TokenNL &&
+ tok.Type() != TokenWS {
+ s.Continue()
+ return false
+ }
+ s.prevTok = tok
+ return s.shouldSkip
+}
+
+func (s *skipper) Skip() {
+ s.shouldSkip = true
+}
+
+func (s *skipper) Continue() {
+ s.shouldSkip = false
+ // an empty token is assigned as we return to the default state, where shouldSkip is false
+ s.prevTok = emptyToken
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
new file mode 100644
index 000000000..18f3fe893
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
@@ -0,0 +1,35 @@
+package ini
+
+// Statement is an empty AST mostly used for transitioning states.
+func newStatement() AST {
+ return newAST(ASTKindStatement, AST{})
+}
+
+// SectionStatement represents a section AST
+func newSectionStatement(tok Token) AST {
+ return newASTWithRootToken(ASTKindSectionStatement, tok)
+}
+
+// ExprStatement represents a completed expression AST
+func newExprStatement(ast AST) AST {
+ return newAST(ASTKindExprStatement, ast)
+}
+
+// CommentStatement represents a comment in the ini definition.
+//
+// grammar:
+// comment -> #comment' | ;comment'
+// comment' -> epsilon | value
+func newCommentStatement(tok Token) AST {
+ return newAST(ASTKindCommentStatement, newExpression(tok))
+}
+
+// CompletedSectionStatement represents a completed section
+func newCompletedSectionStatement(ast AST) AST {
+ return newAST(ASTKindCompletedSectionStatement, ast)
+}
+
+// SkipStatement is used to skip whole statements
+func newSkipStatement(ast AST) AST {
+ return newAST(ASTKindSkipStatement, ast)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
new file mode 100644
index 000000000..305999d29
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
@@ -0,0 +1,284 @@
+package ini
+
+import (
+ "fmt"
+)
+
+// getStringValue will return the number of runes consumed by a quoted
+// string, including both quotes.
+//
+// An error will be returned if the string is not properly terminated.
+func getStringValue(b []rune) (int, error) {
+ if b[0] != '"' {
+ return 0, NewParseError("strings must start with '\"'")
+ }
+
+ endQuote := false
+ i := 1
+
+ for ; i < len(b) && !endQuote; i++ {
+ if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped {
+ endQuote = true
+ break
+ } else if escaped {
+ /*c, err := getEscapedByte(b[i])
+ if err != nil {
+ return 0, err
+ }
+
+ b[i-1] = c
+ b = append(b[:i], b[i+1:]...)
+ i--*/
+
+ continue
+ }
+ }
+
+ if !endQuote {
+ return 0, NewParseError("missing '\"' in string value")
+ }
+
+ return i + 1, nil
+}
+
+// getBoolValue will return the number of runes consumed by a boolean
+// literal.
+//
+// An error will be returned if the runes do not spell a valid boolean
+// value.
+func getBoolValue(b []rune) (int, error) {
+ if len(b) < 4 {
+ return 0, NewParseError("invalid boolean value")
+ }
+
+ n := 0
+ for _, lv := range literalValues {
+ if len(lv) > len(b) {
+ continue
+ }
+
+ if isLitValue(lv, b) {
+ n = len(lv)
+ }
+ }
+
+ if n == 0 {
+ return 0, NewParseError("invalid boolean value")
+ }
+
+ return n, nil
+}
+
+// getNumericalValue will return the base of the number and the number of
+// runes consumed by it.
+//
+// An error will be returned if the number is not correctly formed.
+func getNumericalValue(b []rune) (int, int, error) {
+ if !isDigit(b[0]) {
+ return 0, 0, NewParseError("invalid digit value")
+ }
+
+ i := 0
+ helper := numberHelper{}
+
+loop:
+ for negativeIndex := 0; i < len(b); i++ {
+ negativeIndex++
+
+ if !isDigit(b[i]) {
+ switch b[i] {
+ case '-':
+ if helper.IsNegative() || negativeIndex != 1 {
+ return 0, 0, NewParseError("parse error '-'")
+ }
+
+ n := getNegativeNumber(b[i:])
+ i += (n - 1)
+ helper.Determine(b[i])
+ continue
+ case '.':
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+ case 'e', 'E':
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+
+ negativeIndex = 0
+ case 'b':
+ if helper.numberFormat == hex {
+ break
+ }
+ fallthrough
+ case 'o', 'x':
+ if i == 0 && b[i] != '0' {
+ return 0, 0, NewParseError("incorrect base format, expected leading '0'")
+ }
+
+ if i != 1 {
+ return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i))
+ }
+
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+ default:
+ if isWhitespace(b[i]) {
+ break loop
+ }
+
+ if isNewline(b[i:]) {
+ break loop
+ }
+
+ if !(helper.numberFormat == hex && isHexByte(b[i])) {
+ if i+2 < len(b) && !isNewline(b[i:i+2]) {
+ return 0, 0, NewParseError("invalid numerical character")
+ } else if !isNewline([]rune{b[i]}) {
+ return 0, 0, NewParseError("invalid numerical character")
+ }
+
+ break loop
+ }
+ }
+ }
+ }
+
+ return helper.Base(), i, nil
+}
+
+// isDigit will return whether or not the rune is a decimal digit.
+func isDigit(b rune) bool {
+ return b >= '0' && b <= '9'
+}
+
+func hasExponent(v []rune) bool {
+ return contains(v, 'e') || contains(v, 'E')
+}
+
+func isBinaryByte(b rune) bool {
+ switch b {
+ case '0', '1':
+ return true
+ default:
+ return false
+ }
+}
+
+func isOctalByte(b rune) bool {
+ switch b {
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ return true
+ default:
+ return false
+ }
+}
+
+func isHexByte(b rune) bool {
+ if isDigit(b) {
+ return true
+ }
+ return (b >= 'A' && b <= 'F') ||
+ (b >= 'a' && b <= 'f')
+}
+
+func getValue(b []rune) (int, error) {
+ i := 0
+
+ for i < len(b) {
+ if isNewline(b[i:]) {
+ break
+ }
+
+ if isOp(b[i:]) {
+ break
+ }
+
+ valid, n, err := isValid(b[i:])
+ if err != nil {
+ return 0, err
+ }
+
+ if !valid {
+ break
+ }
+
+ i += n
+ }
+
+ return i, nil
+}
+
+// getNegativeNumber will return the number of runes that make up a
+// negative number, iterating through the characters until a non-digit
+// has been found.
+func getNegativeNumber(b []rune) int {
+ if b[0] != '-' {
+ return 0
+ }
+
+ i := 1
+ for ; i < len(b); i++ {
+ if !isDigit(b[i]) {
+ return i
+ }
+ }
+
+ return i
+}
+
+// isEscaped will return whether or not the character is an escaped
+// character.
+func isEscaped(value []rune, b rune) bool {
+ if len(value) == 0 {
+ return false
+ }
+
+ switch b {
+ case '\'': // single quote
+ case '"': // quote
+ case 'n': // newline
+ case 't': // tab
+ case '\\': // backslash
+ default:
+ return false
+ }
+
+ return value[len(value)-1] == '\\'
+}
+
+func getEscapedByte(b rune) (rune, error) {
+ switch b {
+ case '\'': // single quote
+ return '\'', nil
+ case '"': // quote
+ return '"', nil
+ case 'n': // newline
+ return '\n', nil
+ case 't': // tab
+ return '\t', nil
+ case '\\': // backslash
+ return '\\', nil
+ default:
+ return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
+ }
+}
+
+func removeEscapedCharacters(b []rune) []rune {
+ for i := 0; i < len(b); i++ {
+ if isEscaped(b[:i], b[i]) {
+ c, err := getEscapedByte(b[i])
+ if err != nil {
+ return b
+ }
+
+ b[i-1] = c
+ b = append(b[:i], b[i+1:]...)
+ i--
+ }
+ }
+
+ return b
+}
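+
+// For example (illustrative): removeEscapedCharacters([]rune(`a\"b`)) yields
+// the runes `a"b`, collapsing the escape sequence into its literal character.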
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
new file mode 100644
index 000000000..94841c324
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
@@ -0,0 +1,166 @@
+package ini
+
+import (
+ "fmt"
+ "sort"
+)
+
+// Visitor is an interface used by walkers that will
+// traverse an array of ASTs.
+type Visitor interface {
+ VisitExpr(AST) error
+ VisitStatement(AST) error
+}
+
+// DefaultVisitor is used to visit statements and expressions
+// and ensure that they are both of the correct format.
+// In addition, upon visiting this will build sections and populate
+// the Sections field which can be used to retrieve profile
+// configuration.
+type DefaultVisitor struct {
+ scope string
+ Sections Sections
+}
+
+// NewDefaultVisitor returns a new DefaultVisitor.
+func NewDefaultVisitor() *DefaultVisitor {
+ return &DefaultVisitor{
+ Sections: Sections{
+ container: map[string]Section{},
+ },
+ }
+}
+
+// VisitExpr visits expression statements, storing their key/value pairs in
+// the current section.
+func (v *DefaultVisitor) VisitExpr(expr AST) error {
+ t := v.Sections.container[v.scope]
+ if t.values == nil {
+ t.values = values{}
+ }
+
+ switch expr.Kind {
+ case ASTKindExprStatement:
+ opExpr := expr.GetRoot()
+ switch opExpr.Kind {
+ case ASTKindEqualExpr:
+ children := opExpr.GetChildren()
+ if len(children) <= 1 {
+ return NewParseError("unexpected token type")
+ }
+
+ rhs := children[1]
+
+ if rhs.Root.Type() != TokenLit {
+ return NewParseError("unexpected token type")
+ }
+
+ key := EqualExprKey(opExpr)
+ v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
+ if err != nil {
+ return err
+ }
+
+ t.values[key] = v
+ default:
+ return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+ }
+ default:
+ return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+ }
+
+ v.Sections.container[v.scope] = t
+ return nil
+}
+
+// VisitStatement visits completed section statements, creating the section
+// and updating the current scope.
+func (v *DefaultVisitor) VisitStatement(stmt AST) error {
+ switch stmt.Kind {
+ case ASTKindCompletedSectionStatement:
+ child := stmt.GetRoot()
+ if child.Kind != ASTKindSectionStatement {
+ return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
+ }
+
+ name := string(child.Root.Raw())
+ v.Sections.container[name] = Section{}
+ v.scope = name
+ default:
+ return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
+ }
+
+ return nil
+}
+
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+ container map[string]Section
+}
+
+// GetSection will return section p. If section p does not exist,
+// false will be returned in the second parameter.
+func (t Sections) GetSection(p string) (Section, bool) {
+ v, ok := t.container[p]
+ return v, ok
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+ keys := make([]string, len(t.container))
+ i := 0
+ for k := range t.container {
+ keys[i] = k
+ i++
+ }
+
+ sort.Strings(keys)
+ return keys
+}
+
+// Section contains a name and values. This represents
+// a sectioned entry in a configuration file.
+type Section struct {
+ Name string
+ values values
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+ _, ok := t.values[k]
+ return ok
+}
+
+// ValueType will return what type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+ v, ok := t.values[k]
+ return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) bool {
+ return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) int64 {
+ return t.values[k].IntValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) float64 {
+ return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+ _, ok := t.values[k]
+ if !ok {
+ return ""
+ }
+ return t.values[k].StringValue()
+}
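+
+// A minimal usage sketch (illustrative only) of the typed accessors:
+//
+//   if section, ok := sections.GetSection("default"); ok {
+//       if section.Has("max_attempts") {
+//           _ = section.Int("max_attempts")
+//       }
+//   }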
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
new file mode 100644
index 000000000..99915f7f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
@@ -0,0 +1,25 @@
+package ini
+
+// Walk will traverse the AST using v, the Visitor.
+func Walk(tree []AST, v Visitor) error {
+ for _, node := range tree {
+ switch node.Kind {
+ case ASTKindExpr,
+ ASTKindExprStatement:
+
+ if err := v.VisitExpr(node); err != nil {
+ return err
+ }
+ case ASTKindStatement,
+ ASTKindCompletedSectionStatement,
+ ASTKindNestedSectionStatement,
+ ASTKindCompletedNestedSectionStatement:
+
+ if err := v.VisitStatement(node); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
new file mode 100644
index 000000000..7ffb4ae06
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
@@ -0,0 +1,24 @@
+package ini
+
+import (
+ "unicode"
+)
+
+// isWhitespace will return whether or not the character is
+// a whitespace character.
+//
+// Whitespace is defined as any Unicode space character other than '\n' and '\r'.
+func isWhitespace(c rune) bool {
+ return unicode.IsSpace(c) && c != '\n' && c != '\r'
+}
+
+func newWSToken(b []rune) (Token, int, error) {
+ i := 0
+ for ; i < len(b); i++ {
+ if !isWhitespace(b[i]) {
+ break
+ }
+ }
+
+ return newToken(TokenWS, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
new file mode 100644
index 000000000..6c443988b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
@@ -0,0 +1,12 @@
+package sdkio
+
+const (
+ // Byte is 8 bits
+ Byte int64 = 1
+ // KibiByte (KiB) is 1024 Bytes
+ KibiByte = Byte * 1024
+ // MebiByte (MiB) is 1024 KiB
+ MebiByte = KibiByte * 1024
+ // GibiByte (GiB) is 1024 MiB
+ GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
new file mode 100644
index 000000000..5aa9137e0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
@@ -0,0 +1,10 @@
+// +build !go1.7
+
+package sdkio
+
+// Copy of Go 1.7 io package's Seeker constants.
+const (
+ SeekStart = 0 // seek relative to the origin of the file
+ SeekCurrent = 1 // seek relative to the current offset
+ SeekEnd = 2 // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
new file mode 100644
index 000000000..e5f005613
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
@@ -0,0 +1,12 @@
+// +build go1.7
+
+package sdkio
+
+import "io"
+
+// Alias for Go 1.7 io package Seeker constants
+const (
+ SeekStart = io.SeekStart // seek relative to the origin of the file
+ SeekCurrent = io.SeekCurrent // seek relative to the current offset
+ SeekEnd = io.SeekEnd // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
new file mode 100644
index 000000000..44898eed0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
@@ -0,0 +1,15 @@
+// +build go1.10
+
+package sdkmath
+
+import "math"
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+// Round(±0) = ±0
+// Round(±Inf) = ±Inf
+// Round(NaN) = NaN
+func Round(x float64) float64 {
+ return math.Round(x)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
new file mode 100644
index 000000000..810ec7f08
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
@@ -0,0 +1,56 @@
+// +build !go1.10
+
+package sdkmath
+
+import "math"
+
+// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
+// Go versions prior to Go 1.10.
+const (
+ uvone = 0x3FF0000000000000
+ mask = 0x7FF
+ shift = 64 - 11 - 1
+ bias = 1023
+ signMask = 1 << 63
+ fracMask = 1<<shift - 1
+)
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+// Round(±0) = ±0
+// Round(±Inf) = ±Inf
+// Round(NaN) = NaN
+func Round(x float64) float64 {
+ // Round is a faster implementation of:
+ //
+ // func Round(x float64) float64 {
+ //   t := Trunc(x)
+ //   if Abs(x-t) >= 0.5 {
+ //     return t + Copysign(1, x)
+ //   }
+ //   return t
+ // }
+ bits := math.Float64bits(x)
+ e := uint(bits>>shift) & mask
+ if e < bias {
+ // Round abs(x) < 1 including denormals.
+ bits &= signMask // +-0
+ if e == bias-1 {
+ bits |= uvone // +-1
+ }
+ } else if e < bias+shift {
+ // Round any abs(x) >= 1 containing a fractional component [0,1).
+ //
+ // Numbers with larger exponents are returned unchanged since they
+ // must be either an integer, infinity, or NaN.
+ const half = 1 << (shift - 1)
+ e -= bias
+ bits += half >> e
+ bits &^= fracMask >> e
+ }
+ return math.Float64frombits(bits)
+}
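
The pre-1.10 fallback manipulates the IEEE 754 bits directly but matches math.Round's half-away-from-zero semantics; a quick check against the standard library:

package main

import (
	"fmt"
	"math"
)

func main() {
	for _, x := range []float64{0.5, 1.5, -0.5, -2.5} {
		fmt.Println(x, "->", math.Round(x))
	}
	// 0.5 -> 1, 1.5 -> 2, -0.5 -> -1, -2.5 -> -3
}
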
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
new file mode 100644
index 000000000..0c9802d87
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
@@ -0,0 +1,29 @@
+package sdkrand
+
+import (
+ "math/rand"
+ "sync"
+ "time"
+)
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
+
+// SeededRand is a new RNG using a thread-safe implementation of rand.Source
+var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
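
Because lockedSource serializes every Int63 and Seed call behind a mutex, SeededRand can safely be shared across goroutines. A self-contained sketch of the same pattern:

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

type lockedSource struct {
	mu  sync.Mutex
	src rand.Source
}

func (s *lockedSource) Int63() int64 {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.src.Int63()
}

func (s *lockedSource) Seed(seed int64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.src.Seed(seed)
}

func main() {
	rng := rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = rng.Int63() // safe: source access is serialized
		}()
	}
	wg.Wait()
	fmt.Println("no data race on the shared source")
}
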
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
new file mode 100644
index 000000000..f4651da2d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
@@ -0,0 +1,11 @@
+// +build go1.6
+
+package sdkrand
+
+import "math/rand"
+
+// Read provides the stub for the math/rand.Rand.Read method for Go
+// versions 1.6 and greater.
+func Read(r *rand.Rand, p []byte) (int, error) {
+ return r.Read(p)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
new file mode 100644
index 000000000..b1d93a33d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
@@ -0,0 +1,24 @@
+// +build !go1.6
+
+package sdkrand
+
+import "math/rand"
+
+// Read backfills Go 1.6's math/rand Read method for Go 1.5.
+func Read(r *rand.Rand, p []byte) (n int, err error) {
+ // Copy of the Go standard library's math/rand read function, which was
+ // not added to the standard library until Go 1.6.
+ var pos int8
+ var val int64
+ for n = 0; n < len(p); n++ {
+ if pos == 0 {
+ val = r.Int63()
+ pos = 7
+ }
+ p[n] = byte(val)
+ val >>= 8
+ pos--
+ }
+
+ return n, err
+}
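
Each Int63 value supplies up to seven low-order bytes before a fresh one is drawn (pos counts down from 7). The same loop, runnable on its own:

package main

import (
	"fmt"
	"math/rand"
)

// fillBytes mirrors the Go 1.5 backfill: each Int63 supplies up to
// seven low-order bytes before a new value is requested.
func fillBytes(r *rand.Rand, p []byte) {
	var pos int8
	var val int64
	for n := 0; n < len(p); n++ {
		if pos == 0 {
			val = r.Int63()
			pos = 7
		}
		p[n] = byte(val)
		val >>= 8
		pos--
	}
}

func main() {
	r := rand.New(rand.NewSource(1))
	buf := make([]byte, 10)
	fillBytes(r, buf)
	fmt.Printf("%x\n", buf)
}
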
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
new file mode 100644
index 000000000..38ea61afe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
@@ -0,0 +1,23 @@
+package sdkuri
+
+import (
+ "path"
+ "strings"
+)
+
+// PathJoin will join the elements of the path delimited by the "/"
+// character. Similar to path.Join with the exception the trailing "/"
+// character is preserved if present.
+func PathJoin(elems ...string) string {
+ if len(elems) == 0 {
+ return ""
+ }
+
+ hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
+ str := path.Join(elems...)
+ if hasTrailing && str != "/" {
+ str += "/"
+ }
+
+ return str
+}
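
The only difference from path.Join is the preserved trailing slash, which matters for prefix-style keys; compare:

package main

import (
	"fmt"
	"path"
	"strings"
)

func PathJoin(elems ...string) string {
	if len(elems) == 0 {
		return ""
	}
	hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
	str := path.Join(elems...)
	if hasTrailing && str != "/" {
		str += "/"
	}
	return str
}

func main() {
	fmt.Println(path.Join("bucket", "prefix/")) // bucket/prefix
	fmt.Println(PathJoin("bucket", "prefix/"))  // bucket/prefix/
}
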
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
new file mode 100644
index 000000000..7da8a49ce
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
@@ -0,0 +1,12 @@
+package shareddefaults
+
+const (
+ // ECSCredsProviderEnvVar is an environment variable key used to
+ // determine which path needs to be hit.
+ ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+)
+
+// ECSContainerCredentialsURI is the endpoint to retrieve container
+// credentials. This can be overridden in tests to ensure the credential
+// process is behaving correctly.
+var ECSContainerCredentialsURI = "http://169.254.170.2"
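
The credentials provider joins this fixed link-local host with the relative URI injected into the task's environment; roughly as below (the relative URI value here is hypothetical):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Hypothetical value, as the container runtime would inject it.
	os.Setenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI", "/v2/credentials/abc-123")

	base := "http://169.254.170.2"
	full := base + os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
	fmt.Println(full) // http://169.254.170.2/v2/credentials/abc-123
}
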
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
new file mode 100644
index 000000000..ebcbc2b40
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
@@ -0,0 +1,40 @@
+package shareddefaults
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared credentials file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "credentials")
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "config")
+}
+
+// UserHomeDir returns the home directory for the user the process is
+// running under.
+func UserHomeDir() string {
+ if runtime.GOOS == "windows" { // Windows
+ return os.Getenv("USERPROFILE")
+ }
+
+ // *nix
+ return os.Getenv("HOME")
+}
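
A quick check of the resolved defaults on the current platform (same logic, standalone):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

func userHomeDir() string {
	if runtime.GOOS == "windows" {
		return os.Getenv("USERPROFILE")
	}
	return os.Getenv("HOME")
}

func main() {
	fmt.Println(filepath.Join(userHomeDir(), ".aws", "credentials"))
	fmt.Println(filepath.Join(userHomeDir(), ".aws", "config"))
}
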
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
new file mode 100644
index 000000000..d7d42db0a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
@@ -0,0 +1,68 @@
+package protocol
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ValidateEndpointHostHandler is a request handler that will validate that
+// the request endpoint's host is a valid RFC 3986 host.
+var ValidateEndpointHostHandler = request.NamedHandler{
+ Name: "awssdk.protocol.ValidateEndpointHostHandler",
+ Fn: func(r *request.Request) {
+ err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host)
+ if err != nil {
+ r.Error = err
+ }
+ },
+}
+
+// ValidateEndpointHost validates that the host string passed in is a valid RFC
+// 3986 host. Returns error if the host is not valid.
+func ValidateEndpointHost(opName, host string) error {
+ paramErrs := request.ErrInvalidParams{Context: opName}
+ labels := strings.Split(host, ".")
+
+ for i, label := range labels {
+ if i == len(labels)-1 && len(label) == 0 {
+ // Allow trailing dot for FQDN hosts.
+ continue
+ }
+
+ if !ValidHostLabel(label) {
+ paramErrs.Add(request.NewErrParamFormat(
+ "endpoint host label", "[a-zA-Z0-9-]{1,63}", label))
+ }
+ }
+
+ if len(host) > 255 {
+ paramErrs.Add(request.NewErrParamMaxLen(
+ "endpoint host", 255, host,
+ ))
+ }
+
+ if paramErrs.Len() > 0 {
+ return paramErrs
+ }
+ return nil
+}
+
+// ValidHostLabel returns whether the label is a valid RFC 3986 host label.
+func ValidHostLabel(label string) bool {
+ if l := len(label); l == 0 || l > 63 {
+ return false
+ }
+ for _, r := range label {
+ switch {
+ case r >= '0' && r <= '9':
+ case r >= 'A' && r <= 'Z':
+ case r >= 'a' && r <= 'z':
+ case r == '-':
+ default:
+ return false
+ }
+ }
+
+ return true
+}
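
ValidateEndpointHost splits the host on '.' and checks each label against [a-zA-Z0-9-]{1,63}, plus a 255-character cap on the whole host. The label check in isolation:

package main

import "fmt"

func validHostLabel(label string) bool {
	if l := len(label); l == 0 || l > 63 {
		return false
	}
	for _, r := range label {
		switch {
		case r >= '0' && r <= '9':
		case r >= 'A' && r <= 'Z':
		case r >= 'a' && r <= 'z':
		case r == '-':
		default:
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validHostLabel("my-bucket")) // true
	fmt.Println(validHostLabel("my_bucket")) // false: '_' is not allowed
	fmt.Println(validHostLabel(""))          // false: empty label
}
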
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go
new file mode 100644
index 000000000..915b0fcaf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go
@@ -0,0 +1,54 @@
+package protocol
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// HostPrefixHandlerName is the handler name for the host prefix request
+// handler.
+const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler"
+
+// NewHostPrefixHandler constructs a build handler
+func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler {
+ builder := HostPrefixBuilder{
+ Prefix: prefix,
+ LabelsFn: labelsFn,
+ }
+
+ return request.NamedHandler{
+ Name: HostPrefixHandlerName,
+ Fn: builder.Build,
+ }
+}
+
+// HostPrefixBuilder provides the request handler to expand and prepend
+// the host prefix into the operation's request endpoint host.
+type HostPrefixBuilder struct {
+ Prefix string
+ LabelsFn func() map[string]string
+}
+
+// Build updates the passed in Request with the HostPrefix template expanded.
+func (h HostPrefixBuilder) Build(r *request.Request) {
+ if aws.BoolValue(r.Config.DisableEndpointHostPrefix) {
+ return
+ }
+
+ var labels map[string]string
+ if h.LabelsFn != nil {
+ labels = h.LabelsFn()
+ }
+
+ prefix := h.Prefix
+ for name, value := range labels {
+ prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
+ }
+
+ r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host
+ if len(r.HTTPRequest.Host) > 0 {
+ r.HTTPRequest.Host = prefix + r.HTTPRequest.Host
+ }
+}
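
Build textually expands {label} placeholders in the prefix and prepends the result to the request host. The expansion step on its own (the AccountId label and host are illustrative):

package main

import (
	"fmt"
	"strings"
)

func expandPrefix(prefix string, labels map[string]string) string {
	for name, value := range labels {
		prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
	}
	return prefix
}

func main() {
	prefix := expandPrefix("{AccountId}.", map[string]string{"AccountId": "123456789012"})
	fmt.Println(prefix + "service.region.amazonaws.com")
	// 123456789012.service.region.amazonaws.com
}
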
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 000000000..53831dff9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+ "crypto/rand"
+ "fmt"
+ "reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing, and should not be used
+// otherwise.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an Idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken,
+// and which are not already set, can be auto-filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+ switch u := v.Interface().(type) {
+ // To auto fill an Idempotency token the field must be a string,
+ // tagged for auto fill, and have a zero value.
+ case *string:
+ return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ case string:
+ return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ }
+
+ return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+ b := make([]byte, 16)
+ RandReader.Read(b)
+
+ return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the value provided with an Idempotency Token,
+// given that the value can be set. Will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() && v.CanSet() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ v = reflect.Indirect(v)
+
+ if !v.CanSet() {
+ panic(fmt.Sprintf("unable to set idempotnecy token %v", v))
+ }
+
+ b := make([]byte, 16)
+ _, err := rand.Read(b)
+ if err != nil {
+ // TODO handle error
+ return
+ }
+
+ v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+ // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+ // 13th character is "4"
+ u[6] = (u[6] | 0x40) & 0x4F
+ // 17th character is "8", "9", "a", or "b"
+ u[8] = (u[8] | 0x80) & 0xBF
+
+ return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
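
The two masked bytes pin the UUID version nibble to 4 and the variant bits to 10xxxxxx. A standalone copy of the same transform, fed from crypto/rand as GetIdempotencyToken is:

package main

import (
	"crypto/rand"
	"fmt"
)

func uuidVersion4(u []byte) string {
	u[6] = (u[6] | 0x40) & 0x4F // force the version nibble to 4
	u[8] = (u[8] | 0x80) & 0xBF // force the variant bits to 10xxxxxx
	return fmt.Sprintf("%X-%X-%X-%X-%X", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
}

func main() {
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	fmt.Println(uuidVersion4(b)) // e.g. 0B3C6E8A-91D2-4F7B-8C1D-2E3F4A5B6C7D
}
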
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
new file mode 100644
index 000000000..864fb6704
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
@@ -0,0 +1,296 @@
+// Package jsonutil provides JSON serialization of AWS requests and responses.
+package jsonutil
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+var timeType = reflect.ValueOf(time.Time{}).Type()
+var byteSliceType = reflect.ValueOf([]byte{}).Type()
+
+// BuildJSON builds a JSON string for a given object v.
+func BuildJSON(v interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+
+ err := buildAny(reflect.ValueOf(v), &buf, "")
+ return buf.Bytes(), err
+}
+
+func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+ origVal := value
+ value = reflect.Indirect(value)
+ if !value.IsValid() {
+ return nil
+ }
+
+ vtype := value.Type()
+
+ t := tag.Get("type")
+ if t == "" {
+ switch vtype.Kind() {
+ case reflect.Struct:
+ // also it can't be a time object
+ if value.Type() != timeType {
+ t = "structure"
+ }
+ case reflect.Slice:
+ // also it can't be a byte slice
+ if _, ok := value.Interface().([]byte); !ok {
+ t = "list"
+ }
+ case reflect.Map:
+ // cannot be a JSONValue map
+ if _, ok := value.Interface().(aws.JSONValue); !ok {
+ t = "map"
+ }
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := vtype.FieldByName("_"); ok {
+ tag = field.Tag
+ }
+ return buildStruct(value, buf, tag)
+ case "list":
+ return buildList(value, buf, tag)
+ case "map":
+ return buildMap(value, buf, tag)
+ default:
+ return buildScalar(origVal, buf, tag)
+ }
+}
+
+func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ // unwrap payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := value.Type().FieldByName(payload)
+ tag = field.Tag
+ value = elemOf(value.FieldByName(payload))
+
+ if !value.IsValid() {
+ return nil
+ }
+ }
+
+ buf.WriteByte('{')
+
+ t := value.Type()
+ first := true
+ for i := 0; i < t.NumField(); i++ {
+ member := value.Field(i)
+
+ // This allocates the most memory.
+ // Additionally, we cannot skip nil fields due to
+ // idempotency auto filling.
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("json") == "-" {
+ continue
+ }
+ if field.Tag.Get("location") != "" {
+ continue // ignore non-body elements
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ if protocol.CanSetIdempotencyToken(member, field) {
+ token := protocol.GetIdempotencyToken()
+ member = reflect.ValueOf(&token)
+ }
+
+ if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() {
+ continue // ignore unset fields
+ }
+
+ if first {
+ first = false
+ } else {
+ buf.WriteByte(',')
+ }
+
+ // figure out what this field is called
+ name := field.Name
+ if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+
+ writeString(name, buf)
+ buf.WriteString(`:`)
+
+ err := buildAny(member, buf, field.Tag)
+ if err != nil {
+ return err
+ }
+
+ }
+
+ buf.WriteString("}")
+
+ return nil
+}
+
+func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+ buf.WriteString("[")
+
+ for i := 0; i < value.Len(); i++ {
+ buildAny(value.Index(i), buf, "")
+
+ if i < value.Len()-1 {
+ buf.WriteString(",")
+ }
+ }
+
+ buf.WriteString("]")
+
+ return nil
+}
+
+type sortedValues []reflect.Value
+
+func (sv sortedValues) Len() int { return len(sv) }
+func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() }
+
+func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+ buf.WriteString("{")
+
+ sv := sortedValues(value.MapKeys())
+ sort.Sort(sv)
+
+ for i, k := range sv {
+ if i > 0 {
+ buf.WriteByte(',')
+ }
+
+ writeString(k.String(), buf)
+ buf.WriteString(`:`)
+
+ buildAny(value.MapIndex(k), buf, "")
+ }
+
+ buf.WriteString("}")
+
+ return nil
+}
+
+func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+ // prevents allocation on the heap.
+ scratch := [64]byte{}
+ switch value := reflect.Indirect(v); value.Kind() {
+ case reflect.String:
+ writeString(value.String(), buf)
+ case reflect.Bool:
+ if value.Bool() {
+ buf.WriteString("true")
+ } else {
+ buf.WriteString("false")
+ }
+ case reflect.Int64:
+ buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10))
+ case reflect.Float64:
+ f := value.Float()
+ if math.IsInf(f, 0) || math.IsNaN(f) {
+ return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)}
+ }
+ buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64))
+ default:
+ switch converted := value.Interface().(type) {
+ case time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.UnixTimeFormatName
+ }
+
+ ts := protocol.FormatTime(format, converted)
+ if format != protocol.UnixTimeFormatName {
+ ts = `"` + ts + `"`
+ }
+
+ buf.WriteString(ts)
+ case []byte:
+ if !value.IsNil() {
+ buf.WriteByte('"')
+ if len(converted) < 1024 {
+ // for small buffers, using Encode directly is much faster.
+ dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted)))
+ base64.StdEncoding.Encode(dst, converted)
+ buf.Write(dst)
+ } else {
+ // for large buffers, avoid unnecessary extra temporary
+ // buffer space.
+ enc := base64.NewEncoder(base64.StdEncoding, buf)
+ enc.Write(converted)
+ enc.Close()
+ }
+ buf.WriteByte('"')
+ }
+ case aws.JSONValue:
+ str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape)
+ if err != nil {
+ return fmt.Errorf("unable to encode JSONValue, %v", err)
+ }
+ buf.WriteString(str)
+ default:
+ return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
+ }
+ }
+ return nil
+}
+
+var hex = "0123456789abcdef"
+
+func writeString(s string, buf *bytes.Buffer) {
+ buf.WriteByte('"')
+ for i := 0; i < len(s); i++ {
+ if s[i] == '"' {
+ buf.WriteString(`\"`)
+ } else if s[i] == '\\' {
+ buf.WriteString(`\\`)
+ } else if s[i] == '\b' {
+ buf.WriteString(`\b`)
+ } else if s[i] == '\f' {
+ buf.WriteString(`\f`)
+ } else if s[i] == '\r' {
+ buf.WriteString(`\r`)
+ } else if s[i] == '\t' {
+ buf.WriteString(`\t`)
+ } else if s[i] == '\n' {
+ buf.WriteString(`\n`)
+ } else if s[i] < 32 {
+ buf.WriteString("\\u00")
+ buf.WriteByte(hex[s[i]>>4])
+ buf.WriteByte(hex[s[i]&0xF])
+ } else {
+ buf.WriteByte(s[i])
+ }
+ }
+ buf.WriteByte('"')
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
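
BuildJSON walks the shape with the SDK's own struct tags (locationName, type, payload) rather than encoding/json's, skipping nil pointers entirely. A runnable sketch with an illustrative shape (not a real API model):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
)

type PutItemInput struct {
	_         struct{} `type:"structure"`
	TableName *string  `locationName:"TableName" type:"string"`
	Limit     *int64   `locationName:"Limit" type:"integer"`
}

func main() {
	table := "orders"
	b, err := jsonutil.BuildJSON(&PutItemInput{TableName: &table})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"TableName":"orders"}, the nil Limit is omitted
}
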
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
new file mode 100644
index 000000000..ea0da79a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
@@ -0,0 +1,250 @@
+package jsonutil
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// UnmarshalJSONError unmarshals the reader's JSON document into the passed in
+// type. The value to unmarshal the JSON document into must be a pointer to
+// the type.
+func UnmarshalJSONError(v interface{}, stream io.Reader) error {
+ var errBuf bytes.Buffer
+ body := io.TeeReader(stream, &errBuf)
+
+ err := json.NewDecoder(body).Decode(v)
+ if err != nil {
+ msg := "failed decoding error message"
+ if err == io.EOF {
+ msg = "error message missing"
+ err = nil
+ }
+ return awserr.NewUnmarshalError(err, msg, errBuf.Bytes())
+ }
+
+ return nil
+}
+
+// UnmarshalJSON reads a stream and unmarshals the results in object v.
+func UnmarshalJSON(v interface{}, stream io.Reader) error {
+ var out interface{}
+
+ err := json.NewDecoder(stream).Decode(&out)
+ if err == io.EOF {
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ return unmarshalAny(reflect.ValueOf(v), out, "")
+}
+
+func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+ vtype := value.Type()
+ if vtype.Kind() == reflect.Ptr {
+ vtype = vtype.Elem() // check kind of actual element type
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch vtype.Kind() {
+ case reflect.Struct:
+ // also it can't be a time object
+ if _, ok := value.Interface().(*time.Time); !ok {
+ t = "structure"
+ }
+ case reflect.Slice:
+ // also it can't be a byte slice
+ if _, ok := value.Interface().([]byte); !ok {
+ t = "list"
+ }
+ case reflect.Map:
+ // cannot be a JSONValue map
+ if _, ok := value.Interface().(aws.JSONValue); !ok {
+ t = "map"
+ }
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := vtype.FieldByName("_"); ok {
+ tag = field.Tag
+ }
+ return unmarshalStruct(value, data, tag)
+ case "list":
+ return unmarshalList(value, data, tag)
+ case "map":
+ return unmarshalMap(value, data, tag)
+ default:
+ return unmarshalScalar(value, data, tag)
+ }
+}
+
+func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+ if data == nil {
+ return nil
+ }
+ mapData, ok := data.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("JSON value is not a structure (%#v)", data)
+ }
+
+ t := value.Type()
+ if value.Kind() == reflect.Ptr {
+ if value.IsNil() { // create the structure if it's nil
+ s := reflect.New(value.Type().Elem())
+ value.Set(s)
+ value = s
+ }
+
+ value = value.Elem()
+ t = t.Elem()
+ }
+
+ // unwrap any payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := t.FieldByName(payload)
+ return unmarshalAny(value.FieldByName(payload), data, field.Tag)
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+
+ // figure out what this field is called
+ name := field.Name
+ if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+
+ member := value.FieldByIndex(field.Index)
+ err := unmarshalAny(member, mapData[name], field.Tag)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+ if data == nil {
+ return nil
+ }
+ listData, ok := data.([]interface{})
+ if !ok {
+ return fmt.Errorf("JSON value is not a list (%#v)", data)
+ }
+
+ if value.IsNil() {
+ l := len(listData)
+ value.Set(reflect.MakeSlice(value.Type(), l, l))
+ }
+
+ for i, c := range listData {
+ err := unmarshalAny(value.Index(i), c, "")
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+ if data == nil {
+ return nil
+ }
+ mapData, ok := data.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("JSON value is not a map (%#v)", data)
+ }
+
+ if value.IsNil() {
+ value.Set(reflect.MakeMap(value.Type()))
+ }
+
+ for k, v := range mapData {
+ kvalue := reflect.ValueOf(k)
+ vvalue := reflect.New(value.Type().Elem()).Elem()
+
+ unmarshalAny(vvalue, v, "")
+ value.SetMapIndex(kvalue, vvalue)
+ }
+
+ return nil
+}
+
+func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+
+ switch d := data.(type) {
+ case nil:
+ return nil // nothing to do here
+ case string:
+ switch value.Interface().(type) {
+ case *string:
+ value.Set(reflect.ValueOf(&d))
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(d)
+ if err != nil {
+ return err
+ }
+ value.Set(reflect.ValueOf(b))
+ case *time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ t, err := protocol.ParseTime(format, d)
+ if err != nil {
+ return err
+ }
+ value.Set(reflect.ValueOf(&t))
+ case aws.JSONValue:
+ // No need to use escaping as the value is a non-quoted string.
+ v, err := protocol.DecodeJSONValue(d, protocol.NoEscape)
+ if err != nil {
+ return err
+ }
+ value.Set(reflect.ValueOf(v))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+ }
+ case float64:
+ switch value.Interface().(type) {
+ case *int64:
+ di := int64(d)
+ value.Set(reflect.ValueOf(&di))
+ case *float64:
+ value.Set(reflect.ValueOf(&d))
+ case *time.Time:
+ // Time unmarshaled from a float64 can only be epoch seconds
+ t := time.Unix(int64(d), 0).UTC()
+ value.Set(reflect.ValueOf(&t))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+ }
+ case bool:
+ switch value.Interface().(type) {
+ case *bool:
+ value.Set(reflect.ValueOf(&d))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+ }
+ default:
+ return fmt.Errorf("unsupported JSON value (%v)", data)
+ }
+ return nil
+}
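
The decode side first unmarshals into interface{} and then walks the target shape by the same tags. Continuing the illustrative shape from above:

package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
)

type ItemOutput struct {
	_     struct{} `type:"structure"`
	Name  *string  `locationName:"Name" type:"string"`
	Count *int64   `locationName:"Count" type:"integer"`
}

func main() {
	var out ItemOutput
	body := strings.NewReader(`{"Name":"orders","Count":3}`)
	if err := jsonutil.UnmarshalJSON(&out, body); err != nil {
		panic(err)
	}
	fmt.Println(*out.Name, *out.Count) // orders 3
}
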
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go
new file mode 100644
index 000000000..bfedc9fd4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go
@@ -0,0 +1,110 @@
+// Package jsonrpc provides JSON RPC utilities for serialization of AWS
+// requests and responses.
+package jsonrpc
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+var emptyJSON = []byte("{}")
+
+// BuildHandler is a named request handler for building jsonrpc protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Build", Fn: Build}
+
+// UnmarshalHandler is a named request handler for unmarshaling jsonrpc protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalError", Fn: UnmarshalError}
+
+// Build builds a JSON payload for a JSON RPC request.
+func Build(req *request.Request) {
+ var buf []byte
+ var err error
+ if req.ParamsFilled() {
+ buf, err = jsonutil.BuildJSON(req.Params)
+ if err != nil {
+ req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err)
+ return
+ }
+ } else {
+ buf = emptyJSON
+ }
+
+ if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" {
+ req.SetBufferBody(buf)
+ }
+
+ if req.ClientInfo.TargetPrefix != "" {
+ target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name
+ req.HTTPRequest.Header.Add("X-Amz-Target", target)
+ }
+
+ // Only set the content type if one is not already specified and a
+ // JSONVersion is specified.
+ if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 {
+ jsonVersion := req.ClientInfo.JSONVersion
+ req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion)
+ }
+}
+
+// Unmarshal unmarshals a response for a JSON RPC service.
+func Unmarshal(req *request.Request) {
+ defer req.HTTPResponse.Body.Close()
+ if req.DataFilled() {
+ err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
+ if err != nil {
+ req.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err),
+ req.HTTPResponse.StatusCode,
+ req.RequestID,
+ )
+ }
+ }
+ return
+}
+
+// UnmarshalMeta unmarshals headers from a response for a JSON RPC service.
+func UnmarshalMeta(req *request.Request) {
+ rest.UnmarshalMeta(req)
+}
+
+// UnmarshalError unmarshals an error response for a JSON RPC service.
+func UnmarshalError(req *request.Request) {
+ defer req.HTTPResponse.Body.Close()
+
+ var jsonErr jsonErrorResponse
+ err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body)
+ if err != nil {
+ req.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to unmarshal error message", err),
+ req.HTTPResponse.StatusCode,
+ req.RequestID,
+ )
+ return
+ }
+
+ codes := strings.SplitN(jsonErr.Code, "#", 2)
+ req.Error = awserr.NewRequestFailure(
+ awserr.New(codes[len(codes)-1], jsonErr.Message, nil),
+ req.HTTPResponse.StatusCode,
+ req.RequestID,
+ )
+}
+
+type jsonErrorResponse struct {
+ Code string `json:"__type"`
+ Message string `json:"message"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
new file mode 100644
index 000000000..776d11018
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
+package protocol
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+// EscapeMode is the mode that should be used for escaping a value
+type EscapeMode uint
+
+// The modes for escaping a value before it is marshaled and unmarshaled.
+const (
+ NoEscape EscapeMode = iota
+ Base64Escape
+ QuotedEscape
+)
+
+// EncodeJSONValue marshals the value into a JSON string, and optionally base64
+// encodes the string before returning it.
+//
+// Will panic if the escape mode is unknown.
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return "", err
+ }
+
+ switch escape {
+ case NoEscape:
+ return string(b), nil
+ case Base64Escape:
+ return base64.StdEncoding.EncodeToString(b), nil
+ case QuotedEscape:
+ return strconv.Quote(string(b)), nil
+ }
+
+ panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue,
+// optionally base64 decoding the value first before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
+ var b []byte
+ var err error
+
+ switch escape {
+ case NoEscape:
+ b = []byte(v)
+ case Base64Escape:
+ b, err = base64.StdEncoding.DecodeString(v)
+ case QuotedEscape:
+ var u string
+ u, err = strconv.Unquote(v)
+ b = []byte(u)
+ default:
+ panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ m := aws.JSONValue{}
+ err = json.Unmarshal(b, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
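
A round trip through one of the escape modes; Base64Escape is the mode the REST protocol uses for JSON values carried in headers:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	v := aws.JSONValue{"k": "v"}

	s, err := protocol.EncodeJSONValue(v, protocol.Base64Escape)
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // eyJrIjoidiJ9

	back, err := protocol.DecodeJSONValue(s, protocol.Base64Escape)
	if err != nil {
		panic(err)
	}
	fmt.Println(back["k"]) // v
}
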
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
new file mode 100644
index 000000000..e21614a12
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
@@ -0,0 +1,81 @@
+package protocol
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// PayloadUnmarshaler provides the interface for unmarshaling a payload's
+// reader into an SDK shape.
+type PayloadUnmarshaler interface {
+ UnmarshalPayload(io.Reader, interface{}) error
+}
+
+// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
+// HandlerList. This provides support for unmarshaling a payload reader to
+// a shape without needing an SDK request first.
+type HandlerPayloadUnmarshal struct {
+ Unmarshalers request.HandlerList
+}
+
+// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
+// the Unmarshalers HandlerList provided. Returns an error if unmarshaling
+// fails.
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
+ req := &request.Request{
+ HTTPRequest: &http.Request{},
+ HTTPResponse: &http.Response{
+ StatusCode: 200,
+ Header: http.Header{},
+ Body: ioutil.NopCloser(r),
+ },
+ Data: v,
+ }
+
+ h.Unmarshalers.Run(req)
+
+ return req.Error
+}
+
+// PayloadMarshaler provides the interface for marshaling an SDK shape into
+// an io.Writer.
+type PayloadMarshaler interface {
+ MarshalPayload(io.Writer, interface{}) error
+}
+
+// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
+// This provides support for marshaling an SDK shape into an io.Writer without
+// needing an SDK request first.
+type HandlerPayloadMarshal struct {
+ Marshalers request.HandlerList
+}
+
+// MarshalPayload marshals the SDK shape into the io.Writer using the
+// Marshalers HandlerList provided. Returns an error if marshaling fails.
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error {
+ req := request.New(
+ aws.Config{},
+ metadata.ClientInfo{},
+ request.Handlers{},
+ nil,
+ &request.Operation{HTTPMethod: "GET"},
+ v,
+ nil,
+ )
+
+ h.Marshalers.Run(req)
+
+ if req.Error != nil {
+ return req.Error
+ }
+
+ io.Copy(w, req.GetBody())
+
+ return nil
+}
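
HandlerPayloadUnmarshal makes it possible to run a protocol's unmarshal handlers against a bare reader, no SDK request required; for example, with the jsonrpc handler from this change (the Output shape is illustrative):

package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol"
	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
)

type Output struct {
	_    struct{} `type:"structure"`
	Name *string  `locationName:"Name" type:"string"`
}

func main() {
	var hl request.HandlerList
	hl.PushBackNamed(jsonrpc.UnmarshalHandler)

	u := protocol.HandlerPayloadUnmarshal{Unmarshalers: hl}
	var out Output
	if err := u.UnmarshalPayload(strings.NewReader(`{"Name":"ok"}`), &out); err != nil {
		panic(err)
	}
	fmt.Println(*out.Name) // ok
}
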
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
new file mode 100644
index 000000000..0cb99eb57
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -0,0 +1,36 @@
+// Package query provides serialization of AWS query requests and responses.
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
+
+import (
+ "net/url"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
+)
+
+// BuildHandler is a named request handler for building query protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
+
+// Build builds a request for an AWS Query service.
+func Build(r *request.Request) {
+ body := url.Values{
+ "Action": {r.Operation.Name},
+ "Version": {r.ClientInfo.APIVersion},
+ }
+ if err := queryutil.Parse(body, r.Params, false); err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err)
+ return
+ }
+
+ if !r.IsPresigned() {
+ r.HTTPRequest.Method = "POST"
+ r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
+ r.SetBufferBody([]byte(body.Encode()))
+ } else { // This is a pre-signed request
+ r.HTTPRequest.Method = "GET"
+ r.HTTPRequest.URL.RawQuery = body.Encode()
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
new file mode 100644
index 000000000..75866d012
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -0,0 +1,246 @@
+package queryutil
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/url"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// Parse parses an object i and fills a url.Values object. The isEC2 flag
+// indicates if this is the EC2 Query sub-protocol.
+func Parse(body url.Values, i interface{}, isEC2 bool) error {
+ q := queryParser{isEC2: isEC2}
+ return q.parseValue(body, reflect.ValueOf(i), "", "")
+}
+
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+type queryParser struct {
+ isEC2 bool
+}
+
+func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ value = elemOf(value)
+
+ // no need to handle zero values
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ return q.parseStruct(v, value, prefix)
+ case "list":
+ return q.parseList(v, value, prefix, tag)
+ case "map":
+ return q.parseMap(v, value, prefix, tag)
+ default:
+ return q.parseScalar(v, value, prefix, tag)
+ }
+}
+
+func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ elemValue := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ elemValue = reflect.ValueOf(token)
+ }
+
+ var name string
+ if q.isEC2 {
+ name = field.Tag.Get("queryName")
+ }
+ if name == "" {
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+ if name != "" && q.isEC2 {
+ name = strings.ToUpper(name[0:1]) + name[1:]
+ }
+ }
+ if name == "" {
+ name = field.Name
+ }
+
+ if prefix != "" {
+ name = prefix + "." + name
+ }
+
+ if err := q.parseValue(v, elemValue, name, field.Tag); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+ if _, ok := value.Interface().([]byte); ok {
+ return q.parseScalar(v, value, prefix, tag)
+ }
+
+ // check for unflattened list member
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ if listName := tag.Get("locationNameList"); listName == "" {
+ prefix += ".member"
+ } else {
+ prefix += "." + listName
+ }
+ }
+
+ for i := 0; i < value.Len(); i++ {
+ slicePrefix := prefix
+ if slicePrefix == "" {
+ slicePrefix = strconv.Itoa(i + 1)
+ } else {
+ slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
+ }
+ if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+ // check for unflattened map member
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ prefix += ".entry"
+ }
+
+ // sort keys for improved serialization consistency.
+ // this is not strictly necessary for protocol support.
+ mapKeyValues := value.MapKeys()
+ mapKeys := map[string]reflect.Value{}
+ mapKeyNames := make([]string, len(mapKeyValues))
+ for i, mapKey := range mapKeyValues {
+ name := mapKey.String()
+ mapKeys[name] = mapKey
+ mapKeyNames[i] = name
+ }
+ sort.Strings(mapKeyNames)
+
+ for i, mapKeyName := range mapKeyNames {
+ mapKey := mapKeys[mapKeyName]
+ mapValue := value.MapIndex(mapKey)
+
+ kname := tag.Get("locationNameKey")
+ if kname == "" {
+ kname = "key"
+ }
+ vname := tag.Get("locationNameValue")
+ if vname == "" {
+ vname = "value"
+ }
+
+ // serialize key
+ var keyName string
+ if prefix == "" {
+ keyName = strconv.Itoa(i+1) + "." + kname
+ } else {
+ keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
+ }
+
+ if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
+ return err
+ }
+
+ // serialize value
+ var valueName string
+ if prefix == "" {
+ valueName = strconv.Itoa(i+1) + "." + vname
+ } else {
+ valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
+ }
+
+ if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
+ switch value := r.Interface().(type) {
+ case string:
+ v.Set(name, value)
+ case []byte:
+ if !r.IsNil() {
+ v.Set(name, base64.StdEncoding.EncodeToString(value))
+ }
+ case bool:
+ v.Set(name, strconv.FormatBool(value))
+ case int64:
+ v.Set(name, strconv.FormatInt(value, 10))
+ case int:
+ v.Set(name, strconv.Itoa(value))
+ case float64:
+ v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
+ case float32:
+ v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
+ case time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ v.Set(name, protocol.FormatTime(format, value))
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
+ }
+ return nil
+}
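
For a non-flattened list the parser emits prefix.member.N keys, as the locationNameList branch above shows. A runnable sketch (the shape is illustrative):

package main

import (
	"fmt"
	"net/url"

	"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)

type DescribeInput struct {
	_     struct{}  `type:"structure"`
	Names []*string `locationName:"Names" type:"list"`
}

func main() {
	a, b := "alpha", "beta"
	body := url.Values{}
	err := queryutil.Parse(body, &DescribeInput{Names: []*string{&a, &b}}, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(body.Encode())
	// Names.member.1=alpha&Names.member.2=beta
}
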
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
new file mode 100644
index 000000000..f69c1efc9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
@@ -0,0 +1,39 @@
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
+
+import (
+ "encoding/xml"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling query protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals a response for an AWS Query service.
+func Unmarshal(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ if r.DataFilled() {
+ decoder := xml.NewDecoder(r.HTTPResponse.Body)
+ err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+ }
+}
+
+// UnmarshalMeta unmarshals header response values for an AWS Query service.
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 000000000..831b0110c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,69 @@
+package query
+
+import (
+ "encoding/xml"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+type xmlErrorResponse struct {
+ Code string `xml:"Error>Code"`
+ Message string `xml:"Error>Message"`
+ RequestID string `xml:"RequestId"`
+}
+
+type xmlResponseError struct {
+ xmlErrorResponse
+}
+
+func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ const svcUnavailableTagName = "ServiceUnavailableException"
+ const errorResponseTagName = "ErrorResponse"
+
+ switch start.Name.Local {
+ case svcUnavailableTagName:
+ e.Code = svcUnavailableTagName
+ e.Message = "service is unavailable"
+ return d.Skip()
+
+ case errorResponseTagName:
+ return d.DecodeElement(&e.xmlErrorResponse, &start)
+
+ default:
+ return fmt.Errorf("unknown error response tag, %v", start)
+ }
+}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ var respErr xmlResponseError
+ err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to unmarshal error message", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ reqID := respErr.RequestID
+ if len(reqID) == 0 {
+ reqID = r.RequestID
+ }
+
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(respErr.Code, respErr.Message, nil),
+ r.HTTPResponse.StatusCode,
+ reqID,
+ )
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 000000000..1301b149d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,310 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+var byteSliceType = reflect.TypeOf([]byte{})
+
+func init() {
+ for i := 0; i < len(noEscape); i++ {
+ // AWS expects every character except these to be escaped
+ noEscape[i] = (i >= 'A' && i <= 'Z') ||
+ (i >= 'a' && i <= 'z') ||
+ (i >= '0' && i <= '9') ||
+ i == '-' ||
+ i == '.' ||
+ i == '_' ||
+ i == '~'
+ }
+}
+
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
+// Build builds the REST component of a service request.
+func Build(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v, false)
+ buildBody(r, v)
+ }
+}
+
+// BuildAsGET builds the REST component of a service request with the ability to hoist
+// data from the body.
+func BuildAsGET(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v, true)
+ buildBody(r, v)
+ }
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
+ query := r.HTTPRequest.URL.Query()
+
+ // Set up the raw path to match the base path pattern. This is needed
+ // so that when the path is mutated a custom escaped version can be
+ // stored in RawPath that will be used by the Go client.
+ r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
+
+ for i := 0; i < v.NumField(); i++ {
+ m := v.Field(i)
+ if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ field := v.Type().Field(i)
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+ if kind := m.Kind(); kind == reflect.Ptr {
+ m = m.Elem()
+ } else if kind == reflect.Interface {
+ if !m.Elem().IsValid() {
+ continue
+ }
+ }
+ if !m.IsValid() {
+ continue
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ // Support the ability to customize values to be marshaled as a
+ // blob even though they were modeled as a string. Required for S3
+ // API operations like SSECustomerKey, which is modeled as a string
+ // but required to be base64 encoded in the request.
+ if field.Tag.Get("marshal-as") == "blob" {
+ m = m.Convert(byteSliceType)
+ }
+
+ var err error
+ switch field.Tag.Get("location") {
+ case "headers": // header maps
+ err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag)
+ case "header":
+ err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag)
+ case "uri":
+ err = buildURI(r.HTTPRequest.URL, m, name, field.Tag)
+ case "querystring":
+ err = buildQueryString(query, m, name, field.Tag)
+ default:
+ if buildGETQuery {
+ err = buildQueryString(query, m, name, field.Tag)
+ }
+ }
+ r.Error = err
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+
+ r.HTTPRequest.URL.RawQuery = query.Encode()
+ if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) {
+ cleanPath(r.HTTPRequest.URL)
+ }
+}
+
+func buildBody(r *request.Request, v reflect.Value) {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := reflect.Indirect(v.FieldByName(payloadName))
+ if payload.IsValid() && payload.Interface() != nil {
+ switch reader := payload.Interface().(type) {
+ case io.ReadSeeker:
+ r.SetReaderBody(reader)
+ case []byte:
+ r.SetBufferBody(reader)
+ case string:
+ r.SetStringBody(reader)
+ default:
+ r.Error = awserr.New(request.ErrCodeSerialization,
+ "failed to encode REST request",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+}
+
+func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error {
+ str, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
+ }
+
+ name = strings.TrimSpace(name)
+ str = strings.TrimSpace(str)
+
+ header.Add(name, str)
+
+ return nil
+}
+
+func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error {
+ prefix := tag.Get("locationName")
+ for _, key := range v.MapKeys() {
+ str, err := convertType(v.MapIndex(key), tag)
+ if err == errValueNotSet {
+ continue
+ } else if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
+
+ }
+ keyStr := strings.TrimSpace(key.String())
+ str = strings.TrimSpace(str)
+
+ header.Add(prefix+keyStr, str)
+ }
+ return nil
+}
+
+func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error {
+ value, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
+ }
+
+ u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1)
+ u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1)
+
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1)
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1)
+
+ return nil
+}
+
+func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error {
+ switch value := v.Interface().(type) {
+ case []*string:
+ for _, item := range value {
+ query.Add(name, *item)
+ }
+ case map[string]*string:
+ for key, item := range value {
+ query.Add(key, *item)
+ }
+ case map[string][]*string:
+ for key, items := range value {
+ for _, item := range items {
+ query.Add(key, *item)
+ }
+ }
+ default:
+ str, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
+ }
+ query.Set(name, str)
+ }
+
+ return nil
+}
+
+func cleanPath(u *url.URL) {
+ hasSlash := strings.HasSuffix(u.Path, "/")
+
+ // clean up path, removing duplicate `/`
+ u.Path = path.Clean(u.Path)
+ u.RawPath = path.Clean(u.RawPath)
+
+ if hasSlash && !strings.HasSuffix(u.Path, "/") {
+ u.Path += "/"
+ u.RawPath += "/"
+ }
+}
+
+// EscapePath escapes part of a URL path in Amazon style
+func EscapePath(path string, encodeSep bool) string {
+ var buf bytes.Buffer
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if noEscape[c] || (c == '/' && !encodeSep) {
+ buf.WriteByte(c)
+ } else {
+ fmt.Fprintf(&buf, "%%%02X", c)
+ }
+ }
+ return buf.String()
+}
+
+func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) {
+ v = reflect.Indirect(v)
+ if !v.IsValid() {
+ return "", errValueNotSet
+ }
+
+ switch value := v.Interface().(type) {
+ case string:
+ str = value
+ case []byte:
+ str = base64.StdEncoding.EncodeToString(value)
+ case bool:
+ str = strconv.FormatBool(value)
+ case int64:
+ str = strconv.FormatInt(value, 10)
+ case float64:
+ str = strconv.FormatFloat(value, 'f', -1, 64)
+ case time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.RFC822TimeFormatName
+ if tag.Get("location") == "querystring" {
+ format = protocol.ISO8601TimeFormatName
+ }
+ }
+ str = protocol.FormatTime(format, value)
+ case aws.JSONValue:
+ if len(value) == 0 {
+ return "", errValueNotSet
+ }
+ escaping := protocol.NoEscape
+ if tag.Get("location") == "header" {
+ escaping = protocol.Base64Escape
+ }
+ str, err = protocol.EncodeJSONValue(value, escaping)
+ if err != nil {
+ return "", fmt.Errorf("unable to encode JSONValue, %v", err)
+ }
+ default:
+ err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return "", err
+ }
+ return str, nil
+}
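
EscapePath percent-encodes every byte outside the unreserved set, optionally leaving '/' intact so greedy {key+} URI segments keep their separators:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

func main() {
	// Greedy segment: keep '/' as a separator.
	fmt.Println(rest.EscapePath("photos/2024/img 1.png", false))
	// photos/2024/img%201.png

	// Single segment: encode '/' too.
	fmt.Println(rest.EscapePath("a/b", true))
	// a%2Fb
}
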
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
new file mode 100644
index 000000000..4366de2e1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
@@ -0,0 +1,45 @@
+package rest
+
+import "reflect"
+
+// PayloadMember returns the payload field member of i if there is one, or nil.
+func PayloadMember(i interface{}) interface{} {
+ if i == nil {
+ return nil
+ }
+
+ v := reflect.ValueOf(i).Elem()
+ if !v.IsValid() {
+ return nil
+ }
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ field, _ := v.Type().FieldByName(payloadName)
+ if field.Tag.Get("type") != "structure" {
+ return nil
+ }
+
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
+ return payload.Interface()
+ }
+ }
+ }
+ return nil
+}
+
+// PayloadType returns the type of a payload field member of i if there is one, or "".
+func PayloadType(i interface{}) string {
+ v := reflect.Indirect(reflect.ValueOf(i))
+ if !v.IsValid() {
+ return ""
+ }
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ if member, ok := v.Type().FieldByName(payloadName); ok {
+ return member.Tag.Get("type")
+ }
+ }
+ }
+ return ""
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
new file mode 100644
index 000000000..74e361e07
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
@@ -0,0 +1,237 @@
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals the REST component of a response in a REST service.
+func Unmarshal(r *request.Request) {
+ if r.DataFilled() {
+ v := reflect.Indirect(reflect.ValueOf(r.Data))
+ unmarshalBody(r, v)
+ }
+}
+
+// UnmarshalMeta unmarshals the REST metadata of a response in a REST service
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+ if r.RequestID == "" {
+ // Alternative version of request id in the header
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
+ }
+ if r.DataFilled() {
+ v := reflect.Indirect(reflect.ValueOf(r.Data))
+ unmarshalLocationElements(r, v)
+ }
+}
+
+func unmarshalBody(r *request.Request, v reflect.Value) {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() {
+ switch payload.Interface().(type) {
+ case []byte:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+ } else {
+ payload.Set(reflect.ValueOf(b))
+ }
+ case *string:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+ } else {
+ str := string(b)
+ payload.Set(reflect.ValueOf(&str))
+ }
+ default:
+ switch payload.Type().String() {
+ case "io.ReadCloser":
+ payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
+ case "io.ReadSeeker":
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization,
+ "failed to read response body", err)
+ return
+ }
+ payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b))))
+ default:
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ defer r.HTTPResponse.Body.Close()
+ r.Error = awserr.New(request.ErrCodeSerialization,
+ "failed to decode REST response",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func unmarshalLocationElements(r *request.Request, v reflect.Value) {
+ for i := 0; i < v.NumField(); i++ {
+ m, field := v.Field(i), v.Type().Field(i)
+ if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+
+ switch field.Tag.Get("location") {
+ case "statusCode":
+ unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
+ case "header":
+ err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+ break
+ }
+ case "headers":
+ prefix := field.Tag.Get("locationName")
+ err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+ break
+ }
+ }
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+}
+
+func unmarshalStatusCode(v reflect.Value, statusCode int) {
+ if !v.IsValid() {
+ return
+ }
+
+ switch v.Interface().(type) {
+ case *int64:
+ s := int64(statusCode)
+ v.Set(reflect.ValueOf(&s))
+ }
+}
+
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
+ if len(headers) == 0 {
+ return nil
+ }
+ switch r.Interface().(type) {
+ case map[string]*string: // we only support string map value types
+ out := map[string]*string{}
+ for k, v := range headers {
+ k = http.CanonicalHeaderKey(k)
+ if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+ out[k[len(prefix):]] = &v[0]
+ }
+ }
+ if len(out) != 0 {
+ r.Set(reflect.ValueOf(out))
+ }
+
+ }
+ return nil
+}
+
+func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error {
+ switch tag.Get("type") {
+ case "jsonvalue":
+ if len(header) == 0 {
+ return nil
+ }
+ case "blob":
+ if len(header) == 0 {
+ return nil
+ }
+ default:
+ if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+ return nil
+ }
+ }
+
+ switch v.Interface().(type) {
+ case *string:
+ v.Set(reflect.ValueOf(&header))
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(b))
+ case *bool:
+ b, err := strconv.ParseBool(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&b))
+ case *int64:
+ i, err := strconv.ParseInt(header, 10, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&i))
+ case *float64:
+ f, err := strconv.ParseFloat(header, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&f))
+ case *time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.RFC822TimeFormatName
+ }
+ t, err := protocol.ParseTime(format, header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&t))
+ case aws.JSONValue:
+ escaping := protocol.NoEscape
+ if tag.Get("location") == "header" {
+ escaping = protocol.Base64Escape
+ }
+ m, err := protocol.DecodeJSONValue(header, escaping)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(m))
+ default:
+ err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return err
+ }
+ return nil
+}
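+
+// Illustrative sketch (editorial; the field below is hypothetical): a member
+// tagged
+//
+//    ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"`
+//
+// is matched by unmarshalLocationElements via its locationName, and
+// unmarshalHeader fills it by parsing the header text with strconv.ParseInt.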
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
new file mode 100644
index 000000000..05d4ff519
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
@@ -0,0 +1,84 @@
+package protocol
+
+import (
+ "math"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/internal/sdkmath"
+)
+
+// Names of time formats supported by the SDK
+const (
+ RFC822TimeFormatName = "rfc822"
+ ISO8601TimeFormatName = "iso8601"
+ UnixTimeFormatName = "unixTimestamp"
+)
+
+// Time formats supported by the SDK.
+// Output times are intended to not contain fractional seconds.
+const (
+ // RFC 7231#section-7.1.1.1 timestamp format, e.g. Tue, 29 Apr 2014 18:30:38 GMT
+ RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+ // This format is used for output time without sub-second precision
+ RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+
+ // RFC 3339, a subset of the ISO 8601 timestamp format, e.g. 2014-04-29T18:30:38Z
+ ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z"
+
+ // This format is used for output time without sub-second precision
+ ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z"
+)
+
+// IsKnownTimestampFormat returns whether the timestamp format name
+// is known to the SDK's protocols.
+func IsKnownTimestampFormat(name string) bool {
+ switch name {
+ case RFC822TimeFormatName, ISO8601TimeFormatName, UnixTimeFormatName:
+ return true
+ default:
+ return false
+ }
+}
+
+// FormatTime returns a string value of the time.
+func FormatTime(name string, t time.Time) string {
+ t = t.UTC()
+
+ switch name {
+ case RFC822TimeFormatName:
+ return t.Format(RFC822OutputTimeFormat)
+ case ISO8601TimeFormatName:
+ return t.Format(ISO8601OutputTimeFormat)
+ case UnixTimeFormatName:
+ return strconv.FormatInt(t.Unix(), 10)
+ default:
+ panic("unknown timestamp format name, " + name)
+ }
+}
+
+// ParseTime attempts to parse the time according to the named format. Returns
+// the parsed time on success, and an error otherwise.
+func ParseTime(formatName, value string) (time.Time, error) {
+ switch formatName {
+ case RFC822TimeFormatName:
+ return time.Parse(RFC822TimeFormat, value)
+ case ISO8601TimeFormatName:
+ return time.Parse(ISO8601TimeFormat, value)
+ case UnixTimeFormatName:
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return time.Time{}, err
+ }
+ _, dec := math.Modf(v)
+ dec = sdkmath.Round(dec*1e3) / 1e3 // rounds 0.1229999 to 0.123
+ return time.Unix(int64(v), int64(dec*(1e9))), nil
+ default:
+ panic("unknown timestamp format name, " + formatName)
+ }
+}
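+
+// Illustrative usage (editorial): round-tripping a time through the RFC 822
+// protocol format.
+//
+//    t, _ := ParseTime(RFC822TimeFormatName, "Thu, 21 Feb 2019 15:04:05 GMT")
+//    s := FormatTime(RFC822TimeFormatName, t)
+//    // s == "Thu, 21 Feb 2019 15:04:05 GMT"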
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
new file mode 100644
index 000000000..da1a68111
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
@@ -0,0 +1,21 @@
+package protocol
+
+import (
+ "io"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
+var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler to empty a response's body and close it.
+func UnmarshalDiscardBody(r *request.Request) {
+ if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+ return
+ }
+
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ r.HTTPResponse.Body.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
new file mode 100644
index 000000000..cf981fe95
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -0,0 +1,306 @@
+// Package xmlutil provides XML serialization of AWS requests and responses.
+package xmlutil
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// BuildXML will serialize params into an xml.Encoder. Error will be returned
+// if the serialization of any of the params or nested values fails.
+func BuildXML(params interface{}, e *xml.Encoder) error {
+ return buildXML(params, e, false)
+}
+
+func buildXML(params interface{}, e *xml.Encoder, sorted bool) error {
+ b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
+ root := NewXMLElement(xml.Name{})
+ if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
+ return err
+ }
+ for _, c := range root.Children {
+ for _, v := range c {
+ return StructToXML(e, v, sorted)
+ }
+ }
+ return nil
+}
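+
+// Illustrative usage (editorial; "input" stands for any tagged shape): the
+// encoder is typically backed by a buffer that becomes the request body.
+//
+//    var buf bytes.Buffer
+//    enc := xml.NewEncoder(&buf)
+//    if err := BuildXML(input, enc); err != nil {
+//        // handle the serialization error
+//    }
+//    // buf.Bytes() now holds the serialized XML body.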
+
+// elemOf returns the reflect.Value a pointer points to, dereferencing through any chain of pointers.
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+// An xmlBuilder serializes values from Go code to XML
+type xmlBuilder struct {
+ encoder *xml.Encoder
+ namespaces map[string]string
+}
+
+// buildValue is a generic XMLNode builder for any type. It builds the value
+// according to its specific type: struct, list, map, or scalar.
+//
+// Also takes a "type" tag value to set what type a value should be converted to an XMLNode
+// as. If no type is provided, reflection is used to determine the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ value = elemOf(value)
+ if !value.IsValid() { // no need to handle zero values
+ return nil
+ } else if tag.Get("location") != "" { // don't handle non-body location values
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := value.Type().FieldByName("_"); ok {
+ tag = tag + reflect.StructTag(" ") + field.Tag
+ }
+ return b.buildStruct(value, current, tag)
+ case "list":
+ return b.buildList(value, current, tag)
+ case "map":
+ return b.buildMap(value, current, tag)
+ default:
+ return b.buildScalar(value, current, tag)
+ }
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
+// types are also converted to XMLNodes.
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ // unwrap payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := value.Type().FieldByName(payload)
+ tag = field.Tag
+ value = elemOf(value.FieldByName(payload))
+
+ if !value.IsValid() {
+ return nil
+ }
+ }
+
+ child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+ // there is an xmlNamespace associated with this struct
+ if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+ ns := xml.Attr{
+ Name: xml.Name{Local: "xmlns"},
+ Value: uri,
+ }
+ if prefix != "" {
+ b.namespaces[prefix] = uri // register the namespace
+ ns.Name.Local = "xmlns:" + prefix
+ }
+
+ child.Attr = append(child.Attr, ns)
+ }
+
+ var payloadFields, nonPayloadFields int
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ member := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ mTag := field.Tag
+ if mTag.Get("location") != "" { // skip non-body members
+ nonPayloadFields++
+ continue
+ }
+ payloadFields++
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ member = reflect.ValueOf(token)
+ }
+
+ memberName := mTag.Get("locationName")
+ if memberName == "" {
+ memberName = field.Name
+ mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+ }
+ if err := b.buildValue(member, child, mTag); err != nil {
+ return err
+ }
+ }
+
+ // Only case where the child shape is not added is if the shape only contains
+ // non-payload fields, e.g headers/query.
+ if !(payloadFields == 0 && nonPayloadFields > 0) {
+ current.AddChild(child)
+ }
+
+ return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as child nodes. All
+// nested values in the list are also converted to XMLNodes.
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted lists
+ return nil
+ }
+
+ // check for unflattened list member
+ flattened := tag.Get("flattened") != ""
+
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if flattened {
+ for i := 0; i < value.Len(); i++ {
+ child := NewXMLElement(xname)
+ current.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ } else {
+ list := NewXMLElement(xname)
+ current.AddChild(list)
+
+ for i := 0; i < value.Len(); i++ {
+ iname := tag.Get("locationNameList")
+ if iname == "" {
+ iname = "member"
+ }
+
+ child := NewXMLElement(xml.Name{Local: iname})
+ list.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// buildMap adds the value's key/value pairs to the current XMLNode as child nodes. All
+// nested values in the map are also converted to XMLNodes.
+//
+// Error will be returned if it is unable to build the map's values into XMLNodes
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted maps
+ return nil
+ }
+
+ maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+ current.AddChild(maproot)
+ current = maproot
+
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ // sorting is not required for compliance, but it makes testing easier
+ keys := make([]string, value.Len())
+ for i, k := range value.MapKeys() {
+ keys[i] = k.String()
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := value.MapIndex(reflect.ValueOf(k))
+
+ mapcur := current
+ if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+ child := NewXMLElement(xml.Name{Local: "entry"})
+ mapcur.AddChild(child)
+ mapcur = child
+ }
+
+ kchild := NewXMLElement(xml.Name{Local: kname})
+ kchild.Text = k
+ vchild := NewXMLElement(xml.Name{Local: vname})
+ mapcur.AddChild(kchild)
+ mapcur.AddChild(vchild)
+
+ if err := b.buildValue(v, vchild, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// buildScalar will convert the value into a string and append it as an attribute or child
+// of the current XMLNode.
+//
+// The value will be added as an attribute if the tag contains an "xmlAttribute" attribute value.
+//
+// Error will be returned if the value type is unsupported.
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ var str string
+ switch converted := value.Interface().(type) {
+ case string:
+ str = converted
+ case []byte:
+ if !value.IsNil() {
+ str = base64.StdEncoding.EncodeToString(converted)
+ }
+ case bool:
+ str = strconv.FormatBool(converted)
+ case int64:
+ str = strconv.FormatInt(converted, 10)
+ case int:
+ str = strconv.Itoa(converted)
+ case float64:
+ str = strconv.FormatFloat(converted, 'f', -1, 64)
+ case float32:
+ str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
+ case time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ str = protocol.FormatTime(format, converted)
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)",
+ tag.Get("locationName"), value.Interface(), value.Type().Name())
+ }
+
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
+ attr := xml.Attr{Name: xname, Value: str}
+ current.Attr = append(current.Attr, attr)
+ } else { // regular text node
+ current.AddChild(&XMLNode{Name: xname, Text: str})
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go
new file mode 100644
index 000000000..c1a511851
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go
@@ -0,0 +1,32 @@
+package xmlutil
+
+import (
+ "encoding/xml"
+ "strings"
+)
+
+type xmlAttrSlice []xml.Attr
+
+func (x xmlAttrSlice) Len() int {
+ return len(x)
+}
+
+func (x xmlAttrSlice) Less(i, j int) bool {
+ spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space
+ localI, localJ := x[i].Name.Local, x[j].Name.Local
+ valueI, valueJ := x[i].Value, x[j].Value
+
+ spaceCmp := strings.Compare(spaceI, spaceJ)
+ localCmp := strings.Compare(localI, localJ)
+ valueCmp := strings.Compare(valueI, valueJ)
+
+ if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) {
+ return true
+ }
+
+ return false
+}
+
+func (x xmlAttrSlice) Swap(i, j int) {
+ x[i], x[j] = x[j], x[i]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
new file mode 100644
index 000000000..7108d3800
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -0,0 +1,291 @@
+package xmlutil
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// UnmarshalXMLError unmarshals the XML error from the stream into the value
+// type specified. The value must be a pointer. If the message fails to
+// unmarshal, the message content will be included in the returned error as an
+// awserr.UnmarshalError.
+func UnmarshalXMLError(v interface{}, stream io.Reader) error {
+ var errBuf bytes.Buffer
+ body := io.TeeReader(stream, &errBuf)
+
+ err := xml.NewDecoder(body).Decode(v)
+ if err != nil && err != io.EOF {
+ return awserr.NewUnmarshalError(err,
+ "failed to unmarshal error message", errBuf.Bytes())
+ }
+
+ return nil
+}
+
+// UnmarshalXML deserializes an xml.Decoder into the container v. The type of v
+// needs to match the shape of the XML expected to be decoded.
+// If the shapes don't match, unmarshaling will fail.
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
+ n, err := XMLToStruct(d, nil)
+ if err != nil {
+ return err
+ }
+ if n.Children != nil {
+ for _, root := range n.Children {
+ for _, c := range root {
+ if wrappedChild, ok := c.Children[wrapper]; ok {
+ c = wrappedChild[0] // pull out wrapped element
+ }
+
+ err = parse(reflect.ValueOf(v), c, "")
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ }
+ }
+ return nil
+ }
+ return nil
+}
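+
+// Illustrative usage (editorial; OutputShape is hypothetical and resp is an
+// *http.Response): decoding a response body into a tagged output value, with
+// no wrapper element expected.
+//
+//    out := &OutputShape{}
+//    err := UnmarshalXML(out, xml.NewDecoder(resp.Body), "")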
+
+// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or
+// reflection is used to determine the type from r.
+func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ rtype := r.Type()
+ if rtype.Kind() == reflect.Ptr {
+ rtype = rtype.Elem() // check kind of actual element type
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch rtype.Kind() {
+ case reflect.Struct:
+ // a struct is a "structure" unless the value is a *time.Time
+ if _, ok := r.Interface().(*time.Time); !ok {
+ t = "structure"
+ }
+ case reflect.Slice:
+ // a slice is a "list" unless the value is a []byte
+ if _, ok := r.Interface().([]byte); !ok {
+ t = "list"
+ }
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := rtype.FieldByName("_"); ok {
+ tag = field.Tag
+ }
+ return parseStruct(r, node, tag)
+ case "list":
+ return parseList(r, node, tag)
+ case "map":
+ return parseMap(r, node, tag)
+ default:
+ return parseScalar(r, node, tag)
+ }
+}
+
+// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
+// types in the structure will also be deserialized.
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ t := r.Type()
+ if r.Kind() == reflect.Ptr {
+ if r.IsNil() { // create the structure if it's nil
+ s := reflect.New(r.Type().Elem())
+ r.Set(s)
+ r = s
+ }
+
+ r = r.Elem()
+ t = t.Elem()
+ }
+
+ // unwrap any payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := t.FieldByName(payload)
+ return parseStruct(r.FieldByName(payload), node, field.Tag)
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if c := field.Name[0:1]; strings.ToLower(c) == c {
+ continue // ignore unexported fields
+ }
+
+ // figure out what this field is called
+ name := field.Name
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+
+ // try to find the field by name in elements
+ elems := node.Children[name]
+
+ if elems == nil { // try to find the field in attributes
+ if val, ok := node.findElem(name); ok {
+ elems = []*XMLNode{{Text: val}}
+ }
+ }
+
+ member := r.FieldByName(field.Name)
+ for _, elem := range elems {
+ err := parse(member, elem, field.Tag)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ t := r.Type()
+
+ if tag.Get("flattened") == "" { // look at all item entries
+ mname := "member"
+ if name := tag.Get("locationNameList"); name != "" {
+ mname = name
+ }
+
+ if children, ok := node.Children[mname]; ok {
+ if r.IsNil() {
+ r.Set(reflect.MakeSlice(t, len(children), len(children)))
+ }
+
+ for i, c := range children {
+ err := parse(r.Index(i), c, "")
+ if err != nil {
+ return err
+ }
+ }
+ }
+ } else { // flattened list means this is a single element
+ if r.IsNil() {
+ r.Set(reflect.MakeSlice(t, 0, 0))
+ }
+
+ childR := reflect.Zero(t.Elem())
+ r.Set(reflect.Append(r, childR))
+ err := parse(r.Index(r.Len()-1), node, "")
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ if r.IsNil() {
+ r.Set(reflect.MakeMap(r.Type()))
+ }
+
+ if tag.Get("flattened") == "" { // look at all child entries
+ for _, entry := range node.Children["entry"] {
+ parseMapEntry(r, entry, tag)
+ }
+ } else { // this element is itself an entry
+ parseMapEntry(r, node, tag)
+ }
+
+ return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ keys, ok := node.Children[kname]
+ values := node.Children[vname]
+ if ok {
+ for i, key := range keys {
+ keyR := reflect.ValueOf(key.Text)
+ value := values[i]
+ valueR := reflect.New(r.Type().Elem()).Elem()
+
+ if err := parse(valueR, value, ""); err != nil {
+ return err
+ }
+ r.SetMapIndex(keyR, valueR)
+ }
+ }
+ return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// An error is returned if the deserialization fails due to an invalid type conversion
+// or an unsupported interface type.
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ switch r.Interface().(type) {
+ case *string:
+ r.Set(reflect.ValueOf(&node.Text))
+ return nil
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(b))
+ case *bool:
+ v, err := strconv.ParseBool(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *int64:
+ v, err := strconv.ParseInt(node.Text, 10, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *float64:
+ v, err := strconv.ParseFloat(node.Text, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ t, err := protocol.ParseTime(format, node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&t))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100644
index 000000000..42f71648e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -0,0 +1,159 @@
+package xmlutil
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "sort"
+)
+
+// An XMLNode contains the values to be encoded or decoded.
+type XMLNode struct {
+ Name xml.Name `json:",omitempty"`
+ Children map[string][]*XMLNode `json:",omitempty"`
+ Text string `json:",omitempty"`
+ Attr []xml.Attr `json:",omitempty"`
+
+ namespaces map[string]string
+ parent *XMLNode
+}
+
+// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
+func NewXMLElement(name xml.Name) *XMLNode {
+ return &XMLNode{
+ Name: name,
+ Children: map[string][]*XMLNode{},
+ Attr: []xml.Attr{},
+ }
+}
+
+// AddChild adds child to the XMLNode.
+func (n *XMLNode) AddChild(child *XMLNode) {
+ child.parent = n
+ if _, ok := n.Children[child.Name.Local]; !ok {
+ n.Children[child.Name.Local] = []*XMLNode{}
+ }
+ n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
+}
+
+// XMLToStruct converts an xml.Decoder stream to an XMLNode tree with nested values.
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
+ out := &XMLNode{}
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ if err == io.EOF {
+ break
+ } else {
+ return out, err
+ }
+ }
+
+ if tok == nil {
+ break
+ }
+
+ switch typed := tok.(type) {
+ case xml.CharData:
+ out.Text = string(typed.Copy())
+ case xml.StartElement:
+ el := typed.Copy()
+ out.Attr = el.Attr
+ if out.Children == nil {
+ out.Children = map[string][]*XMLNode{}
+ }
+
+ name := typed.Name.Local
+ slice := out.Children[name]
+ if slice == nil {
+ slice = []*XMLNode{}
+ }
+ node, e := XMLToStruct(d, &el)
+ out.findNamespaces()
+ if e != nil {
+ return out, e
+ }
+ node.Name = typed.Name
+ node.findNamespaces()
+ // Save out into a temp variable, since out gets squashed during
+ // loop iterations.
+ tempOut := *out
+ node.parent = &tempOut
+ slice = append(slice, node)
+ out.Children[name] = slice
+ case xml.EndElement:
+ if s != nil && s.Name.Local == typed.Name.Local { // matching end token
+ return out, nil
+ }
+ out = &XMLNode{}
+ }
+ }
+ return out, nil
+}
+
+func (n *XMLNode) findNamespaces() {
+ ns := map[string]string{}
+ for _, a := range n.Attr {
+ if a.Name.Space == "xmlns" {
+ ns[a.Value] = a.Name.Local
+ }
+ }
+
+ n.namespaces = ns
+}
+
+func (n *XMLNode) findElem(name string) (string, bool) {
+ for node := n; node != nil; node = node.parent {
+ for _, a := range node.Attr {
+ namespace := a.Name.Space
+ if v, ok := node.namespaces[namespace]; ok {
+ namespace = v
+ }
+ if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) {
+ return a.Value, true
+ }
+ }
+ }
+ return "", false
+}
+
+// StructToXML writes an XMLNode to an xml.Encoder as tokens.
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
+ // Sort Attributes
+ attrs := node.Attr
+ if sorted {
+ // copy the attributes before sorting so the node's own order is preserved
+ sortedAttrs := make([]xml.Attr, 0, len(attrs))
+ sortedAttrs = append(sortedAttrs, attrs...)
+ sort.Sort(xmlAttrSlice(sortedAttrs))
+ attrs = sortedAttrs
+ }
+
+ e.EncodeToken(xml.StartElement{Name: node.Name, Attr: attrs})
+
+ if node.Text != "" {
+ e.EncodeToken(xml.CharData([]byte(node.Text)))
+ } else if sorted {
+ sortedNames := []string{}
+ for k := range node.Children {
+ sortedNames = append(sortedNames, k)
+ }
+ sort.Strings(sortedNames)
+
+ for _, k := range sortedNames {
+ for _, v := range node.Children[k] {
+ StructToXML(e, v, sorted)
+ }
+ }
+ } else {
+ for _, c := range node.Children {
+ for _, v := range c {
+ StructToXML(e, v, sorted)
+ }
+ }
+ }
+
+ e.EncodeToken(xml.EndElement{Name: node.Name})
+ return e.Flush()
+}
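+
+// Illustrative round trip (editorial): tokens read from a decoder are folded
+// into an XMLNode tree, which can be re-emitted deterministically when
+// sorted is true.
+//
+//    node, err := XMLToStruct(xml.NewDecoder(r), nil)
+//    if err == nil {
+//        err = StructToXML(xml.NewEncoder(w), node, true)
+//    }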
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go
new file mode 100644
index 000000000..8c889ff34
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go
@@ -0,0 +1,16231 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "fmt"
+ "net/url"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/crr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+const opBatchGetItem = "BatchGetItem"
+
+// BatchGetItemRequest generates a "aws/request.Request" representing the
+// client's request for the BatchGetItem operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See BatchGetItem for more information on using the BatchGetItem
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the BatchGetItemRequest method.
+// req, resp := client.BatchGetItemRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItem
+func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.Request, output *BatchGetItemOutput) {
+ op := &request.Operation{
+ Name: opBatchGetItem,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"RequestItems"},
+ OutputTokens: []string{"UnprocessedKeys"},
+ LimitToken: "",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &BatchGetItemInput{}
+ }
+
+ output = &BatchGetItemOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// BatchGetItem API operation for Amazon DynamoDB.
+//
+// The BatchGetItem operation returns the attributes of one or more items from
+// one or more tables. You identify requested items by primary key.
+//
+// A single operation can retrieve up to 16 MB of data, which can contain as
+// many as 100 items. BatchGetItem returns a partial result if the response
+// size limit is exceeded, the table's provisioned throughput is exceeded, or
+// an internal processing failure occurs. If a partial result is returned, the
+// operation returns a value for UnprocessedKeys. You can use this value to
+// retry the operation starting with the next item to get.
+//
+// If you request more than 100 items, BatchGetItem returns a ValidationException
+// with the message "Too many items requested for the BatchGetItem call."
+//
+// For example, if you ask to retrieve 100 items, but each individual item is
+// 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB
+// limit). It also returns an appropriate UnprocessedKeys value so you can get
+// the next page of results. If desired, your application can include its own
+// logic to assemble the pages of results into one dataset.
+//
+// If none of the items can be processed due to insufficient provisioned throughput
+// on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException.
+// If at least one of the items is successfully processed, then BatchGetItem
+// completes successfully, while returning the keys of the unread items in UnprocessedKeys.
+//
+// If DynamoDB returns any unprocessed items, you should retry the batch operation
+// on those items. However, we strongly recommend that you use an exponential
+// backoff algorithm. If you retry the batch operation immediately, the underlying
+// read or write requests can still fail due to throttling on the individual
+// tables. If you delay the batch operation using exponential backoff, the individual
+// requests in the batch are much more likely to succeed.
+//
+// For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations)
+// in the Amazon DynamoDB Developer Guide.
+//
+// By default, BatchGetItem performs eventually consistent reads on every table
+// in the request. If you want strongly consistent reads instead, you can set
+// ConsistentRead to true for any or all tables.
+//
+// In order to minimize response latency, BatchGetItem retrieves items in parallel.
+//
+// When designing your application, keep in mind that DynamoDB does not return
+// items in any particular order. To help parse the response by item, include
+// the primary key values for the items in your request in the ProjectionExpression
+// parameter.
+//
+// If a requested item does not exist, it is not returned in the result. Requests
+// for nonexistent items consume the minimum read capacity units according to
+// the type of read. For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation BatchGetItem for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
+// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+// requests that receive this exception. Your request is eventually successful,
+// unless your retry queue is too large to finish. Reduce the frequency of requests
+// and use exponential backoff. For more information, go to Error Retries and
+// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+// in the Amazon DynamoDB Developer Guide.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeRequestLimitExceeded "RequestLimitExceeded"
+// Throughput exceeds the current throughput limit for your account. Please
+// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
+// a limit increase.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItem
+func (c *DynamoDB) BatchGetItem(input *BatchGetItemInput) (*BatchGetItemOutput, error) {
+ req, out := c.BatchGetItemRequest(input)
+ return out, req.Send()
+}
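+
+// Illustrative sketch (editorial, not generated code): retrying unprocessed
+// keys from a BatchGetItem response, as recommended above. Exponential
+// backoff between attempts is elided; "client" and "items" are assumed to
+// exist.
+//
+//    input := &dynamodb.BatchGetItemInput{RequestItems: items}
+//    for {
+//        out, err := client.BatchGetItem(input)
+//        if err != nil {
+//            return err
+//        }
+//        // consume out.Responses here
+//        if len(out.UnprocessedKeys) == 0 {
+//            break
+//        }
+//        input.RequestItems = out.UnprocessedKeys
+//    }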
+
+// BatchGetItemWithContext is the same as BatchGetItem with the addition of
+// the ability to pass a context and additional request options.
+//
+// See BatchGetItem for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) BatchGetItemWithContext(ctx aws.Context, input *BatchGetItemInput, opts ...request.Option) (*BatchGetItemOutput, error) {
+ req, out := c.BatchGetItemRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// BatchGetItemPages iterates over the pages of a BatchGetItem operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See BatchGetItem method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a BatchGetItem operation.
+// pageNum := 0
+// err := client.BatchGetItemPages(params,
+// func(page *dynamodb.BatchGetItemOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *DynamoDB) BatchGetItemPages(input *BatchGetItemInput, fn func(*BatchGetItemOutput, bool) bool) error {
+ return c.BatchGetItemPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// BatchGetItemPagesWithContext same as BatchGetItemPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) BatchGetItemPagesWithContext(ctx aws.Context, input *BatchGetItemInput, fn func(*BatchGetItemOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *BatchGetItemInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.BatchGetItemRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*BatchGetItemOutput), !p.HasNextPage())
+ }
+ return p.Err()
+}
+
+const opBatchWriteItem = "BatchWriteItem"
+
+// BatchWriteItemRequest generates a "aws/request.Request" representing the
+// client's request for the BatchWriteItem operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See BatchWriteItem for more information on using the BatchWriteItem
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the BatchWriteItemRequest method.
+// req, resp := client.BatchWriteItemRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItem
+func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *request.Request, output *BatchWriteItemOutput) {
+ op := &request.Operation{
+ Name: opBatchWriteItem,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &BatchWriteItemInput{}
+ }
+
+ output = &BatchWriteItemOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// BatchWriteItem API operation for Amazon DynamoDB.
+//
+// The BatchWriteItem operation puts or deletes multiple items in one or more
+// tables. A single call to BatchWriteItem can write up to 16 MB of data, which
+// can comprise as many as 25 put or delete requests. Individual items to be
+// written can be as large as 400 KB.
+//
+// BatchWriteItem cannot update items. To update items, use the UpdateItem action.
+//
+// The individual PutItem and DeleteItem operations specified in BatchWriteItem
+// are atomic; however, BatchWriteItem as a whole is not. If any requested operations
+// fail because the table's provisioned throughput is exceeded or an internal
+// processing failure occurs, the failed operations are returned in the UnprocessedItems
+// response parameter. You can investigate and optionally resend the requests.
+// Typically, you would call BatchWriteItem in a loop. Each iteration would
+// check for unprocessed items and submit a new BatchWriteItem request with
+// those unprocessed items until all items have been processed.
+//
+// If none of the items can be processed due to insufficient provisioned throughput
+// on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException.
+//
+// If DynamoDB returns any unprocessed items, you should retry the batch operation
+// on those items. However, we strongly recommend that you use an exponential
+// backoff algorithm. If you retry the batch operation immediately, the underlying
+// read or write requests can still fail due to throttling on the individual
+// tables. If you delay the batch operation using exponential backoff, the individual
+// requests in the batch are much more likely to succeed.
+//
+// For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#Programming.Errors.BatchOperations)
+// in the Amazon DynamoDB Developer Guide.
+//
+// With BatchWriteItem, you can efficiently write or delete large amounts of
+// data, such as from Amazon EMR, or copy data from another database into DynamoDB.
+// In order to improve performance with these large-scale operations, BatchWriteItem
+// does not behave in the same way as individual PutItem and DeleteItem calls
+// would. For example, you cannot specify conditions on individual put and delete
+// requests, and BatchWriteItem does not return deleted items in the response.
+//
+// If you use a programming language that supports concurrency, you can use
+// threads to write items in parallel. Your application must include the necessary
+// logic to manage the threads. With languages that don't support threading,
+// you must update or delete the specified items one at a time. In both situations,
+// BatchWriteItem performs the specified put and delete operations in parallel,
+// giving you the power of the thread pool approach without having to introduce
+// complexity into your application.
+//
+// Parallel processing reduces latency, but each specified put and delete request
+// consumes the same number of write capacity units whether it is processed
+// in parallel or not. Delete operations on nonexistent items consume one write
+// capacity unit.
+//
+// If one or more of the following is true, DynamoDB rejects the entire batch
+// write operation:
+//
+// * One or more tables specified in the BatchWriteItem request does not
+// exist.
+//
+// * Primary key attributes specified on an item in the request do not match
+// those in the corresponding table's primary key schema.
+//
+// * You try to perform multiple operations on the same item in the same
+// BatchWriteItem request. For example, you cannot put and delete the same
+// item in the same BatchWriteItem request.
+//
+// * Your request contains at least two items with identical hash and range
+// keys (which essentially is two put operations).
+//
+// * There are more than 25 requests in the batch.
+//
+// * Any individual item in a batch exceeds 400 KB.
+//
+// * The total request size exceeds 16 MB.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation BatchWriteItem for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
+// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+// requests that receive this exception. Your request is eventually successful,
+// unless your retry queue is too large to finish. Reduce the frequency of requests
+// and use exponential backoff. For more information, go to Error Retries and
+// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+// in the Amazon DynamoDB Developer Guide.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeItemCollectionSizeLimitExceededException "ItemCollectionSizeLimitExceededException"
+// An item collection is too large. This exception is only returned for tables
+// that have one or more local secondary indexes.
+//
+// * ErrCodeRequestLimitExceeded "RequestLimitExceeded"
+// Throughput exceeds the current throughput limit for your account. Please
+// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
+// a limit increase.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItem
+func (c *DynamoDB) BatchWriteItem(input *BatchWriteItemInput) (*BatchWriteItemOutput, error) {
+ req, out := c.BatchWriteItemRequest(input)
+ return out, req.Send()
+}
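+
+// Illustrative sketch (editorial, not generated code): resubmitting
+// unprocessed items in a loop, as described above. Exponential backoff
+// between attempts is elided; "client" and "writes" are assumed to exist.
+//
+//    input := &dynamodb.BatchWriteItemInput{RequestItems: writes}
+//    for {
+//        out, err := client.BatchWriteItem(input)
+//        if err != nil {
+//            return err
+//        }
+//        if len(out.UnprocessedItems) == 0 {
+//            break
+//        }
+//        input.RequestItems = out.UnprocessedItems
+//    }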
+
+// BatchWriteItemWithContext is the same as BatchWriteItem with the addition of
+// the ability to pass a context and additional request options.
+//
+// See BatchWriteItem for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) BatchWriteItemWithContext(ctx aws.Context, input *BatchWriteItemInput, opts ...request.Option) (*BatchWriteItemOutput, error) {
+ req, out := c.BatchWriteItemRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateBackup = "CreateBackup"
+
+// CreateBackupRequest generates a "aws/request.Request" representing the
+// client's request for the CreateBackup operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateBackup for more information on using the CreateBackup
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateBackupRequest method.
+// req, resp := client.CreateBackupRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackup
+func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.Request, output *CreateBackupOutput) {
+ op := &request.Operation{
+ Name: opCreateBackup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateBackupInput{}
+ }
+
+ output = &CreateBackupOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// CreateBackup API operation for Amazon DynamoDB.
+//
+// Creates a backup for an existing table.
+//
+// Each time you create an on-demand backup, the entire table data is backed
+// up. There is no limit to the number of on-demand backups that can be taken.
+//
+// When you create an on-demand backup, a time marker of the request is cataloged,
+// and the backup is created asynchronously, by applying all changes until the
+// time of the request to the last full table snapshot. Backup requests are
+// processed instantaneously and become available for restore within minutes.
+//
+// You can call CreateBackup at a maximum rate of 50 times per second.
+//
+// All backups in DynamoDB work without consuming any provisioned throughput
+// on the table.
+//
+// If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed
+// to contain all data committed to the table up to 14:24:00, and data committed
+// after 14:26:00 will not be. The backup might contain data modifications made
+// between 14:24:00 and 14:26:00. On-demand backup does not support causal consistency.
+//
+// Along with data, the following are also included on the backups:
+//
+// * Global secondary indexes (GSIs)
+//
+// * Local secondary indexes (LSIs)
+//
+// * Streams
+//
+// * Provisioned read and write capacity
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation CreateBackup for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeTableNotFoundException "TableNotFoundException"
+// A source table with the name TableName does not currently exist within the
+// subscriber's account.
+//
+// * ErrCodeTableInUseException "TableInUseException"
+// A target table with the specified name is either being created or deleted.
+//
+// * ErrCodeContinuousBackupsUnavailableException "ContinuousBackupsUnavailableException"
+// Backups have not yet been enabled for this table.
+//
+// * ErrCodeBackupInUseException "BackupInUseException"
+// There is another ongoing conflicting backup control plane operation on the
+// table. The backup is either being created, deleted or restored to a table.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackup
+func (c *DynamoDB) CreateBackup(input *CreateBackupInput) (*CreateBackupOutput, error) {
+ req, out := c.CreateBackupRequest(input)
+ return out, req.Send()
+}
+
+// CreateBackupWithContext is the same as CreateBackup with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateBackup for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) CreateBackupWithContext(ctx aws.Context, input *CreateBackupInput, opts ...request.Option) (*CreateBackupOutput, error) {
+ req, out := c.CreateBackupRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateGlobalTable = "CreateGlobalTable"
+
+// CreateGlobalTableRequest generates a "aws/request.Request" representing the
+// client's request for the CreateGlobalTable operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateGlobalTable for more information on using the CreateGlobalTable
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateGlobalTableRequest method.
+// req, resp := client.CreateGlobalTableRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTable
+func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req *request.Request, output *CreateGlobalTableOutput) {
+ op := &request.Operation{
+ Name: opCreateGlobalTable,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateGlobalTableInput{}
+ }
+
+ output = &CreateGlobalTableOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// CreateGlobalTable API operation for Amazon DynamoDB.
+//
+// Creates a global table from an existing table. A global table creates a replication
+// relationship between two or more DynamoDB tables with the same table name
+// in the provided Regions.
+//
+// If you want to add a new replica table to a global table, each of the following
+// conditions must be true:
+//
+// * The table must have the same primary key as all of the other replicas.
+//
+// * The table must have the same name as all of the other replicas.
+//
+// * The table must have DynamoDB Streams enabled, with the stream containing
+// both the new and the old images of the item.
+//
+// * None of the replica tables in the global table can contain any data.
+//
+// If global secondary indexes are specified, then the following conditions
+// must also be met:
+//
+// * The global secondary indexes must have the same name.
+//
+// * The global secondary indexes must have the same hash key and sort key
+// (if present).
+//
+// Write capacity settings should be set consistently across your replica tables
+// and secondary indexes. DynamoDB strongly recommends enabling auto scaling
+// to manage the write capacity settings for all of your global tables replicas
+// and indexes.
+//
+// If you prefer to manage write capacity settings manually, you should provision
+// equal replicated write capacity units to your replica tables. You should
+// also provision equal replicated write capacity units to matching secondary
+// indexes across your global table.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation CreateGlobalTable for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// * ErrCodeGlobalTableAlreadyExistsException "GlobalTableAlreadyExistsException"
+// The specified global table already exists.
+//
+// * ErrCodeTableNotFoundException "TableNotFoundException"
+// A source table with the name TableName does not currently exist within the
+// subscriber's account.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTable
+func (c *DynamoDB) CreateGlobalTable(input *CreateGlobalTableInput) (*CreateGlobalTableOutput, error) {
+ req, out := c.CreateGlobalTableRequest(input)
+ return out, req.Send()
+}
+
+// CreateGlobalTableWithContext is the same as CreateGlobalTable with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateGlobalTable for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) CreateGlobalTableWithContext(ctx aws.Context, input *CreateGlobalTableInput, opts ...request.Option) (*CreateGlobalTableOutput, error) {
+ req, out := c.CreateGlobalTableRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
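+
+// Example: creating a global table from two existing, empty replica tables.
+// A minimal sketch; the table name and Regions are hypothetical, and error
+// handling is elided.
+//
+//    input := &dynamodb.CreateGlobalTableInput{
+//        GlobalTableName: aws.String("Music"),
+//        ReplicationGroup: []*dynamodb.Replica{
+//            {RegionName: aws.String("us-east-1")},
+//            {RegionName: aws.String("us-west-2")},
+//        },
+//    }
+//    out, err := client.CreateGlobalTable(input)
+//    if err != nil {
+//        // e.g. ErrCodeGlobalTableAlreadyExistsException
+//    }
+//    fmt.Println(out)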
+
+const opCreateTable = "CreateTable"
+
+// CreateTableRequest generates a "aws/request.Request" representing the
+// client's request for the CreateTable operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateTable for more information on using the CreateTable
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateTableRequest method.
+// req, resp := client.CreateTableRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTable
+func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Request, output *CreateTableOutput) {
+ op := &request.Operation{
+ Name: opCreateTable,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateTableInput{}
+ }
+
+ output = &CreateTableOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// CreateTable API operation for Amazon DynamoDB.
+//
+// The CreateTable operation adds a new table to your account. In an AWS account,
+// table names must be unique within each Region. That is, you can have two
+// tables with the same name if you create the tables in different Regions.
+//
+// CreateTable is an asynchronous operation. Upon receiving a CreateTable request,
+// DynamoDB immediately returns a response with a TableStatus of CREATING. After
+// the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform
+// read and write operations only on an ACTIVE table.
+//
+// You can optionally define secondary indexes on the new table, as part of
+// the CreateTable operation. If you want to create multiple tables with secondary
+// indexes on them, you must create the tables sequentially. Only one table
+// with secondary indexes can be in the CREATING state at any given time.
+//
+// You can use the DescribeTable action to check the table status.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation CreateTable for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeResourceInUseException "ResourceInUseException"
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTable
+func (c *DynamoDB) CreateTable(input *CreateTableInput) (*CreateTableOutput, error) {
+ req, out := c.CreateTableRequest(input)
+ return out, req.Send()
+}
+
+// CreateTableWithContext is the same as CreateTable with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateTable for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) CreateTableWithContext(ctx aws.Context, input *CreateTableInput, opts ...request.Option) (*CreateTableOutput, error) {
+ req, out := c.CreateTableRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
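+
+// Example: creating a table with a composite primary key. A minimal sketch;
+// the table name, attribute names, and capacity values are hypothetical.
+//
+//    input := &dynamodb.CreateTableInput{
+//        TableName: aws.String("Music"),
+//        AttributeDefinitions: []*dynamodb.AttributeDefinition{
+//            {AttributeName: aws.String("Artist"), AttributeType: aws.String("S")},
+//            {AttributeName: aws.String("SongTitle"), AttributeType: aws.String("S")},
+//        },
+//        KeySchema: []*dynamodb.KeySchemaElement{
+//            {AttributeName: aws.String("Artist"), KeyType: aws.String("HASH")},
+//            {AttributeName: aws.String("SongTitle"), KeyType: aws.String("RANGE")},
+//        },
+//        ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
+//            ReadCapacityUnits:  aws.Int64(5),
+//            WriteCapacityUnits: aws.Int64(5),
+//        },
+//    }
+//    if _, err := client.CreateTable(input); err != nil {
+//        // e.g. ErrCodeResourceInUseException if the table already exists
+//    }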
+
+const opDeleteBackup = "DeleteBackup"
+
+// DeleteBackupRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBackup operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteBackup for more information on using the DeleteBackup
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteBackupRequest method.
+// req, resp := client.DeleteBackupRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackup
+func (c *DynamoDB) DeleteBackupRequest(input *DeleteBackupInput) (req *request.Request, output *DeleteBackupOutput) {
+ op := &request.Operation{
+ Name: opDeleteBackup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteBackupInput{}
+ }
+
+ output = &DeleteBackupOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// DeleteBackup API operation for Amazon DynamoDB.
+//
+// Deletes an existing backup of a table.
+//
+// You can call DeleteBackup at a maximum rate of 10 times per second.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation DeleteBackup for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeBackupNotFoundException "BackupNotFoundException"
+// Backup not found for the given BackupARN.
+//
+// * ErrCodeBackupInUseException "BackupInUseException"
+// There is another ongoing conflicting backup control plane operation on the
+// table. The backup is either being created, deleted or restored to a table.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackup
+func (c *DynamoDB) DeleteBackup(input *DeleteBackupInput) (*DeleteBackupOutput, error) {
+ req, out := c.DeleteBackupRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBackupWithContext is the same as DeleteBackup with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBackup for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) DeleteBackupWithContext(ctx aws.Context, input *DeleteBackupInput, opts ...request.Option) (*DeleteBackupOutput, error) {
+ req, out := c.DeleteBackupRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteItem = "DeleteItem"
+
+// DeleteItemRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteItem operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteItem for more information on using the DeleteItem
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteItemRequest method.
+// req, resp := client.DeleteItemRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItem
+func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Request, output *DeleteItemOutput) {
+ op := &request.Operation{
+ Name: opDeleteItem,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteItemInput{}
+ }
+
+ output = &DeleteItemOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// DeleteItem API operation for Amazon DynamoDB.
+//
+// Deletes a single item in a table by primary key. You can perform a conditional
+// delete operation that deletes the item if it exists, or if it has an expected
+// attribute value.
+//
+// In addition to deleting an item, you can also return the item's attribute
+// values in the same operation, using the ReturnValues parameter.
+//
+// Unless you specify conditions, DeleteItem is an idempotent operation;
+// running it multiple times on the same item or attribute does not result in
+// an error response.
+//
+// Conditional deletes are useful for deleting items only if specific conditions
+// are met. If those conditions are met, DynamoDB performs the delete. Otherwise,
+// the item is not deleted.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation DeleteItem for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeConditionalCheckFailedException "ConditionalCheckFailedException"
+// A condition specified in the operation could not be evaluated.
+//
+// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
+// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+// requests that receive this exception. Your request is eventually successful,
+// unless your retry queue is too large to finish. Reduce the frequency of requests
+// and use exponential backoff. For more information, go to Error Retries and
+// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+// in the Amazon DynamoDB Developer Guide.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeItemCollectionSizeLimitExceededException "ItemCollectionSizeLimitExceededException"
+// An item collection is too large. This exception is only returned for tables
+// that have one or more local secondary indexes.
+//
+// * ErrCodeTransactionConflictException "TransactionConflictException"
+// Operation was rejected because there is an ongoing transaction for the item.
+//
+// * ErrCodeRequestLimitExceeded "RequestLimitExceeded"
+// Throughput exceeds the current throughput limit for your account. Please
+// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
+// a limit increase.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItem
+func (c *DynamoDB) DeleteItem(input *DeleteItemInput) (*DeleteItemOutput, error) {
+ req, out := c.DeleteItemRequest(input)
+ return out, req.Send()
+}
+
+// DeleteItemWithContext is the same as DeleteItem with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteItem for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) DeleteItemWithContext(ctx aws.Context, input *DeleteItemInput, opts ...request.Option) (*DeleteItemOutput, error) {
+ req, out := c.DeleteItemRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
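+
+// Example: a conditional delete that removes the item only while its Price
+// attribute is at or below an expected value. A minimal sketch; the table,
+// key, and values are hypothetical.
+//
+//    out, err := client.DeleteItem(&dynamodb.DeleteItemInput{
+//        TableName: aws.String("ProductCatalog"),
+//        Key: map[string]*dynamodb.AttributeValue{
+//            "Id": {N: aws.String("456")},
+//        },
+//        ConditionExpression: aws.String("Price <= :v"),
+//        ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+//            ":v": {N: aws.String("10")},
+//        },
+//        ReturnValues: aws.String("ALL_OLD"), // return the deleted item's attributes
+//    })
+//    // ErrCodeConditionalCheckFailedException means the condition was not met.
+//    fmt.Println(out, err)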
+
+const opDeleteTable = "DeleteTable"
+
+// DeleteTableRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteTable operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteTable for more information on using the DeleteTable
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteTableRequest method.
+// req, resp := client.DeleteTableRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTable
+func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Request, output *DeleteTableOutput) {
+ op := &request.Operation{
+ Name: opDeleteTable,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteTableInput{}
+ }
+
+ output = &DeleteTableOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// DeleteTable API operation for Amazon DynamoDB.
+//
+// The DeleteTable operation deletes a table and all of its items. After a DeleteTable
+// request, the specified table is in the DELETING state until DynamoDB completes
+// the deletion. If the table is in the ACTIVE state, you can delete it. If
+// a table is in the CREATING or UPDATING state, DynamoDB returns a ResourceInUseException.
+// If the specified table does not exist, DynamoDB returns a ResourceNotFoundException.
+// If the table is already in the DELETING state, no error is returned.
+//
+// DynamoDB might continue to accept data read and write operations, such as
+// GetItem and PutItem, on a table in the DELETING state until the table deletion
+// is complete.
+//
+// When you delete a table, any indexes on that table are also deleted.
+//
+// If you have DynamoDB Streams enabled on the table, then the corresponding
+// stream on that table goes into the DISABLED state, and the stream is automatically
+// deleted after 24 hours.
+//
+// Use the DescribeTable action to check the status of the table.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation DeleteTable for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeResourceInUseException "ResourceInUseException"
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTable
+func (c *DynamoDB) DeleteTable(input *DeleteTableInput) (*DeleteTableOutput, error) {
+ req, out := c.DeleteTableRequest(input)
+ return out, req.Send()
+}
+
+// DeleteTableWithContext is the same as DeleteTable with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteTable for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) DeleteTableWithContext(ctx aws.Context, input *DeleteTableInput, opts ...request.Option) (*DeleteTableOutput, error) {
+ req, out := c.DeleteTableRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
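+
+// Example: deleting a table and blocking until the deletion completes. A
+// minimal sketch; the SDK's WaitUntilTableNotExists waiter polls DescribeTable
+// until the table is gone.
+//
+//    if _, err := client.DeleteTable(&dynamodb.DeleteTableInput{
+//        TableName: aws.String("Music"),
+//    }); err != nil {
+//        // e.g. ErrCodeResourceInUseException if the table is still CREATING
+//    }
+//    if err := client.WaitUntilTableNotExists(&dynamodb.DescribeTableInput{
+//        TableName: aws.String("Music"),
+//    }); err != nil {
+//        // deletion did not finish within the waiter's retry budget
+//    }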
+
+const opDescribeBackup = "DescribeBackup"
+
+// DescribeBackupRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeBackup operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeBackup for more information on using the DescribeBackup
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DescribeBackupRequest method.
+// req, resp := client.DescribeBackupRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackup
+func (c *DynamoDB) DescribeBackupRequest(input *DescribeBackupInput) (req *request.Request, output *DescribeBackupOutput) {
+ op := &request.Operation{
+ Name: opDescribeBackup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeBackupInput{}
+ }
+
+ output = &DescribeBackupOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// DescribeBackup API operation for Amazon DynamoDB.
+//
+// Describes an existing backup of a table.
+//
+// You can call DescribeBackup at a maximum rate of 10 times per second.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation DescribeBackup for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeBackupNotFoundException "BackupNotFoundException"
+// Backup not found for the given BackupARN.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackup
+func (c *DynamoDB) DescribeBackup(input *DescribeBackupInput) (*DescribeBackupOutput, error) {
+ req, out := c.DescribeBackupRequest(input)
+ return out, req.Send()
+}
+
+// DescribeBackupWithContext is the same as DescribeBackup with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeBackup for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) DescribeBackupWithContext(ctx aws.Context, input *DescribeBackupInput, opts ...request.Option) (*DescribeBackupOutput, error) {
+ req, out := c.DescribeBackupRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDescribeContinuousBackups = "DescribeContinuousBackups"
+
+// DescribeContinuousBackupsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeContinuousBackups operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeContinuousBackups for more information on using the DescribeContinuousBackups
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DescribeContinuousBackupsRequest method.
+// req, resp := client.DescribeContinuousBackupsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackups
+func (c *DynamoDB) DescribeContinuousBackupsRequest(input *DescribeContinuousBackupsInput) (req *request.Request, output *DescribeContinuousBackupsOutput) {
+ op := &request.Operation{
+ Name: opDescribeContinuousBackups,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeContinuousBackupsInput{}
+ }
+
+ output = &DescribeContinuousBackupsOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// DescribeContinuousBackups API operation for Amazon DynamoDB.
+//
+// Checks the status of continuous backups and point in time recovery on the
+// specified table. Continuous backups are ENABLED on all tables at table creation.
+// If point in time recovery is enabled, PointInTimeRecoveryStatus will be set
+// to ENABLED.
+//
+// After continuous backups and point in time recovery are enabled, you can
+// restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.
+//
+// LatestRestorableDateTime is typically 5 minutes before the current time.
+// You can restore your table to any point in time during the last 35 days.
+//
+// You can call DescribeContinuousBackups at a maximum rate of 10 times per
+// second.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation DescribeContinuousBackups for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeTableNotFoundException "TableNotFoundException"
+// A source table with the name TableName does not currently exist within the
+// subscriber's account.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackups
+func (c *DynamoDB) DescribeContinuousBackups(input *DescribeContinuousBackupsInput) (*DescribeContinuousBackupsOutput, error) {
+ req, out := c.DescribeContinuousBackupsRequest(input)
+ return out, req.Send()
+}
+
+// DescribeContinuousBackupsWithContext is the same as DescribeContinuousBackups with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeContinuousBackups for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) DescribeContinuousBackupsWithContext(ctx aws.Context, input *DescribeContinuousBackupsInput, opts ...request.Option) (*DescribeContinuousBackupsOutput, error) {
+ req, out := c.DescribeContinuousBackupsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
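+
+// Example: checking whether point in time recovery is enabled on a table. A
+// minimal sketch; the table name is hypothetical and the nested description
+// fields are assumed to be populated on success.
+//
+//    out, err := client.DescribeContinuousBackups(&dynamodb.DescribeContinuousBackupsInput{
+//        TableName: aws.String("Music"),
+//    })
+//    if err == nil {
+//        pitr := out.ContinuousBackupsDescription.PointInTimeRecoveryDescription
+//        fmt.Println(aws.StringValue(pitr.PointInTimeRecoveryStatus)) // ENABLED or DISABLED
+//    }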
+
+const opDescribeEndpoints = "DescribeEndpoints"
+
+// DescribeEndpointsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeEndpoints operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeEndpoints for more information on using the DescribeEndpoints
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DescribeEndpointsRequest method.
+// req, resp := client.DescribeEndpointsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpoints
+func (c *DynamoDB) DescribeEndpointsRequest(input *DescribeEndpointsInput) (req *request.Request, output *DescribeEndpointsOutput) {
+ op := &request.Operation{
+ Name: opDescribeEndpoints,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeEndpointsInput{}
+ }
+
+ output = &DescribeEndpointsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeEndpoints API operation for Amazon DynamoDB.
+//
+// Returns the regional endpoint information.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation DescribeEndpoints for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpoints
+func (c *DynamoDB) DescribeEndpoints(input *DescribeEndpointsInput) (*DescribeEndpointsOutput, error) {
+ req, out := c.DescribeEndpointsRequest(input)
+ return out, req.Send()
+}
+
+// DescribeEndpointsWithContext is the same as DescribeEndpoints with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeEndpoints for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) DescribeEndpointsWithContext(ctx aws.Context, input *DescribeEndpointsInput, opts ...request.Option) (*DescribeEndpointsOutput, error) {
+ req, out := c.DescribeEndpointsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
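+// discovererDescribeEndpoints discovers DynamoDB endpoints for endpoint
+// discovery. It satisfies the crr.Discoverer interface used by the endpoint
+// cache: Discover fetches fresh endpoints via DescribeEndpoints, and Handler
+// rewrites an outgoing request's URL with a cached endpoint.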
+type discovererDescribeEndpoints struct {
+ Client *DynamoDB
+ Required bool
+ EndpointCache *crr.EndpointCache
+ Params map[string]*string
+ Key string
+}
+
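+// Discover calls DescribeEndpoints, converts each returned address into a
+// weighted address that expires after the advertised cache period, stores the
+// resulting endpoint in the endpoint cache under the discoverer's key, and
+// returns it. Addresses that are missing or fail to parse are skipped.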
+func (d *discovererDescribeEndpoints) Discover() (crr.Endpoint, error) {
+ input := &DescribeEndpointsInput{}
+
+ resp, err := d.Client.DescribeEndpoints(input)
+ if err != nil {
+ return crr.Endpoint{}, err
+ }
+
+ endpoint := crr.Endpoint{
+ Key: d.Key,
+ }
+
+ for _, e := range resp.Endpoints {
+ if e.Address == nil {
+ continue
+ }
+
+ cachedInMinutes := aws.Int64Value(e.CachePeriodInMinutes)
+ u, err := url.Parse(*e.Address)
+ if err != nil {
+ continue
+ }
+
+ addr := crr.WeightedAddress{
+ URL: u,
+ Expired: time.Now().Add(time.Duration(cachedInMinutes) * time.Minute),
+ }
+
+ endpoint.Add(addr)
+ }
+
+ d.EndpointCache.Add(endpoint)
+
+ return endpoint, nil
+}
+
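+// Handler is a request build handler that resolves the request's endpoint
+// from the endpoint cache, triggering discovery when required, and swaps the
+// cached URL into the outgoing HTTP request when one is available.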
+func (d *discovererDescribeEndpoints) Handler(r *request.Request) {
+ endpointKey := crr.BuildEndpointKey(d.Params)
+ d.Key = endpointKey
+
+ endpoint, err := d.EndpointCache.Get(d, endpointKey, d.Required)
+ if err != nil {
+ r.Error = err
+ return
+ }
+
+ if endpoint.URL != nil && len(endpoint.URL.String()) > 0 {
+ r.HTTPRequest.URL = endpoint.URL
+ }
+}
+
+const opDescribeGlobalTable = "DescribeGlobalTable"
+
+// DescribeGlobalTableRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeGlobalTable operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeGlobalTable for more information on using the DescribeGlobalTable
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DescribeGlobalTableRequest method.
+// req, resp := client.DescribeGlobalTableRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTable
+func (c *DynamoDB) DescribeGlobalTableRequest(input *DescribeGlobalTableInput) (req *request.Request, output *DescribeGlobalTableOutput) {
+ op := &request.Operation{
+ Name: opDescribeGlobalTable,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeGlobalTableInput{}
+ }
+
+ output = &DescribeGlobalTableOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// DescribeGlobalTable API operation for Amazon DynamoDB.
+//
+// Returns information about the specified global table.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation DescribeGlobalTable for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// * ErrCodeGlobalTableNotFoundException "GlobalTableNotFoundException"
+// The specified global table does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTable
+func (c *DynamoDB) DescribeGlobalTable(input *DescribeGlobalTableInput) (*DescribeGlobalTableOutput, error) {
+ req, out := c.DescribeGlobalTableRequest(input)
+ return out, req.Send()
+}
+
+// DescribeGlobalTableWithContext is the same as DescribeGlobalTable with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeGlobalTable for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) DescribeGlobalTableWithContext(ctx aws.Context, input *DescribeGlobalTableInput, opts ...request.Option) (*DescribeGlobalTableOutput, error) {
+ req, out := c.DescribeGlobalTableRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDescribeGlobalTableSettings = "DescribeGlobalTableSettings"
+
+// DescribeGlobalTableSettingsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeGlobalTableSettings operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeGlobalTableSettings for more information on using the DescribeGlobalTableSettings
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DescribeGlobalTableSettingsRequest method.
+// req, resp := client.DescribeGlobalTableSettingsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettings
+func (c *DynamoDB) DescribeGlobalTableSettingsRequest(input *DescribeGlobalTableSettingsInput) (req *request.Request, output *DescribeGlobalTableSettingsOutput) {
+ op := &request.Operation{
+ Name: opDescribeGlobalTableSettings,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeGlobalTableSettingsInput{}
+ }
+
+ output = &DescribeGlobalTableSettingsOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// DescribeGlobalTableSettings API operation for Amazon DynamoDB.
+//
+// Describes Region-specific settings for a global table.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation DescribeGlobalTableSettings for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeGlobalTableNotFoundException "GlobalTableNotFoundException"
+// The specified global table does not exist.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettings
+func (c *DynamoDB) DescribeGlobalTableSettings(input *DescribeGlobalTableSettingsInput) (*DescribeGlobalTableSettingsOutput, error) {
+ req, out := c.DescribeGlobalTableSettingsRequest(input)
+ return out, req.Send()
+}
+
+// DescribeGlobalTableSettingsWithContext is the same as DescribeGlobalTableSettings with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeGlobalTableSettings for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) DescribeGlobalTableSettingsWithContext(ctx aws.Context, input *DescribeGlobalTableSettingsInput, opts ...request.Option) (*DescribeGlobalTableSettingsOutput, error) {
+ req, out := c.DescribeGlobalTableSettingsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDescribeLimits = "DescribeLimits"
+
+// DescribeLimitsRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeLimits operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeLimits for more information on using the DescribeLimits
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DescribeLimitsRequest method.
+// req, resp := client.DescribeLimitsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimits
+func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *request.Request, output *DescribeLimitsOutput) {
+ op := &request.Operation{
+ Name: opDescribeLimits,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeLimitsInput{}
+ }
+
+ output = &DescribeLimitsOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// DescribeLimits API operation for Amazon DynamoDB.
+//
+// Returns the current provisioned-capacity limits for your AWS account in a
+// Region, both for the Region as a whole and for any one DynamoDB table that
+// you create there.
+//
+// When you establish an AWS account, the account has initial limits on the
+// maximum read capacity units and write capacity units that you can provision
+// across all of your DynamoDB tables in a given Region. Also, there are per-table
+// limits that apply when you create a table there. For more information, see
+// the Limits (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+// page in the Amazon DynamoDB Developer Guide.
+//
+// Although you can increase these limits by filing a case at AWS Support Center
+// (https://console.aws.amazon.com/support/home#/), obtaining the increase is
+// not instantaneous. The DescribeLimits action lets you write code to compare
+// the capacity you are currently using to those limits imposed by your account
+// so that you have enough time to apply for an increase before you hit a limit.
+//
+// For example, you could use one of the AWS SDKs to do the following:
+//
+// Call DescribeLimits for a particular Region to obtain your current account
+// limits on provisioned capacity there.
+//
+// Create a variable to hold the aggregate read capacity units provisioned for
+// all your tables in that Region, and one to hold the aggregate write capacity
+// units. Zero them both.
+//
+// Call ListTables to obtain a list of all your DynamoDB tables.
+//
+// For each table name listed by ListTables, do the following:
+//
+// * Call DescribeTable with the table name.
+//
+// * Use the data returned by DescribeTable to add the read capacity units
+// and write capacity units provisioned for the table itself to your variables.
+//
+// * If the table has one or more global secondary indexes (GSIs), loop over
+// these GSIs and add their provisioned capacity values to your variables
+// as well.
+//
+// Report the account limits for that Region returned by DescribeLimits, along
+// with the total current provisioned capacity levels you have calculated.
+//
+// This will let you see whether you are getting close to your account-level
+// limits.
+//
+// The per-table limits apply only when you are creating a new table. They restrict
+// the sum of the provisioned capacity of the new table itself and all its global
+// secondary indexes.
+//
+// For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned
+// capacity extremely rapidly. But the only upper limit that applies is that
+// the aggregate provisioned capacity over all your tables and GSIs cannot exceed
+// either of the per-account limits.
+//
+// DescribeLimits should only be called periodically. You can expect throttling
+// errors if you call it more than once in a minute.
+//
+// The DescribeLimits Request element has no content.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation DescribeLimits for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimits
+func (c *DynamoDB) DescribeLimits(input *DescribeLimitsInput) (*DescribeLimitsOutput, error) {
+ req, out := c.DescribeLimitsRequest(input)
+ return out, req.Send()
+}
+
+// DescribeLimitsWithContext is the same as DescribeLimits with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeLimits for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) DescribeLimitsWithContext(ctx aws.Context, input *DescribeLimitsInput, opts ...request.Option) (*DescribeLimitsOutput, error) {
+ req, out := c.DescribeLimitsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
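+
+// Example: comparing aggregate provisioned capacity against the account-level
+// limits, following the steps described in the DescribeLimits documentation
+// above. A minimal sketch; ListTablesPages handles pagination, and per-table
+// errors are simply skipped.
+//
+//    limits, err := client.DescribeLimits(&dynamodb.DescribeLimitsInput{})
+//    if err != nil {
+//        // handle error
+//    }
+//    var readUnits, writeUnits int64
+//    _ = client.ListTablesPages(&dynamodb.ListTablesInput{},
+//        func(page *dynamodb.ListTablesOutput, lastPage bool) bool {
+//            for _, name := range page.TableNames {
+//                out, err := client.DescribeTable(&dynamodb.DescribeTableInput{TableName: name})
+//                if err != nil {
+//                    continue
+//                }
+//                readUnits += aws.Int64Value(out.Table.ProvisionedThroughput.ReadCapacityUnits)
+//                writeUnits += aws.Int64Value(out.Table.ProvisionedThroughput.WriteCapacityUnits)
+//                for _, gsi := range out.Table.GlobalSecondaryIndexes {
+//                    readUnits += aws.Int64Value(gsi.ProvisionedThroughput.ReadCapacityUnits)
+//                    writeUnits += aws.Int64Value(gsi.ProvisionedThroughput.WriteCapacityUnits)
+//                }
+//            }
+//            return true
+//        })
+//    fmt.Printf("reads: %d of %d, writes: %d of %d\n",
+//        readUnits, aws.Int64Value(limits.AccountMaxReadCapacityUnits),
+//        writeUnits, aws.Int64Value(limits.AccountMaxWriteCapacityUnits))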
+
+const opDescribeTable = "DescribeTable"
+
+// DescribeTableRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeTable operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeTable for more information on using the DescribeTable
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DescribeTableRequest method.
+// req, resp := client.DescribeTableRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTable
+func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request.Request, output *DescribeTableOutput) {
+ op := &request.Operation{
+ Name: opDescribeTable,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeTableInput{}
+ }
+
+ output = &DescribeTableOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// DescribeTable API operation for Amazon DynamoDB.
+//
+// Returns information about the table, including the current status of the
+// table, when it was created, the primary key schema, and any indexes on the
+// table.
+//
+// If you issue a DescribeTable request immediately after a CreateTable request,
+// DynamoDB might return a ResourceNotFoundException. This is because DescribeTable
+// uses an eventually consistent query, and the metadata for your table might
+// not be available at that moment. Wait for a few seconds, and then try the
+// DescribeTable request again.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation DescribeTable for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTable
+func (c *DynamoDB) DescribeTable(input *DescribeTableInput) (*DescribeTableOutput, error) {
+ req, out := c.DescribeTableRequest(input)
+ return out, req.Send()
+}
+
+// DescribeTableWithContext is the same as DescribeTable with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeTable for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) DescribeTableWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.Option) (*DescribeTableOutput, error) {
+ req, out := c.DescribeTableRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
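+
+// Example: because table metadata is eventually consistent, DescribeTable can
+// return ResourceNotFoundException right after CreateTable. A minimal sketch
+// using the SDK's WaitUntilTableExists waiter, which retries DescribeTable
+// until the table is ACTIVE:
+//
+//    if err := client.WaitUntilTableExists(&dynamodb.DescribeTableInput{
+//        TableName: aws.String("Music"),
+//    }); err != nil {
+//        // the table did not become ACTIVE within the waiter's retry budget
+//    }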
+
+const opDescribeTimeToLive = "DescribeTimeToLive"
+
+// DescribeTimeToLiveRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeTimeToLive operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeTimeToLive for more information on using the DescribeTimeToLive
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DescribeTimeToLiveRequest method.
+// req, resp := client.DescribeTimeToLiveRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLive
+func (c *DynamoDB) DescribeTimeToLiveRequest(input *DescribeTimeToLiveInput) (req *request.Request, output *DescribeTimeToLiveOutput) {
+ op := &request.Operation{
+ Name: opDescribeTimeToLive,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DescribeTimeToLiveInput{}
+ }
+
+ output = &DescribeTimeToLiveOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// DescribeTimeToLive API operation for Amazon DynamoDB.
+//
+// Gives a description of the Time to Live (TTL) status on the specified table.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation DescribeTimeToLive for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLive
+func (c *DynamoDB) DescribeTimeToLive(input *DescribeTimeToLiveInput) (*DescribeTimeToLiveOutput, error) {
+ req, out := c.DescribeTimeToLiveRequest(input)
+ return out, req.Send()
+}
+
+// DescribeTimeToLiveWithContext is the same as DescribeTimeToLive with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeTimeToLive for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) DescribeTimeToLiveWithContext(ctx aws.Context, input *DescribeTimeToLiveInput, opts ...request.Option) (*DescribeTimeToLiveOutput, error) {
+ req, out := c.DescribeTimeToLiveRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
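+// Illustrative sketch (not generated SDK code): inspecting a table's TTL status
+// from the DescribeTimeToLive response; "svc" is assumed to be an existing
+// *DynamoDB client and the table name is hypothetical:
+//
+//    out, err := svc.DescribeTimeToLive(&dynamodb.DescribeTimeToLiveInput{
+//        TableName: aws.String("MyTable"),
+//    })
+//    if err == nil && out.TimeToLiveDescription != nil {
+//        fmt.Println(aws.StringValue(out.TimeToLiveDescription.TimeToLiveStatus))
+//    }
+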
+const opGetItem = "GetItem"
+
+// GetItemRequest generates an "aws/request.Request" representing the
+// client's request for the GetItem operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetItem for more information on using the GetItem
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetItemRequest method.
+// req, resp := client.GetItemRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem
+func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, output *GetItemOutput) {
+ op := &request.Operation{
+ Name: opGetItem,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetItemInput{}
+ }
+
+ output = &GetItemOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// GetItem API operation for Amazon DynamoDB.
+//
+// The GetItem operation returns a set of attributes for the item with the given
+// primary key. If there is no matching item, GetItem does not return any data
+// and there will be no Item element in the response.
+//
+// GetItem provides an eventually consistent read by default. If your application
+// requires a strongly consistent read, set ConsistentRead to true. Although
+// a strongly consistent read might take more time than an eventually consistent
+// read, it always returns the last updated value.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation GetItem for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
+// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+// requests that receive this exception. Your request is eventually successful,
+// unless your retry queue is too large to finish. Reduce the frequency of requests
+// and use exponential backoff. For more information, go to Error Retries and
+// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+// in the Amazon DynamoDB Developer Guide.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeRequestLimitExceeded "RequestLimitExceeded"
+// Throughput exceeds the current throughput limit for your account. Please
+// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
+// a limit increase.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem
+func (c *DynamoDB) GetItem(input *GetItemInput) (*GetItemOutput, error) {
+ req, out := c.GetItemRequest(input)
+ return out, req.Send()
+}
+
+// GetItemWithContext is the same as GetItem with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetItem for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) GetItemWithContext(ctx aws.Context, input *GetItemInput, opts ...request.Option) (*GetItemOutput, error) {
+ req, out := c.GetItemRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
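+// Illustrative sketch (not generated SDK code): a strongly consistent GetItem,
+// per the ConsistentRead note above; "svc" is assumed to be an existing
+// *DynamoDB client, and the table and key attribute names are hypothetical:
+//
+//    out, err := svc.GetItem(&dynamodb.GetItemInput{
+//        TableName: aws.String("MyTable"),
+//        Key: map[string]*dynamodb.AttributeValue{
+//            "ID": {S: aws.String("item-1")},
+//        },
+//        ConsistentRead: aws.Bool(true), // opt out of the eventually consistent default
+//    })
+//    if err == nil && out.Item == nil {
+//        // no matching item: the response carries no Item element
+//    }
+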
+const opListBackups = "ListBackups"
+
+// ListBackupsRequest generates an "aws/request.Request" representing the
+// client's request for the ListBackups operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListBackups for more information on using the ListBackups
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the ListBackupsRequest method.
+// req, resp := client.ListBackupsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups
+func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Request, output *ListBackupsOutput) {
+ op := &request.Operation{
+ Name: opListBackups,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListBackupsInput{}
+ }
+
+ output = &ListBackupsOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// ListBackups API operation for Amazon DynamoDB.
+//
+// List backups associated with an AWS account. To list backups for a given
+// table, specify TableName. ListBackups returns a paginated list of results
+// with at most 1 MB worth of items in a page. You can also specify a limit
+// for the maximum number of entries to be returned in a page.
+//
+// In the request, start time is inclusive, but end time is exclusive. Note
+// that these limits are for the time at which the original backup was requested.
+//
+// You can call ListBackups a maximum of five times per second.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation ListBackups for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups
+func (c *DynamoDB) ListBackups(input *ListBackupsInput) (*ListBackupsOutput, error) {
+ req, out := c.ListBackupsRequest(input)
+ return out, req.Send()
+}
+
+// ListBackupsWithContext is the same as ListBackups with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBackups for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) ListBackupsWithContext(ctx aws.Context, input *ListBackupsInput, opts ...request.Option) (*ListBackupsOutput, error) {
+ req, out := c.ListBackupsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
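+// Illustrative sketch (not generated SDK code): listing backups for one table
+// within a time window (the start time is inclusive, the end time exclusive),
+// following LastEvaluatedBackupArn to page through results; "svc", the table
+// name, and the time bounds are assumptions:
+//
+//    in := &dynamodb.ListBackupsInput{
+//        TableName:           aws.String("MyTable"),
+//        Limit:               aws.Int64(10),
+//        TimeRangeLowerBound: aws.Time(time.Now().AddDate(0, -1, 0)),
+//        TimeRangeUpperBound: aws.Time(time.Now()),
+//    }
+//    for {
+//        out, err := svc.ListBackups(in)
+//        if err != nil {
+//            break
+//        }
+//        for _, b := range out.BackupSummaries {
+//            fmt.Println(aws.StringValue(b.BackupArn))
+//        }
+//        if out.LastEvaluatedBackupArn == nil {
+//            break // no more pages
+//        }
+//        in.ExclusiveStartBackupArn = out.LastEvaluatedBackupArn
+//    }
+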
+const opListGlobalTables = "ListGlobalTables"
+
+// ListGlobalTablesRequest generates an "aws/request.Request" representing the
+// client's request for the ListGlobalTables operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListGlobalTables for more information on using the ListGlobalTables
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the ListGlobalTablesRequest method.
+// req, resp := client.ListGlobalTablesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables
+func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *request.Request, output *ListGlobalTablesOutput) {
+ op := &request.Operation{
+ Name: opListGlobalTables,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListGlobalTablesInput{}
+ }
+
+ output = &ListGlobalTablesOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// ListGlobalTables API operation for Amazon DynamoDB.
+//
+// Lists all global tables that have a replica in the specified Region.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation ListGlobalTables for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables
+func (c *DynamoDB) ListGlobalTables(input *ListGlobalTablesInput) (*ListGlobalTablesOutput, error) {
+ req, out := c.ListGlobalTablesRequest(input)
+ return out, req.Send()
+}
+
+// ListGlobalTablesWithContext is the same as ListGlobalTables with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListGlobalTables for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) ListGlobalTablesWithContext(ctx aws.Context, input *ListGlobalTablesInput, opts ...request.Option) (*ListGlobalTablesOutput, error) {
+ req, out := c.ListGlobalTablesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListTables = "ListTables"
+
+// ListTablesRequest generates an "aws/request.Request" representing the
+// client's request for the ListTables operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListTables for more information on using the ListTables
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the ListTablesRequest method.
+// req, resp := client.ListTablesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables
+func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) {
+ op := &request.Operation{
+ Name: opListTables,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"ExclusiveStartTableName"},
+ OutputTokens: []string{"LastEvaluatedTableName"},
+ LimitToken: "Limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListTablesInput{}
+ }
+
+ output = &ListTablesOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// ListTables API operation for Amazon DynamoDB.
+//
+// Returns an array of table names associated with the current account and endpoint.
+// The output from ListTables is paginated, with each page returning a maximum
+// of 100 table names.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation ListTables for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables
+func (c *DynamoDB) ListTables(input *ListTablesInput) (*ListTablesOutput, error) {
+ req, out := c.ListTablesRequest(input)
+ return out, req.Send()
+}
+
+// ListTablesWithContext is the same as ListTables with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListTables for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) ListTablesWithContext(ctx aws.Context, input *ListTablesInput, opts ...request.Option) (*ListTablesOutput, error) {
+ req, out := c.ListTablesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListTablesPages iterates over the pages of a ListTables operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListTables method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListTables operation.
+// pageNum := 0
+// err := client.ListTablesPages(params,
+// func(page *dynamodb.ListTablesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *DynamoDB) ListTablesPages(input *ListTablesInput, fn func(*ListTablesOutput, bool) bool) error {
+ return c.ListTablesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListTablesPagesWithContext same as ListTablesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) ListTablesPagesWithContext(ctx aws.Context, input *ListTablesInput, fn func(*ListTablesOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListTablesInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListTablesRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListTablesOutput), !p.HasNextPage())
+ }
+ return p.Err()
+}
+
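+// Illustrative sketch (not generated SDK code): paginating ListTables by hand
+// with the same tokens the Paginator above is configured with, feeding
+// LastEvaluatedTableName back in as ExclusiveStartTableName; "svc" is assumed
+// to be an existing *DynamoDB client:
+//
+//    in := &dynamodb.ListTablesInput{Limit: aws.Int64(100)}
+//    for {
+//        out, err := svc.ListTables(in)
+//        if err != nil {
+//            break
+//        }
+//        for _, name := range out.TableNames {
+//            fmt.Println(aws.StringValue(name))
+//        }
+//        if out.LastEvaluatedTableName == nil {
+//            break // no more pages
+//        }
+//        in.ExclusiveStartTableName = out.LastEvaluatedTableName
+//    }
+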
+const opListTagsOfResource = "ListTagsOfResource"
+
+// ListTagsOfResourceRequest generates an "aws/request.Request" representing the
+// client's request for the ListTagsOfResource operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListTagsOfResource for more information on using the ListTagsOfResource
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the ListTagsOfResourceRequest method.
+// req, resp := client.ListTagsOfResourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource
+func (c *DynamoDB) ListTagsOfResourceRequest(input *ListTagsOfResourceInput) (req *request.Request, output *ListTagsOfResourceOutput) {
+ op := &request.Operation{
+ Name: opListTagsOfResource,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListTagsOfResourceInput{}
+ }
+
+ output = &ListTagsOfResourceOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// ListTagsOfResource API operation for Amazon DynamoDB.
+//
+// List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource
+// up to 10 times per second, per account.
+//
+// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation ListTagsOfResource for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource
+func (c *DynamoDB) ListTagsOfResource(input *ListTagsOfResourceInput) (*ListTagsOfResourceOutput, error) {
+ req, out := c.ListTagsOfResourceRequest(input)
+ return out, req.Send()
+}
+
+// ListTagsOfResourceWithContext is the same as ListTagsOfResource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListTagsOfResource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) ListTagsOfResourceWithContext(ctx aws.Context, input *ListTagsOfResourceInput, opts ...request.Option) (*ListTagsOfResourceOutput, error) {
+ req, out := c.ListTagsOfResourceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
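+// Illustrative sketch (not generated SDK code): ListTagsOfResource pages with
+// NextToken rather than a LastEvaluated* field; "svc" is assumed to exist and
+// the ARN is hypothetical:
+//
+//    in := &dynamodb.ListTagsOfResourceInput{
+//        ResourceArn: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/MyTable"),
+//    }
+//    for {
+//        out, err := svc.ListTagsOfResource(in)
+//        if err != nil {
+//            break
+//        }
+//        for _, tag := range out.Tags {
+//            fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
+//        }
+//        if out.NextToken == nil {
+//            break // no more pages
+//        }
+//        in.NextToken = out.NextToken
+//    }
+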
+const opPutItem = "PutItem"
+
+// PutItemRequest generates an "aws/request.Request" representing the
+// client's request for the PutItem operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See PutItem for more information on using the PutItem
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the PutItemRequest method.
+// req, resp := client.PutItemRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem
+func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, output *PutItemOutput) {
+ op := &request.Operation{
+ Name: opPutItem,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutItemInput{}
+ }
+
+ output = &PutItemOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// PutItem API operation for Amazon DynamoDB.
+//
+// Creates a new item, or replaces an old item with a new item. If an item that
+// has the same primary key as the new item already exists in the specified
+// table, the new item completely replaces the existing item. You can perform
+// a conditional put operation (add a new item if one with the specified primary
+// key doesn't exist), or replace an existing item if it has certain attribute
+// values. You can return the item's attribute values in the same operation,
+// using the ReturnValues parameter.
+//
+// This topic provides general information about the PutItem API.
+//
+// For information on how to call the PutItem API using the AWS SDK in specific
+// languages, see the following:
+//
+// * PutItem in the AWS Command Line Interface (http://docs.aws.amazon.com/goto/aws-cli/dynamodb-2012-08-10/PutItem)
+//
+// * PutItem in the AWS SDK for .NET (http://docs.aws.amazon.com/goto/DotNetSDKV3/dynamodb-2012-08-10/PutItem)
+//
+// * PutItem in the AWS SDK for C++ (http://docs.aws.amazon.com/goto/SdkForCpp/dynamodb-2012-08-10/PutItem)
+//
+// * PutItem in the AWS SDK for Go (http://docs.aws.amazon.com/goto/SdkForGoV1/dynamodb-2012-08-10/PutItem)
+//
+// * PutItem in the AWS SDK for Java (http://docs.aws.amazon.com/goto/SdkForJava/dynamodb-2012-08-10/PutItem)
+//
+// * PutItem in the AWS SDK for JavaScript (http://docs.aws.amazon.com/goto/AWSJavaScriptSDK/dynamodb-2012-08-10/PutItem)
+//
+// * PutItem in the AWS SDK for PHP V3 (http://docs.aws.amazon.com/goto/SdkForPHPV3/dynamodb-2012-08-10/PutItem)
+//
+// * PutItem in the AWS SDK for Python (http://docs.aws.amazon.com/goto/boto3/dynamodb-2012-08-10/PutItem)
+//
+// * PutItem in the AWS SDK for Ruby V2 (http://docs.aws.amazon.com/goto/SdkForRubyV2/dynamodb-2012-08-10/PutItem)
+//
+// When you add an item, the primary key attributes are the only required attributes.
+// Attribute values cannot be null. String and Binary type attributes must have
+// lengths greater than zero. Set type attributes cannot be empty. Requests
+// with empty values will be rejected with a ValidationException.
+//
+// To prevent a new item from replacing an existing item, use a conditional
+// expression that contains the attribute_not_exists function with the name
+// of the attribute being used as the partition key for the table. Since every
+// record must contain that attribute, the attribute_not_exists function will
+// only succeed if no matching item exists.
+//
+// For more information about PutItem, see Working with Items (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation PutItem for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeConditionalCheckFailedException "ConditionalCheckFailedException"
+// A condition specified in the operation could not be evaluated.
+//
+// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
+// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+// requests that receive this exception. Your request is eventually successful,
+// unless your retry queue is too large to finish. Reduce the frequency of requests
+// and use exponential backoff. For more information, go to Error Retries and
+// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+// in the Amazon DynamoDB Developer Guide.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeItemCollectionSizeLimitExceededException "ItemCollectionSizeLimitExceededException"
+// An item collection is too large. This exception is only returned for tables
+// that have one or more local secondary indexes.
+//
+// * ErrCodeTransactionConflictException "TransactionConflictException"
+// Operation was rejected because there is an ongoing transaction for the item.
+//
+// * ErrCodeRequestLimitExceeded "RequestLimitExceeded"
+// Throughput exceeds the current throughput limit for your account. Please
+// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
+// a limit increase.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem
+func (c *DynamoDB) PutItem(input *PutItemInput) (*PutItemOutput, error) {
+ req, out := c.PutItemRequest(input)
+ return out, req.Send()
+}
+
+// PutItemWithContext is the same as PutItem with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutItem for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) PutItemWithContext(ctx aws.Context, input *PutItemInput, opts ...request.Option) (*PutItemOutput, error) {
+ req, out := c.PutItemRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
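+// Illustrative sketch (not generated SDK code): the conditional put described
+// above, using attribute_not_exists on the partition key so an existing item
+// is never replaced; "svc" is assumed to exist, and the table and attribute
+// names are hypothetical:
+//
+//    _, err := svc.PutItem(&dynamodb.PutItemInput{
+//        TableName: aws.String("MyTable"),
+//        Item: map[string]*dynamodb.AttributeValue{
+//            "ID":   {S: aws.String("item-1")},
+//            "Name": {S: aws.String("example")},
+//        },
+//        ConditionExpression: aws.String("attribute_not_exists(ID)"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok &&
+//        aerr.Code() == dynamodb.ErrCodeConditionalCheckFailedException {
+//        // an item with this key already exists; the put was rejected
+//    }
+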
+const opQuery = "Query"
+
+// QueryRequest generates an "aws/request.Request" representing the
+// client's request for the Query operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See Query for more information on using the Query
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the QueryRequest method.
+// req, resp := client.QueryRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query
+func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output *QueryOutput) {
+ op := &request.Operation{
+ Name: opQuery,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"ExclusiveStartKey"},
+ OutputTokens: []string{"LastEvaluatedKey"},
+ LimitToken: "Limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &QueryInput{}
+ }
+
+ output = &QueryOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// Query API operation for Amazon DynamoDB.
+//
+// The Query operation finds items based on primary key values. You can query
+// any table or secondary index that has a composite primary key (a partition
+// key and a sort key).
+//
+// Use the KeyConditionExpression parameter to provide a specific value for
+// the partition key. The Query operation will return all of the items from
+// the table or index with that partition key value. You can optionally narrow
+// the scope of the Query operation by specifying a sort key value and a comparison
+// operator in KeyConditionExpression. To further refine the Query results,
+// you can optionally provide a FilterExpression. A FilterExpression determines
+// which items within the results should be returned to you. All of the other
+// results are discarded.
+//
+// A Query operation always returns a result set. If no matching items are found,
+// the result set will be empty. Queries that do not return results consume
+// the minimum number of read capacity units for that type of read operation.
+//
+// DynamoDB calculates the number of read capacity units consumed based on item
+// size, not on the amount of data that is returned to an application. The number
+// of capacity units consumed will be the same whether you request all of the
+// attributes (the default behavior) or just some of them (using a projection
+// expression). The number will also be the same whether or not you use a FilterExpression.
+//
+// Query results are always sorted by the sort key value. If the data type of
+// the sort key is Number, the results are returned in numeric order; otherwise,
+// the results are returned in order of UTF-8 bytes. By default, the sort order
+// is ascending. To reverse the order, set the ScanIndexForward parameter to
+// false.
+//
+// A single Query operation will read up to the maximum number of items set
+// (if using the Limit parameter) or a maximum of 1 MB of data and then apply
+// any filtering to the results using FilterExpression. If LastEvaluatedKey
+// is present in the response, you will need to paginate the result set. For
+// more information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination)
+// in the Amazon DynamoDB Developer Guide.
+//
+// FilterExpression is applied after a Query finishes, but before the results
+// are returned. A FilterExpression cannot contain partition key or sort key
+// attributes. You need to specify those attributes in the KeyConditionExpression.
+//
+// A Query operation can return an empty result set and a LastEvaluatedKey if
+// all the items read for the page of results are filtered out.
+//
+// You can query a table, a local secondary index, or a global secondary index.
+// For a query on a table or on a local secondary index, you can set the ConsistentRead
+// parameter to true and obtain a strongly consistent result. Global secondary
+// indexes support eventually consistent reads only, so do not specify ConsistentRead
+// when querying a global secondary index.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation Query for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
+// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+// requests that receive this exception. Your request is eventually successful,
+// unless your retry queue is too large to finish. Reduce the frequency of requests
+// and use exponential backoff. For more information, go to Error Retries and
+// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+// in the Amazon DynamoDB Developer Guide.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeRequestLimitExceeded "RequestLimitExceeded"
+// Throughput exceeds the current throughput limit for your account. Please
+// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
+// a limit increase.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query
+func (c *DynamoDB) Query(input *QueryInput) (*QueryOutput, error) {
+ req, out := c.QueryRequest(input)
+ return out, req.Send()
+}
+
+// QueryWithContext is the same as Query with the addition of
+// the ability to pass a context and additional request options.
+//
+// See Query for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) QueryWithContext(ctx aws.Context, input *QueryInput, opts ...request.Option) (*QueryOutput, error) {
+ req, out := c.QueryRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
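+// Illustrative sketch (not generated SDK code): a Query on a composite key,
+// returned newest-first via ScanIndexForward; "svc" is assumed to exist, and
+// the table and attribute names are hypothetical:
+//
+//    out, err := svc.Query(&dynamodb.QueryInput{
+//        TableName:              aws.String("Orders"),
+//        KeyConditionExpression: aws.String("CustomerId = :c AND OrderDate >= :d"),
+//        ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+//            ":c": {S: aws.String("cust-1")},
+//            ":d": {S: aws.String("2019-01-01")},
+//        },
+//        ScanIndexForward: aws.Bool(false), // descending by sort key
+//    })
+//    if err == nil {
+//        fmt.Println(aws.Int64Value(out.Count), "items")
+//    }
+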
+// QueryPages iterates over the pages of a Query operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See Query method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a Query operation.
+// pageNum := 0
+// err := client.QueryPages(params,
+// func(page *dynamodb.QueryOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *DynamoDB) QueryPages(input *QueryInput, fn func(*QueryOutput, bool) bool) error {
+ return c.QueryPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// QueryPagesWithContext same as QueryPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) QueryPagesWithContext(ctx aws.Context, input *QueryInput, fn func(*QueryOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *QueryInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.QueryRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*QueryOutput), !p.HasNextPage())
+ }
+ return p.Err()
+}
+
+const opRestoreTableFromBackup = "RestoreTableFromBackup"
+
+// RestoreTableFromBackupRequest generates an "aws/request.Request" representing the
+// client's request for the RestoreTableFromBackup operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See RestoreTableFromBackup for more information on using the RestoreTableFromBackup
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the RestoreTableFromBackupRequest method.
+// req, resp := client.RestoreTableFromBackupRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup
+func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupInput) (req *request.Request, output *RestoreTableFromBackupOutput) {
+ op := &request.Operation{
+ Name: opRestoreTableFromBackup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RestoreTableFromBackupInput{}
+ }
+
+ output = &RestoreTableFromBackupOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// RestoreTableFromBackup API operation for Amazon DynamoDB.
+//
+// Creates a new table from an existing backup. Any number of users can execute
+// up to 4 concurrent restores (any type of restore) in a given account.
+//
+// You can call RestoreTableFromBackup at a maximum rate of 10 times per second.
+//
+// You must manually set up the following on the restored table:
+//
+// * Auto scaling policies
+//
+// * IAM policies
+//
+// * Amazon CloudWatch metrics and alarms
+//
+// * Tags
+//
+// * Stream settings
+//
+// * Time to Live (TTL) settings
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation RestoreTableFromBackup for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeTableAlreadyExistsException "TableAlreadyExistsException"
+// A target table with the specified name already exists.
+//
+// * ErrCodeTableInUseException "TableInUseException"
+// A target table with the specified name is either being created or deleted.
+//
+// * ErrCodeBackupNotFoundException "BackupNotFoundException"
+// Backup not found for the given BackupARN.
+//
+// * ErrCodeBackupInUseException "BackupInUseException"
+// There is another ongoing conflicting backup control plane operation on the
+// table. The backup is either being created, deleted or restored to a table.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup
+func (c *DynamoDB) RestoreTableFromBackup(input *RestoreTableFromBackupInput) (*RestoreTableFromBackupOutput, error) {
+ req, out := c.RestoreTableFromBackupRequest(input)
+ return out, req.Send()
+}
+
+// RestoreTableFromBackupWithContext is the same as RestoreTableFromBackup with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RestoreTableFromBackup for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) RestoreTableFromBackupWithContext(ctx aws.Context, input *RestoreTableFromBackupInput, opts ...request.Option) (*RestoreTableFromBackupOutput, error) {
+ req, out := c.RestoreTableFromBackupRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
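+// Illustrative sketch (not generated SDK code): restoring a backup to a new
+// table; "svc" is assumed to exist, the ARN and table name are hypothetical,
+// and the settings listed above still have to be reconfigured on the restored
+// table afterwards:
+//
+//    out, err := svc.RestoreTableFromBackup(&dynamodb.RestoreTableFromBackupInput{
+//        BackupArn:       aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/MyTable/backup/01234567890123-abcdefgh"),
+//        TargetTableName: aws.String("MyTable-restored"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.TableDescription.TableStatus))
+//    }
+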
+const opRestoreTableToPointInTime = "RestoreTableToPointInTime"
+
+// RestoreTableToPointInTimeRequest generates an "aws/request.Request" representing the
+// client's request for the RestoreTableToPointInTime operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See RestoreTableToPointInTime for more information on using the RestoreTableToPointInTime
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the RestoreTableToPointInTimeRequest method.
+// req, resp := client.RestoreTableToPointInTimeRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime
+func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointInTimeInput) (req *request.Request, output *RestoreTableToPointInTimeOutput) {
+ op := &request.Operation{
+ Name: opRestoreTableToPointInTime,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RestoreTableToPointInTimeInput{}
+ }
+
+ output = &RestoreTableToPointInTimeOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// RestoreTableToPointInTime API operation for Amazon DynamoDB.
+//
+// Restores the specified table to the specified point in time within EarliestRestorableDateTime
+// and LatestRestorableDateTime. You can restore your table to any point in
+// time during the last 35 days. Any number of users can execute up to 4 concurrent
+// restores (any type of restore) in a given account.
+//
+// When you restore using point in time recovery, DynamoDB restores your table
+// data, as it existed at the selected date and time (day:hour:minute:second),
+// to a new table.
+//
+// Along with data, the following are also included on the new restored table
+// using point in time recovery:
+//
+// * Global secondary indexes (GSIs)
+//
+// * Local secondary indexes (LSIs)
+//
+// * Provisioned read and write capacity
+//
+// * Encryption settings
+//
+// All these settings come from the current settings of the source table at
+// the time of restore.
+//
+// You must manually set up the following on the restored table:
+//
+// * Auto scaling policies
+//
+// * IAM policies
+//
+// * Amazon CloudWatch metrics and alarms
+//
+// * Tags
+//
+// * Stream settings
+//
+// * Time to Live (TTL) settings
+//
+// * Point in time recovery settings
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation RestoreTableToPointInTime for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeTableAlreadyExistsException "TableAlreadyExistsException"
+// A target table with the specified name already exists.
+//
+// * ErrCodeTableNotFoundException "TableNotFoundException"
+// A source table with the name TableName does not currently exist within the
+// subscriber's account.
+//
+// * ErrCodeTableInUseException "TableInUseException"
+// A target table with the specified name is either being created or deleted.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeInvalidRestoreTimeException "InvalidRestoreTimeException"
+// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime
+// and LatestRestorableDateTime.
+//
+// * ErrCodePointInTimeRecoveryUnavailableException "PointInTimeRecoveryUnavailableException"
+// Point in time recovery has not yet been enabled for this source table.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime
+func (c *DynamoDB) RestoreTableToPointInTime(input *RestoreTableToPointInTimeInput) (*RestoreTableToPointInTimeOutput, error) {
+ req, out := c.RestoreTableToPointInTimeRequest(input)
+ return out, req.Send()
+}
+
+// RestoreTableToPointInTimeWithContext is the same as RestoreTableToPointInTime with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RestoreTableToPointInTime for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) RestoreTableToPointInTimeWithContext(ctx aws.Context, input *RestoreTableToPointInTimeInput, opts ...request.Option) (*RestoreTableToPointInTimeOutput, error) {
+ req, out := c.RestoreTableToPointInTimeRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
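+// Illustrative sketch (not generated SDK code): restoring to the latest
+// restorable time; pass RestoreDateTime instead of UseLatestRestorableTime to
+// pick a specific second within the recovery window. "svc" is assumed to
+// exist and the table names are hypothetical:
+//
+//    _, err := svc.RestoreTableToPointInTime(&dynamodb.RestoreTableToPointInTimeInput{
+//        SourceTableName:         aws.String("MyTable"),
+//        TargetTableName:         aws.String("MyTable-restored"),
+//        UseLatestRestorableTime: aws.Bool(true),
+//    })
+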
+const opScan = "Scan"
+
+// ScanRequest generates an "aws/request.Request" representing the
+// client's request for the Scan operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See Scan for more information on using the Scan
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the ScanRequest method.
+// req, resp := client.ScanRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan
+func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *ScanOutput) {
+ op := &request.Operation{
+ Name: opScan,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"ExclusiveStartKey"},
+ OutputTokens: []string{"LastEvaluatedKey"},
+ LimitToken: "Limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ScanInput{}
+ }
+
+ output = &ScanOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// Scan API operation for Amazon DynamoDB.
+//
+// The Scan operation returns one or more items and item attributes by accessing
+// every item in a table or a secondary index. To have DynamoDB return fewer
+// items, you can provide a FilterExpression operation.
+//
+// If the total number of scanned items exceeds the maximum dataset size limit
+// of 1 MB, the scan stops, and the results are returned to the user with a
+// LastEvaluatedKey value so that the scan can be continued in a subsequent
+// operation. The results also include the number of items exceeding the limit.
+// A scan can result in no table data meeting the filter criteria.
+//
+// A single Scan operation reads up to the maximum number of items set (if using
+// the Limit parameter) or a maximum of 1 MB of data and then applies any filtering
+// to the results using FilterExpression. If LastEvaluatedKey is present in
+// the response, you need to paginate the result set. For more information,
+// see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Scan operations proceed sequentially; however, for faster performance on
+// a large table or secondary index, applications can request a parallel Scan
+// operation by providing the Segment and TotalSegments parameters. For more
+// information, see Parallel Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Scan uses eventually consistent reads when accessing the data in a table;
+// therefore, the result set might not include the changes to data in the table
+// immediately before the operation began. If you need a consistent copy of
+// the data, as of the time that the Scan begins, you can set the ConsistentRead
+// parameter to true.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation Scan for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
+// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+// requests that receive this exception. Your request is eventually successful,
+// unless your retry queue is too large to finish. Reduce the frequency of requests
+// and use exponential backoff. For more information, go to Error Retries and
+// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+// in the Amazon DynamoDB Developer Guide.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeRequestLimitExceeded "RequestLimitExceeded"
+// Throughput exceeds the current throughput limit for your account. Please
+// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
+// a limit increase.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan
+func (c *DynamoDB) Scan(input *ScanInput) (*ScanOutput, error) {
+ req, out := c.ScanRequest(input)
+ return out, req.Send()
+}
+
+// ScanWithContext is the same as Scan with the addition of
+// the ability to pass a context and additional request options.
+//
+// See Scan for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) ScanWithContext(ctx aws.Context, input *ScanInput, opts ...request.Option) (*ScanOutput, error) {
+ req, out := c.ScanRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
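+
+// parallelScan is a minimal illustrative sketch (not part of the generated
+// API) of the parallel Scan described above: one worker per segment, each
+// calling ScanWithContext so all workers share a cancelable context.
+// Pagination within each segment is omitted for brevity, and the table name
+// "ExampleTable" is hypothetical.
+func parallelScan(ctx aws.Context, c *DynamoDB, totalSegments int64) error {
+	errs := make(chan error, totalSegments)
+	for seg := int64(0); seg < totalSegments; seg++ {
+		go func(segment int64) {
+			_, err := c.ScanWithContext(ctx, &ScanInput{
+				TableName:     aws.String("ExampleTable"),
+				Segment:       aws.Int64(segment),
+				TotalSegments: aws.Int64(totalSegments),
+			})
+			errs <- err
+		}(seg)
+	}
+	for i := int64(0); i < totalSegments; i++ {
+		if err := <-errs; err != nil {
+			return err
+		}
+	}
+	return nil
+}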
+
+// ScanPages iterates over the pages of a Scan operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See Scan method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a Scan operation.
+// pageNum := 0
+// err := client.ScanPages(params,
+// func(page *dynamodb.ScanOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum < 3
+// })
+//
+func (c *DynamoDB) ScanPages(input *ScanInput, fn func(*ScanOutput, bool) bool) error {
+ return c.ScanPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ScanPagesWithContext same as ScanPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) ScanPagesWithContext(ctx aws.Context, input *ScanInput, fn func(*ScanOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ScanInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ScanRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ScanOutput), !p.HasNextPage())
+ }
+ return p.Err()
+}
+
+const opTagResource = "TagResource"
+
+// TagResourceRequest generates an "aws/request.Request" representing the
+// client's request for the TagResource operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See TagResource for more information on using the TagResource
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the TagResourceRequest method.
+// req, resp := client.TagResourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource
+func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) {
+ op := &request.Operation{
+ Name: opTagResource,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &TagResourceInput{}
+ }
+
+ output = &TagResourceOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// TagResource API operation for Amazon DynamoDB.
+//
+// Associate a set of tags with an Amazon DynamoDB resource. You can then activate
+// these user-defined tags so that they appear on the Billing and Cost Management
+// console for cost allocation tracking. You can call TagResource up to five
+// times per second, per account.
+//
+// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation TagResource for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// * ErrCodeResourceInUseException "ResourceInUseException"
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource
+func (c *DynamoDB) TagResource(input *TagResourceInput) (*TagResourceOutput, error) {
+ req, out := c.TagResourceRequest(input)
+ return out, req.Send()
+}
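+
+// tagTable is a minimal illustrative sketch (not part of the generated API)
+// of the TagResource call described above. The resource ARN and tag values
+// are hypothetical.
+func tagTable(c *DynamoDB) error {
+	_, err := c.TagResource(&TagResourceInput{
+		ResourceArn: aws.String("arn:aws:dynamodb:us-west-2:123456789012:table/ExampleTable"),
+		Tags: []*Tag{
+			{Key: aws.String("Environment"), Value: aws.String("Production")},
+		},
+	})
+	return err
+}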
+
+// TagResourceWithContext is the same as TagResource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See TagResource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) {
+ req, out := c.TagResourceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opTransactGetItems = "TransactGetItems"
+
+// TransactGetItemsRequest generates an "aws/request.Request" representing the
+// client's request for the TransactGetItems operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See TransactGetItems for more information on using the TransactGetItems
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the TransactGetItemsRequest method.
+// req, resp := client.TransactGetItemsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems
+func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *request.Request, output *TransactGetItemsOutput) {
+ op := &request.Operation{
+ Name: opTransactGetItems,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &TransactGetItemsInput{}
+ }
+
+ output = &TransactGetItemsOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// TransactGetItems API operation for Amazon DynamoDB.
+//
+// TransactGetItems is a synchronous operation that atomically retrieves multiple
+// items from one or more tables (but not from indexes) in a single account
+// and Region. A TransactGetItems call can contain up to 25 TransactGetItem
+// objects, each of which contains a Get structure that specifies an item to
+// retrieve from a table in the account and Region. A call to TransactGetItems
+// cannot retrieve items from tables in more than one AWS account or Region.
+// The aggregate size of the items in the transaction cannot exceed 4 MB.
+//
+// All AWS Regions and AWS GovCloud (US) support up to 25 items per transaction
+// with up to 4 MB of data, except the following AWS Regions:
+//
+// * China (Beijing)
+//
+// * China (Ningxia)
+//
+// The China (Beijing) and China (Ningxia) Regions support up to 10 items per
+// transaction with up to 4 MB of data.
+//
+// DynamoDB rejects the entire TransactGetItems request if any of the following
+// is true:
+//
+// * A conflicting operation is in the process of updating an item to be
+// read.
+//
+// * There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// * There is a user error, such as an invalid data format.
+//
+// * The aggregate size of the items in the transaction exceeds 4 MB.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation TransactGetItems for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeTransactionCanceledException "TransactionCanceledException"
+// The entire transaction request was canceled.
+//
+// DynamoDB cancels a TransactWriteItems request under the following circumstances:
+//
+// * A condition in one of the condition expressions is not met.
+//
+// * A table in the TransactWriteItems request is in a different account
+// or region.
+//
+// * More than one action in the TransactWriteItems operation targets the
+// same item.
+//
+// * There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// * An item size becomes too large (larger than 400 KB), or a local secondary
+// index (LSI) becomes too large, or a similar validation error occurs because
+// of changes made by the transaction.
+//
+// * The aggregate size of the items in the transaction exceeds 4 MB.
+//
+// * There is a user error, such as an invalid data format.
+//
+// DynamoDB cancels a TransactGetItems request under the following circumstances:
+//
+// * There is an ongoing TransactGetItems operation that conflicts with a
+// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
+// In this case the TransactGetItems operation fails with a TransactionCanceledException.
+//
+// * A table in the TransactGetItems request is in a different account or
+// region.
+//
+// * There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// * The aggregate size of the items in the transaction exceeds 4 MB.
+//
+// * There is a user error, such as an invalid data format.
+//
+// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
+// property. This property is not set for other languages. Transaction cancellation
+// reasons are ordered in the order of the requested items; if an item has no
+// error, its code is NONE and its message is null.
+//
+// Cancellation reason codes and possible error messages:
+//
+// * No Errors: Code: NONE Message: null
+//
+// * Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+// conditional request failed.
+//
+// * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+// Message: Collection size exceeded.
+//
+// * Transaction Conflict: Code: TransactionConflict Message: Transaction
+// is ongoing for the item.
+//
+// * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+// Messages: The level of configured provisioned throughput for the table
+// was exceeded. Consider increasing your provisioning level with the UpdateTable
+// API. This message is returned when provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. This message is returned
+// when provisioned throughput is exceeded on a provisioned GSI.
+//
+// * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table
+// as DynamoDB is automatically scaling the table. Throughput exceeds the
+// current capacity for one or more global secondary indexes. DynamoDB is
+// automatically scaling your index so please try again shortly. This message
+// is returned when writes get throttled on an On-Demand GSI as DynamoDB
+// is automatically scaling the GSI.
+//
+// * Validation Error: Code: ValidationError Messages: One or more parameter
+// values were invalid. The update expression attempted to update the secondary
+// index key beyond allowed size limits. The update expression attempted
+// to update the secondary index key to unsupported type. An operand in the
+// update expression has an incorrect data type. Item size to update has
+// exceeded the maximum allowed size. Number overflow. Attempting to store
+// a number with magnitude larger than supported range. Type mismatch for
+// attribute to update. Nesting Levels have exceeded supported limits. The
+// document path provided in the update expression is invalid for update.
+// The provided expression refers to an attribute that does not exist in
+// the item.
+//
+// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
+// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+// requests that receive this exception. Your request is eventually successful,
+// unless your retry queue is too large to finish. Reduce the frequency of requests
+// and use exponential backoff. For more information, go to Error Retries and
+// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+// in the Amazon DynamoDB Developer Guide.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems
+func (c *DynamoDB) TransactGetItems(input *TransactGetItemsInput) (*TransactGetItemsOutput, error) {
+ req, out := c.TransactGetItemsRequest(input)
+ return out, req.Send()
+}
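+
+// transactGetPair is a minimal illustrative sketch (not part of the generated
+// API) of a TransactGetItems call: two Get actions, each naming a table and a
+// primary key, retrieved atomically. Table, key, and attribute names are
+// hypothetical.
+func transactGetPair(c *DynamoDB) (*TransactGetItemsOutput, error) {
+	return c.TransactGetItems(&TransactGetItemsInput{
+		TransactItems: []*TransactGetItem{
+			{Get: &Get{
+				TableName: aws.String("Orders"),
+				Key:       map[string]*AttributeValue{"OrderId": {S: aws.String("order-1")}},
+			}},
+			{Get: &Get{
+				TableName: aws.String("Customers"),
+				Key:       map[string]*AttributeValue{"CustomerId": {S: aws.String("cust-1")}},
+			}},
+		},
+	})
+}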
+
+// TransactGetItemsWithContext is the same as TransactGetItems with the addition of
+// the ability to pass a context and additional request options.
+//
+// See TransactGetItems for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) TransactGetItemsWithContext(ctx aws.Context, input *TransactGetItemsInput, opts ...request.Option) (*TransactGetItemsOutput, error) {
+ req, out := c.TransactGetItemsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opTransactWriteItems = "TransactWriteItems"
+
+// TransactWriteItemsRequest generates an "aws/request.Request" representing the
+// client's request for the TransactWriteItems operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See TransactWriteItems for more information on using the TransactWriteItems
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the TransactWriteItemsRequest method.
+// req, resp := client.TransactWriteItemsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems
+func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (req *request.Request, output *TransactWriteItemsOutput) {
+ op := &request.Operation{
+ Name: opTransactWriteItems,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &TransactWriteItemsInput{}
+ }
+
+ output = &TransactWriteItemsOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// TransactWriteItems API operation for Amazon DynamoDB.
+//
+// TransactWriteItems is a synchronous write operation that groups up to 25
+// action requests. These actions can target items in different tables, but
+// not in different AWS accounts or Regions, and no two actions can target the
+// same item. For example, you cannot both ConditionCheck and Update the same
+// item. The aggregate size of the items in the transaction cannot exceed 4
+// MB.
+//
+// All AWS Regions and AWS GovCloud (US) support up to 25 items per transaction
+// with up to 4 MB of data, except the following AWS Regions:
+//
+// * China (Beijing)
+//
+// * China (Ningxia)
+//
+// The China (Beijing) and China (Ningxia) Regions support up to 10 items per
+// transaction with up to 4 MB of data.
+//
+// The actions are completed atomically so that either all of them succeed,
+// or all of them fail. They are defined by the following objects:
+//
+// * Put — Initiates a PutItem operation to write a new item. This structure
+// specifies the primary key of the item to be written, the name of the table
+// to write it in, an optional condition expression that must be satisfied
+// for the write to succeed, a list of the item's attributes, and a field
+// indicating whether to retrieve the item's attributes if the condition
+// is not met.
+//
+// * Update — Initiates an UpdateItem operation to update an existing item.
+// This structure specifies the primary key of the item to be updated, the
+// name of the table where it resides, an optional condition expression that
+// must be satisfied for the update to succeed, an expression that defines
+// one or more attributes to be updated, and a field indicating whether to
+// retrieve the item's attributes if the condition is not met.
+//
+// * Delete — Initiates a DeleteItem operation to delete an existing item.
+// This structure specifies the primary key of the item to be deleted, the
+// name of the table where it resides, an optional condition expression that
+// must be satisfied for the deletion to succeed, and a field indicating
+// whether to retrieve the item's attributes if the condition is not met.
+//
+// * ConditionCheck — Applies a condition to an item that is not being
+// modified by the transaction. This structure specifies the primary key
+// of the item to be checked, the name of the table where it resides, a condition
+// expression that must be satisfied for the transaction to succeed, and
+// a field indicating whether to retrieve the item's attributes if the condition
+// is not met.
+//
+// DynamoDB rejects the entire TransactWriteItems request if any of the following
+// is true:
+//
+// * A condition in one of the condition expressions is not met.
+//
+// * An ongoing operation is in the process of updating the same item.
+//
+// * There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// * An item size becomes too large (bigger than 400 KB), a local secondary
+// index (LSI) becomes too large, or a similar validation error occurs because
+// of changes made by the transaction.
+//
+// * The aggregate size of the items in the transaction exceeds 4 MB.
+//
+// * There is a user error, such as an invalid data format.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation TransactWriteItems for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeTransactionCanceledException "TransactionCanceledException"
+// The entire transaction request was canceled.
+//
+// DynamoDB cancels a TransactWriteItems request under the following circumstances:
+//
+// * A condition in one of the condition expressions is not met.
+//
+// * A table in the TransactWriteItems request is in a different account
+// or region.
+//
+// * More than one action in the TransactWriteItems operation targets the
+// same item.
+//
+// * There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// * An item size becomes too large (larger than 400 KB), or a local secondary
+// index (LSI) becomes too large, or a similar validation error occurs because
+// of changes made by the transaction.
+//
+// * The aggregate size of the items in the transaction exceeds 4 MB.
+//
+// * There is a user error, such as an invalid data format.
+//
+// DynamoDB cancels a TransactGetItems request under the following circumstances:
+//
+// * There is an ongoing TransactGetItems operation that conflicts with a
+// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
+// In this case the TransactGetItems operation fails with a TransactionCanceledException.
+//
+// * A table in the TransactGetItems request is in a different account or
+// region.
+//
+// * There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// * The aggregate size of the items in the transaction exceeds 4 MB.
+//
+// * There is a user error, such as an invalid data format.
+//
+// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
+// property. This property is not set for other languages. Transaction cancellation
+// reasons are ordered in the order of the requested items; if an item has no
+// error, its code is NONE and its message is null.
+//
+// Cancellation reason codes and possible error messages:
+//
+// * No Errors: Code: NONE Message: null
+//
+// * Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+// conditional request failed.
+//
+// * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+// Message: Collection size exceeded.
+//
+// * Transaction Conflict: Code: TransactionConflict Message: Transaction
+// is ongoing for the item.
+//
+// * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+// Messages: The level of configured provisioned throughput for the table
+// was exceeded. Consider increasing your provisioning level with the UpdateTable
+// API. This message is returned when provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. This message is returned
+// when provisioned throughput is exceeded on a provisioned GSI.
+//
+// * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table
+// as DynamoDB is automatically scaling the table. Throughput exceeds the
+// current capacity for one or more global secondary indexes. DynamoDB is
+// automatically scaling your index so please try again shortly. This message
+// is returned when writes get throttled on an On-Demand GSI as DynamoDB
+// is automatically scaling the GSI.
+//
+// * Validation Error: Code: ValidationError Messages: One or more parameter
+// values were invalid. The update expression attempted to update the secondary
+// index key beyond allowed size limits. The update expression attempted
+// to update the secondary index key to unsupported type. An operand in the
+// update expression has an incorrect data type. Item size to update has
+// exceeded the maximum allowed size. Number overflow. Attempting to store
+// a number with magnitude larger than supported range. Type mismatch for
+// attribute to update. Nesting Levels have exceeded supported limits. The
+// document path provided in the update expression is invalid for update.
+// The provided expression refers to an attribute that does not exist in
+// the item.
+//
+// * ErrCodeTransactionInProgressException "TransactionInProgressException"
+// The transaction with the given request token is already in progress.
+//
+// * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException"
+// DynamoDB rejected the request because you retried a request with a different
+// payload but with an idempotent token that was already used.
+//
+// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
+// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+// requests that receive this exception. Your request is eventually successful,
+// unless your retry queue is too large to finish. Reduce the frequency of requests
+// and use exponential backoff. For more information, go to Error Retries and
+// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+// in the Amazon DynamoDB Developer Guide.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems
+func (c *DynamoDB) TransactWriteItems(input *TransactWriteItemsInput) (*TransactWriteItemsOutput, error) {
+ req, out := c.TransactWriteItemsRequest(input)
+ return out, req.Send()
+}
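+
+// transactWriteSketch is a minimal illustrative sketch (not part of the
+// generated API) combining the Put and ConditionCheck actions described
+// above, with the awserr.Error type assertion used to detect a canceled
+// transaction. It assumes "github.com/aws/aws-sdk-go/aws/awserr" is imported;
+// table, key, and expression values are hypothetical.
+func transactWriteSketch(c *DynamoDB) error {
+	_, err := c.TransactWriteItems(&TransactWriteItemsInput{
+		TransactItems: []*TransactWriteItem{
+			// Write a new order item.
+			{Put: &Put{
+				TableName: aws.String("Orders"),
+				Item: map[string]*AttributeValue{
+					"OrderId": {S: aws.String("order-1")},
+					"Status":  {S: aws.String("NEW")},
+				},
+			}},
+			// Require that the customer exists without modifying it.
+			{ConditionCheck: &ConditionCheck{
+				TableName:           aws.String("Customers"),
+				Key:                 map[string]*AttributeValue{"CustomerId": {S: aws.String("cust-1")}},
+				ConditionExpression: aws.String("attribute_exists(CustomerId)"),
+			}},
+		},
+	})
+	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ErrCodeTransactionCanceledException {
+		// The whole transaction was canceled; see the reason codes above.
+		return aerr
+	}
+	return err
+}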
+
+// TransactWriteItemsWithContext is the same as TransactWriteItems with the addition of
+// the ability to pass a context and additional request options.
+//
+// See TransactWriteItems for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) TransactWriteItemsWithContext(ctx aws.Context, input *TransactWriteItemsInput, opts ...request.Option) (*TransactWriteItemsOutput, error) {
+ req, out := c.TransactWriteItemsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUntagResource = "UntagResource"
+
+// UntagResourceRequest generates an "aws/request.Request" representing the
+// client's request for the UntagResource operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UntagResource for more information on using the UntagResource
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UntagResourceRequest method.
+// req, resp := client.UntagResourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource
+func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) {
+ op := &request.Operation{
+ Name: opUntagResource,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UntagResourceInput{}
+ }
+
+ output = &UntagResourceOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// UntagResource API operation for Amazon DynamoDB.
+//
+// Removes the association of tags from an Amazon DynamoDB resource. You can
+// call UntagResource up to five times per second, per account.
+//
+// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation UntagResource for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// * ErrCodeResourceInUseException "ResourceInUseException"
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource
+func (c *DynamoDB) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) {
+ req, out := c.UntagResourceRequest(input)
+ return out, req.Send()
+}
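+
+// untagTable is a minimal illustrative sketch (not part of the generated API)
+// of the UntagResource call described above; the resource ARN and tag key are
+// hypothetical.
+func untagTable(c *DynamoDB) error {
+	_, err := c.UntagResource(&UntagResourceInput{
+		ResourceArn: aws.String("arn:aws:dynamodb:us-west-2:123456789012:table/ExampleTable"),
+		TagKeys:     []*string{aws.String("Environment")},
+	})
+	return err
+}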
+
+// UntagResourceWithContext is the same as UntagResource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UntagResource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
+ req, out := c.UntagResourceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUpdateContinuousBackups = "UpdateContinuousBackups"
+
+// UpdateContinuousBackupsRequest generates an "aws/request.Request" representing the
+// client's request for the UpdateContinuousBackups operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateContinuousBackups for more information on using the UpdateContinuousBackups
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateContinuousBackupsRequest method.
+// req, resp := client.UpdateContinuousBackupsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups
+func (c *DynamoDB) UpdateContinuousBackupsRequest(input *UpdateContinuousBackupsInput) (req *request.Request, output *UpdateContinuousBackupsOutput) {
+ op := &request.Operation{
+ Name: opUpdateContinuousBackups,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateContinuousBackupsInput{}
+ }
+
+ output = &UpdateContinuousBackupsOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// UpdateContinuousBackups API operation for Amazon DynamoDB.
+//
+// UpdateContinuousBackups enables or disables point in time recovery for the
+// specified table. A successful UpdateContinuousBackups call returns the current
+// ContinuousBackupsDescription. Continuous backups are ENABLED on all tables
+// at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus
+// will be set to ENABLED.
+//
+// Once continuous backups and point in time recovery are enabled, you can restore
+// to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.
+//
+// LatestRestorableDateTime is typically 5 minutes before the current time.
+// You can restore your table to any point in time during the last 35 days.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation UpdateContinuousBackups for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeTableNotFoundException "TableNotFoundException"
+// A source table with the name TableName does not currently exist within the
+// subscriber's account.
+//
+// * ErrCodeContinuousBackupsUnavailableException "ContinuousBackupsUnavailableException"
+// Backups have not yet been enabled for this table.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups
+func (c *DynamoDB) UpdateContinuousBackups(input *UpdateContinuousBackupsInput) (*UpdateContinuousBackupsOutput, error) {
+ req, out := c.UpdateContinuousBackupsRequest(input)
+ return out, req.Send()
+}
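+
+// enablePointInTimeRecovery is a minimal illustrative sketch (not part of the
+// generated API) of the UpdateContinuousBackups call described above, turning
+// point in time recovery on. The table name is hypothetical.
+func enablePointInTimeRecovery(c *DynamoDB) (*UpdateContinuousBackupsOutput, error) {
+	return c.UpdateContinuousBackups(&UpdateContinuousBackupsInput{
+		TableName: aws.String("ExampleTable"),
+		PointInTimeRecoverySpecification: &PointInTimeRecoverySpecification{
+			PointInTimeRecoveryEnabled: aws.Bool(true),
+		},
+	})
+}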
+
+// UpdateContinuousBackupsWithContext is the same as UpdateContinuousBackups with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateContinuousBackups for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) UpdateContinuousBackupsWithContext(ctx aws.Context, input *UpdateContinuousBackupsInput, opts ...request.Option) (*UpdateContinuousBackupsOutput, error) {
+ req, out := c.UpdateContinuousBackupsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUpdateGlobalTable = "UpdateGlobalTable"
+
+// UpdateGlobalTableRequest generates an "aws/request.Request" representing the
+// client's request for the UpdateGlobalTable operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateGlobalTable for more information on using the UpdateGlobalTable
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateGlobalTableRequest method.
+// req, resp := client.UpdateGlobalTableRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable
+func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req *request.Request, output *UpdateGlobalTableOutput) {
+ op := &request.Operation{
+ Name: opUpdateGlobalTable,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateGlobalTableInput{}
+ }
+
+ output = &UpdateGlobalTableOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// UpdateGlobalTable API operation for Amazon DynamoDB.
+//
+// Adds or removes replicas in the specified global table. The global table
+// must already exist to be able to use this operation. Any replica to be added
+// must be empty, have the same name as the global table, have the same key
+// schema, have DynamoDB Streams enabled, and have the same provisioned and
+// maximum write capacity units.
+//
+// Although you can use UpdateGlobalTable to add replicas and remove replicas
+// in a single request, for simplicity we recommend that you issue separate
+// requests for adding or removing replicas.
+//
+// If global secondary indexes are specified, then the following conditions
+// must also be met:
+//
+// * The global secondary indexes must have the same name.
+//
+// * The global secondary indexes must have the same hash key and sort key
+// (if present).
+//
+// * The global secondary indexes must have the same provisioned and maximum
+// write capacity units.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation UpdateGlobalTable for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// * ErrCodeGlobalTableNotFoundException "GlobalTableNotFoundException"
+// The specified global table does not exist.
+//
+// * ErrCodeReplicaAlreadyExistsException "ReplicaAlreadyExistsException"
+// The specified replica is already part of the global table.
+//
+// * ErrCodeReplicaNotFoundException "ReplicaNotFoundException"
+// The specified replica is no longer part of the global table.
+//
+// * ErrCodeTableNotFoundException "TableNotFoundException"
+// A source table with the name TableName does not currently exist within the
+// subscriber's account.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable
+func (c *DynamoDB) UpdateGlobalTable(input *UpdateGlobalTableInput) (*UpdateGlobalTableOutput, error) {
+ req, out := c.UpdateGlobalTableRequest(input)
+ return out, req.Send()
+}
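+
+// addReplica is a minimal illustrative sketch (not part of the generated API)
+// of the UpdateGlobalTable call described above, following the recommendation
+// to add replicas in a request separate from removals. The global table name
+// and Region are hypothetical.
+func addReplica(c *DynamoDB) (*UpdateGlobalTableOutput, error) {
+	return c.UpdateGlobalTable(&UpdateGlobalTableInput{
+		GlobalTableName: aws.String("ExampleGlobalTable"),
+		ReplicaUpdates: []*ReplicaUpdate{
+			{Create: &CreateReplicaAction{RegionName: aws.String("eu-west-1")}},
+		},
+	})
+}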
+
+// UpdateGlobalTableWithContext is the same as UpdateGlobalTable with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateGlobalTable for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) UpdateGlobalTableWithContext(ctx aws.Context, input *UpdateGlobalTableInput, opts ...request.Option) (*UpdateGlobalTableOutput, error) {
+ req, out := c.UpdateGlobalTableRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUpdateGlobalTableSettings = "UpdateGlobalTableSettings"
+
+// UpdateGlobalTableSettingsRequest generates an "aws/request.Request" representing the
+// client's request for the UpdateGlobalTableSettings operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateGlobalTableSettings for more information on using the UpdateGlobalTableSettings
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateGlobalTableSettingsRequest method.
+// req, resp := client.UpdateGlobalTableSettingsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings
+func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSettingsInput) (req *request.Request, output *UpdateGlobalTableSettingsOutput) {
+ op := &request.Operation{
+ Name: opUpdateGlobalTableSettings,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateGlobalTableSettingsInput{}
+ }
+
+ output = &UpdateGlobalTableSettingsOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// UpdateGlobalTableSettings API operation for Amazon DynamoDB.
+//
+// Updates settings for a global table.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation UpdateGlobalTableSettings for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeGlobalTableNotFoundException "GlobalTableNotFoundException"
+// The specified global table does not exist.
+//
+// * ErrCodeReplicaNotFoundException "ReplicaNotFoundException"
+// The specified replica is no longer part of the global table.
+//
+// * ErrCodeIndexNotFoundException "IndexNotFoundException"
+// The operation tried to access a nonexistent index.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeResourceInUseException "ResourceInUseException"
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings
+func (c *DynamoDB) UpdateGlobalTableSettings(input *UpdateGlobalTableSettingsInput) (*UpdateGlobalTableSettingsOutput, error) {
+ req, out := c.UpdateGlobalTableSettingsRequest(input)
+ return out, req.Send()
+}
+
+// UpdateGlobalTableSettingsWithContext is the same as UpdateGlobalTableSettings with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateGlobalTableSettings for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) UpdateGlobalTableSettingsWithContext(ctx aws.Context, input *UpdateGlobalTableSettingsInput, opts ...request.Option) (*UpdateGlobalTableSettingsOutput, error) {
+ req, out := c.UpdateGlobalTableSettingsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUpdateItem = "UpdateItem"
+
+// UpdateItemRequest generates an "aws/request.Request" representing the
+// client's request for the UpdateItem operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateItem for more information on using the UpdateItem
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateItemRequest method.
+// req, resp := client.UpdateItemRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem
+func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Request, output *UpdateItemOutput) {
+ op := &request.Operation{
+ Name: opUpdateItem,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateItemInput{}
+ }
+
+ output = &UpdateItemOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// UpdateItem API operation for Amazon DynamoDB.
+//
+// Edits an existing item's attributes, or adds a new item to the table if it
+// does not already exist. You can put, delete, or add attribute values. You
+// can also perform a conditional update on an existing item (insert a new attribute
+// name-value pair if it doesn't exist, or replace an existing name-value pair
+// if it has certain expected attribute values).
+//
+// You can also return the item's attribute values in the same UpdateItem operation
+// using the ReturnValues parameter.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation UpdateItem for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeConditionalCheckFailedException "ConditionalCheckFailedException"
+// A condition specified in the operation could not be evaluated.
+//
+// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
+// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+// requests that receive this exception. Your request is eventually successful,
+// unless your retry queue is too large to finish. Reduce the frequency of requests
+// and use exponential backoff. For more information, go to Error Retries and
+// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+// in the Amazon DynamoDB Developer Guide.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeItemCollectionSizeLimitExceededException "ItemCollectionSizeLimitExceededException"
+// An item collection is too large. This exception is only returned for tables
+// that have one or more local secondary indexes.
+//
+// * ErrCodeTransactionConflictException "TransactionConflictException"
+// Operation was rejected because there is an ongoing transaction for the item.
+//
+// * ErrCodeRequestLimitExceeded "RequestLimitExceeded"
+// Throughput exceeds the current throughput limit for your account. Please
+// contact AWS Support (https://aws.amazon.com/support) to request
+// a limit increase.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem
+func (c *DynamoDB) UpdateItem(input *UpdateItemInput) (*UpdateItemOutput, error) {
+ req, out := c.UpdateItemRequest(input)
+ return out, req.Send()
+}
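+
+// incrementCounter is a minimal illustrative sketch (not part of the
+// generated API) of the conditional update described above: it adds to a
+// numeric attribute only if the item already exists and returns the updated
+// value via ReturnValues. Table, key, and attribute names are hypothetical.
+func incrementCounter(c *DynamoDB) (*UpdateItemOutput, error) {
+	return c.UpdateItem(&UpdateItemInput{
+		TableName:                 aws.String("Counters"),
+		Key:                       map[string]*AttributeValue{"CounterId": {S: aws.String("page-views")}},
+		UpdateExpression:          aws.String("ADD #v :incr"),
+		ConditionExpression:       aws.String("attribute_exists(CounterId)"),
+		ExpressionAttributeNames:  map[string]*string{"#v": aws.String("Value")},
+		ExpressionAttributeValues: map[string]*AttributeValue{":incr": {N: aws.String("1")}},
+		ReturnValues:              aws.String("UPDATED_NEW"),
+	})
+}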
+
+// UpdateItemWithContext is the same as UpdateItem with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateItem for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) UpdateItemWithContext(ctx aws.Context, input *UpdateItemInput, opts ...request.Option) (*UpdateItemOutput, error) {
+ req, out := c.UpdateItemRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUpdateTable = "UpdateTable"
+
+// UpdateTableRequest generates an "aws/request.Request" representing the
+// client's request for the UpdateTable operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateTable for more information on using the UpdateTable
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateTableRequest method.
+// req, resp := client.UpdateTableRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable
+func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) {
+ op := &request.Operation{
+ Name: opUpdateTable,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateTableInput{}
+ }
+
+ output = &UpdateTableOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// UpdateTable API operation for Amazon DynamoDB.
+//
+// Modifies the provisioned throughput settings, global secondary indexes, or
+// DynamoDB Streams settings for a given table.
+//
+// You can only perform one of the following operations at once:
+//
+// * Modify the provisioned throughput settings of the table.
+//
+// * Enable or disable DynamoDB Streams on the table.
+//
+// * Remove a global secondary index from the table.
+//
+// * Create a new global secondary index on the table. After the index begins
+// backfilling, you can use UpdateTable to perform other operations.
+//
+// UpdateTable is an asynchronous operation; while it is executing, the table
+// status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot
+// issue another UpdateTable request. When the table returns to the ACTIVE state,
+// the UpdateTable operation is complete.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation UpdateTable for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeResourceInUseException "ResourceInUseException"
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable
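+//
+// A minimal sketch of one such operation, changing the provisioned throughput
+// of a table; the table name and capacity values are placeholders:
+//
+//    input := &UpdateTableInput{
+//        TableName: aws.String("Music"),
+//        ProvisionedThroughput: &ProvisionedThroughput{
+//            ReadCapacityUnits:  aws.Int64(10),
+//            WriteCapacityUnits: aws.Int64(10),
+//        },
+//    }
+//    out, err := client.UpdateTable(input)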
+func (c *DynamoDB) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) {
+ req, out := c.UpdateTableRequest(input)
+ return out, req.Send()
+}
+
+// UpdateTableWithContext is the same as UpdateTable with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateTable for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) UpdateTableWithContext(ctx aws.Context, input *UpdateTableInput, opts ...request.Option) (*UpdateTableOutput, error) {
+ req, out := c.UpdateTableRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUpdateTimeToLive = "UpdateTimeToLive"
+
+// UpdateTimeToLiveRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateTimeToLive operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateTimeToLive for more information on using the UpdateTimeToLive
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateTimeToLiveRequest method.
+// req, resp := client.UpdateTimeToLiveRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive
+func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *request.Request, output *UpdateTimeToLiveOutput) {
+ op := &request.Operation{
+ Name: opUpdateTimeToLive,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateTimeToLiveInput{}
+ }
+
+ output = &UpdateTimeToLiveOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// UpdateTimeToLive API operation for Amazon DynamoDB.
+//
+// The UpdateTimeToLive method enables or disables Time to Live (TTL) for the
+// specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification.
+// It can take up to one hour for the change to fully process. Any additional
+// UpdateTimeToLive calls for the same table during this one-hour window result
+// in a ValidationException.
+//
+// TTL compares the current time in epoch time format to the time stored in
+// the TTL attribute of an item. If the epoch time value stored in the attribute
+// is less than the current time, the item is marked as expired and subsequently
+// deleted.
+//
+// The epoch time format is the number of seconds elapsed since 12:00:00 AM
+// January 1, 1970 UTC.
+//
+// DynamoDB deletes expired items on a best-effort basis to ensure availability
+// of throughput for other data operations.
+//
+// DynamoDB typically deletes expired items within two days of expiration. The
+// exact duration within which an item gets deleted after expiration is specific
+// to the nature of the workload. Items that have expired and not been deleted
+// will still show up in reads, queries, and scans.
+//
+// As items are deleted, they are removed from any local secondary index and
+// global secondary index immediately in the same eventually consistent way
+// as a standard delete operation.
+//
+// For more information, see Time To Live (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation UpdateTimeToLive for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeResourceInUseException "ResourceInUseException"
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive
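+//
+// A minimal sketch of enabling TTL on a table; the table name and TTL
+// attribute name are placeholders:
+//
+//    input := &UpdateTimeToLiveInput{
+//        TableName: aws.String("SessionData"),
+//        TimeToLiveSpecification: &TimeToLiveSpecification{
+//            AttributeName: aws.String("ExpiresAt"),
+//            Enabled:       aws.Bool(true),
+//        },
+//    }
+//    out, err := client.UpdateTimeToLive(input)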
+func (c *DynamoDB) UpdateTimeToLive(input *UpdateTimeToLiveInput) (*UpdateTimeToLiveOutput, error) {
+ req, out := c.UpdateTimeToLiveRequest(input)
+ return out, req.Send()
+}
+
+// UpdateTimeToLiveWithContext is the same as UpdateTimeToLive with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateTimeToLive for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) UpdateTimeToLiveWithContext(ctx aws.Context, input *UpdateTimeToLiveInput, opts ...request.Option) (*UpdateTimeToLiveOutput, error) {
+ req, out := c.UpdateTimeToLiveRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// Represents an attribute for describing the key schema for the table and indexes.
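+//
+// For example, a string partition key attribute could be declared with the
+// fluent setters below (the attribute name is a placeholder):
+//
+//    def := (&AttributeDefinition{}).
+//        SetAttributeName("Artist").
+//        SetAttributeType("S")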
+type AttributeDefinition struct {
+ _ struct{} `type:"structure"`
+
+ // A name for the attribute.
+ //
+ // AttributeName is a required field
+ AttributeName *string `min:"1" type:"string" required:"true"`
+
+ // The data type for the attribute, where:
+ //
+ // * S - the attribute is of type String
+ //
+ // * N - the attribute is of type Number
+ //
+ // * B - the attribute is of type Binary
+ //
+ // AttributeType is a required field
+ AttributeType *string `type:"string" required:"true" enum:"ScalarAttributeType"`
+}
+
+// String returns the string representation
+func (s AttributeDefinition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttributeDefinition) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AttributeDefinition) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AttributeDefinition"}
+ if s.AttributeName == nil {
+ invalidParams.Add(request.NewErrParamRequired("AttributeName"))
+ }
+ if s.AttributeName != nil && len(*s.AttributeName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1))
+ }
+ if s.AttributeType == nil {
+ invalidParams.Add(request.NewErrParamRequired("AttributeType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributeName sets the AttributeName field's value.
+func (s *AttributeDefinition) SetAttributeName(v string) *AttributeDefinition {
+ s.AttributeName = &v
+ return s
+}
+
+// SetAttributeType sets the AttributeType field's value.
+func (s *AttributeDefinition) SetAttributeType(v string) *AttributeDefinition {
+ s.AttributeType = &v
+ return s
+}
+
+// Represents the data for an attribute.
+//
+// Each attribute value is described as a name-value pair. The name is the data
+// type, and the value is the data itself.
+//
+// For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes)
+// in the Amazon DynamoDB Developer Guide.
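+//
+// For example, an item can be expressed as a map of attribute names to typed
+// values; all names and values here are placeholders:
+//
+//    item := map[string]*AttributeValue{
+//        "Artist":  {S: aws.String("No One You Know")},
+//        "Year":    {N: aws.String("2001")},
+//        "InStock": {BOOL: aws.Bool(true)},
+//    }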
+type AttributeValue struct {
+ _ struct{} `type:"structure"`
+
+ // An attribute of type Binary. For example:
+ //
+ // "B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk"
+ //
+ // B is automatically base64 encoded/decoded by the SDK.
+ B []byte `type:"blob"`
+
+ // An attribute of type Boolean. For example:
+ //
+ // "BOOL": true
+ BOOL *bool `type:"boolean"`
+
+ // An attribute of type Binary Set. For example:
+ //
+ // "BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="]
+ BS [][]byte `type:"list"`
+
+ // An attribute of type List. For example:
+ //
+ // "L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N", "3.14159"}]
+ L []*AttributeValue `type:"list"`
+
+ // An attribute of type Map. For example:
+ //
+ // "M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}}
+ M map[string]*AttributeValue `type:"map"`
+
+ // An attribute of type Number. For example:
+ //
+ // "N": "123.45"
+ //
+ // Numbers are sent across the network to DynamoDB as strings, to maximize compatibility
+ // across languages and libraries. However, DynamoDB treats them as number type
+ // attributes for mathematical operations.
+ N *string `type:"string"`
+
+ // An attribute of type Number Set. For example:
+ //
+ // "NS": ["42.2", "-19", "7.5", "3.14"]
+ //
+ // Numbers are sent across the network to DynamoDB as strings, to maximize compatibility
+ // across languages and libraries. However, DynamoDB treats them as number type
+ // attributes for mathematical operations.
+ NS []*string `type:"list"`
+
+ // An attribute of type Null. For example:
+ //
+ // "NULL": true
+ NULL *bool `type:"boolean"`
+
+ // An attribute of type String. For example:
+ //
+ // "S": "Hello"
+ S *string `type:"string"`
+
+ // An attribute of type String Set. For example:
+ //
+ // "SS": ["Giraffe", "Hippo" ,"Zebra"]
+ SS []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s AttributeValue) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttributeValue) GoString() string {
+ return s.String()
+}
+
+// SetB sets the B field's value.
+func (s *AttributeValue) SetB(v []byte) *AttributeValue {
+ s.B = v
+ return s
+}
+
+// SetBOOL sets the BOOL field's value.
+func (s *AttributeValue) SetBOOL(v bool) *AttributeValue {
+ s.BOOL = &v
+ return s
+}
+
+// SetBS sets the BS field's value.
+func (s *AttributeValue) SetBS(v [][]byte) *AttributeValue {
+ s.BS = v
+ return s
+}
+
+// SetL sets the L field's value.
+func (s *AttributeValue) SetL(v []*AttributeValue) *AttributeValue {
+ s.L = v
+ return s
+}
+
+// SetM sets the M field's value.
+func (s *AttributeValue) SetM(v map[string]*AttributeValue) *AttributeValue {
+ s.M = v
+ return s
+}
+
+// SetN sets the N field's value.
+func (s *AttributeValue) SetN(v string) *AttributeValue {
+ s.N = &v
+ return s
+}
+
+// SetNS sets the NS field's value.
+func (s *AttributeValue) SetNS(v []*string) *AttributeValue {
+ s.NS = v
+ return s
+}
+
+// SetNULL sets the NULL field's value.
+func (s *AttributeValue) SetNULL(v bool) *AttributeValue {
+ s.NULL = &v
+ return s
+}
+
+// SetS sets the S field's value.
+func (s *AttributeValue) SetS(v string) *AttributeValue {
+ s.S = &v
+ return s
+}
+
+// SetSS sets the SS field's value.
+func (s *AttributeValue) SetSS(v []*string) *AttributeValue {
+ s.SS = v
+ return s
+}
+
+// For the UpdateItem operation, represents the attributes to be modified, the
+// action to perform on each, and the new value for each.
+//
+// You cannot use UpdateItem to update any primary key attributes. Instead,
+// you will need to delete the item, and then use PutItem to create a new item
+// with new attributes.
+//
+// Attribute values cannot be null; string and binary type attributes must have
+// lengths greater than zero; and set type attributes must not be empty. Requests
+// with empty values will be rejected with a ValidationException exception.
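+//
+// A minimal sketch of the ADD action described below, incrementing a numeric
+// attribute through the legacy AttributeUpdates parameter of UpdateItemInput;
+// the attribute name and value are placeholders:
+//
+//    updates := map[string]*AttributeValueUpdate{
+//        "itemcount": {
+//            Action: aws.String("ADD"),
+//            Value:  &AttributeValue{N: aws.String("3")},
+//        },
+//    }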
+type AttributeValueUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies how to perform the update. Valid values are PUT (default), DELETE,
+ // and ADD. The behavior depends on whether the specified primary key already
+ // exists in the table.
+ //
+ // If an item with the specified Key is found in the table:
+ //
+ // * PUT - Adds the specified attribute to the item. If the attribute already
+ // exists, it is replaced by the new value.
+ //
+ // * DELETE - If no value is specified, the attribute and its value are removed
+ // from the item. The data type of the specified value must match the existing
+ // value's data type. If a set of values is specified, then those values
+ // are subtracted from the old set. For example, if the attribute value was
+ // the set [a,b,c] and the DELETE action specified [a,c], then the final
+ // attribute value would be [b]. Specifying an empty set is an error.
+ //
+ // * ADD - If the attribute does not already exist, then the attribute and
+ // its values are added to the item. If the attribute does exist, then the
+ // behavior of ADD depends on the data type of the attribute: If the existing
+ // attribute is a number, and if Value is also a number, then the Value is
+ // mathematically added to the existing attribute. If Value is a negative
+ // number, then it is subtracted from the existing attribute. If you use
+ // ADD to increment or decrement a number value for an item that doesn't
+ // exist before the update, DynamoDB uses 0 as the initial value. In addition,
+ // if you use ADD to update an existing item, and intend to increment or
+ // decrement an attribute value which does not yet exist, DynamoDB uses 0
+ // as the initial value. For example, suppose that the item you want to update
+ // does not yet have an attribute named itemcount, but you decide to ADD
+ // the number 3 to this attribute anyway, even though it currently does not
+ // exist. DynamoDB will create the itemcount attribute, set its initial value
+ // to 0, and finally add 3 to it. The result will be a new itemcount attribute
+ // in the item, with a value of 3. If the existing data type is a set, and
+ // if the Value is also a set, then the Value is added to the existing set.
+ // (This is a set operation, not mathematical addition.) For example, if
+ // the attribute value was the set [1,2], and the ADD action specified [3],
+ // then the final attribute value would be [1,2,3]. An error occurs if an
+ // Add action is specified for a set attribute and the attribute type specified
+ // does not match the existing set type. Both sets must have the same primitive
+ // data type. For example, if the existing data type is a set of strings,
+ // the Value must also be a set of strings. The same holds true for number
+ // sets and binary sets. This action is only valid for an existing attribute
+ // whose data type is number or is a set. Do not use ADD for any other data
+ // types.
+ //
+ // If no item with the specified Key is found:
+ //
+ // * PUT - DynamoDB creates a new item with the specified primary key, and
+ // then adds the attribute.
+ //
+ // * DELETE - Nothing happens; there is no attribute to delete.
+ //
+ // * ADD - DynamoDB creates an item with the supplied primary key and number
+ // (or set of numbers) for the attribute value. The only data types allowed
+ // are number and number set; no other data types can be specified.
+ Action *string `type:"string" enum:"AttributeAction"`
+
+ // Represents the data for an attribute.
+ //
+ // Each attribute value is described as a name-value pair. The name is the data
+ // type, and the value is the data itself.
+ //
+ // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes)
+ // in the Amazon DynamoDB Developer Guide.
+ Value *AttributeValue `type:"structure"`
+}
+
+// String returns the string representation
+func (s AttributeValueUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AttributeValueUpdate) GoString() string {
+ return s.String()
+}
+
+// SetAction sets the Action field's value.
+func (s *AttributeValueUpdate) SetAction(v string) *AttributeValueUpdate {
+ s.Action = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *AttributeValueUpdate) SetValue(v *AttributeValue) *AttributeValueUpdate {
+ s.Value = v
+ return s
+}
+
+// Represents the properties of the scaling policy.
+type AutoScalingPolicyDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the scaling policy.
+ PolicyName *string `min:"1" type:"string"`
+
+ // Represents a target tracking scaling policy configuration.
+ TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s AutoScalingPolicyDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoScalingPolicyDescription) GoString() string {
+ return s.String()
+}
+
+// SetPolicyName sets the PolicyName field's value.
+func (s *AutoScalingPolicyDescription) SetPolicyName(v string) *AutoScalingPolicyDescription {
+ s.PolicyName = &v
+ return s
+}
+
+// SetTargetTrackingScalingPolicyConfiguration sets the TargetTrackingScalingPolicyConfiguration field's value.
+func (s *AutoScalingPolicyDescription) SetTargetTrackingScalingPolicyConfiguration(v *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) *AutoScalingPolicyDescription {
+ s.TargetTrackingScalingPolicyConfiguration = v
+ return s
+}
+
+// Represents the autoscaling policy to be modified.
+type AutoScalingPolicyUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the scaling policy.
+ PolicyName *string `min:"1" type:"string"`
+
+ // Represents a target tracking scaling policy configuration.
+ //
+ // TargetTrackingScalingPolicyConfiguration is a required field
+ TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AutoScalingPolicyUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoScalingPolicyUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AutoScalingPolicyUpdate) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AutoScalingPolicyUpdate"}
+ if s.PolicyName != nil && len(*s.PolicyName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1))
+ }
+ if s.TargetTrackingScalingPolicyConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetTrackingScalingPolicyConfiguration"))
+ }
+ if s.TargetTrackingScalingPolicyConfiguration != nil {
+ if err := s.TargetTrackingScalingPolicyConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("TargetTrackingScalingPolicyConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPolicyName sets the PolicyName field's value.
+func (s *AutoScalingPolicyUpdate) SetPolicyName(v string) *AutoScalingPolicyUpdate {
+ s.PolicyName = &v
+ return s
+}
+
+// SetTargetTrackingScalingPolicyConfiguration sets the TargetTrackingScalingPolicyConfiguration field's value.
+func (s *AutoScalingPolicyUpdate) SetTargetTrackingScalingPolicyConfiguration(v *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) *AutoScalingPolicyUpdate {
+ s.TargetTrackingScalingPolicyConfiguration = v
+ return s
+}
+
+// Represents the autoscaling settings for a global table or global secondary
+// index.
+type AutoScalingSettingsDescription struct {
+ _ struct{} `type:"structure"`
+
+ // Whether autoscaling is disabled for this global table or global secondary index.
+ AutoScalingDisabled *bool `type:"boolean"`
+
+ // Role ARN used for configuring the autoscaling policy.
+ AutoScalingRoleArn *string `type:"string"`
+
+ // The maximum capacity units that a global table or global secondary index
+ // should be scaled up to.
+ MaximumUnits *int64 `min:"1" type:"long"`
+
+ // The minimum capacity units that a global table or global secondary index
+ // should be scaled down to.
+ MinimumUnits *int64 `min:"1" type:"long"`
+
+ // Information about the scaling policies.
+ ScalingPolicies []*AutoScalingPolicyDescription `type:"list"`
+}
+
+// String returns the string representation
+func (s AutoScalingSettingsDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoScalingSettingsDescription) GoString() string {
+ return s.String()
+}
+
+// SetAutoScalingDisabled sets the AutoScalingDisabled field's value.
+func (s *AutoScalingSettingsDescription) SetAutoScalingDisabled(v bool) *AutoScalingSettingsDescription {
+ s.AutoScalingDisabled = &v
+ return s
+}
+
+// SetAutoScalingRoleArn sets the AutoScalingRoleArn field's value.
+func (s *AutoScalingSettingsDescription) SetAutoScalingRoleArn(v string) *AutoScalingSettingsDescription {
+ s.AutoScalingRoleArn = &v
+ return s
+}
+
+// SetMaximumUnits sets the MaximumUnits field's value.
+func (s *AutoScalingSettingsDescription) SetMaximumUnits(v int64) *AutoScalingSettingsDescription {
+ s.MaximumUnits = &v
+ return s
+}
+
+// SetMinimumUnits sets the MinimumUnits field's value.
+func (s *AutoScalingSettingsDescription) SetMinimumUnits(v int64) *AutoScalingSettingsDescription {
+ s.MinimumUnits = &v
+ return s
+}
+
+// SetScalingPolicies sets the ScalingPolicies field's value.
+func (s *AutoScalingSettingsDescription) SetScalingPolicies(v []*AutoScalingPolicyDescription) *AutoScalingSettingsDescription {
+ s.ScalingPolicies = v
+ return s
+}
+
+// Represents the autoscaling settings to be modified for a global table or
+// global secondary index.
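+//
+// A minimal sketch of building such an update with a target tracking policy;
+// all numeric values are placeholders:
+//
+//    update := &AutoScalingSettingsUpdate{
+//        MinimumUnits: aws.Int64(5),
+//        MaximumUnits: aws.Int64(100),
+//        ScalingPolicyUpdate: &AutoScalingPolicyUpdate{
+//            TargetTrackingScalingPolicyConfiguration: &AutoScalingTargetTrackingScalingPolicyConfigurationUpdate{
+//                TargetValue: aws.Float64(70.0),
+//            },
+//        },
+//    }
+//    err := update.Validate()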
+type AutoScalingSettingsUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // Whether autoscaling should be disabled for this global table or global secondary
+ // index.
+ AutoScalingDisabled *bool `type:"boolean"`
+
+ // Role ARN used for configuring autoscaling policy.
+ AutoScalingRoleArn *string `min:"1" type:"string"`
+
+ // The maximum capacity units that a global table or global secondary index
+ // should be scaled up to.
+ MaximumUnits *int64 `min:"1" type:"long"`
+
+ // The minimum capacity units that a global table or global secondary index
+ // should be scaled down to.
+ MinimumUnits *int64 `min:"1" type:"long"`
+
+ // The scaling policy to apply for scaling target global table or global secondary
+ // index capacity units.
+ ScalingPolicyUpdate *AutoScalingPolicyUpdate `type:"structure"`
+}
+
+// String returns the string representation
+func (s AutoScalingSettingsUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoScalingSettingsUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AutoScalingSettingsUpdate) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AutoScalingSettingsUpdate"}
+ if s.AutoScalingRoleArn != nil && len(*s.AutoScalingRoleArn) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AutoScalingRoleArn", 1))
+ }
+ if s.MaximumUnits != nil && *s.MaximumUnits < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaximumUnits", 1))
+ }
+ if s.MinimumUnits != nil && *s.MinimumUnits < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MinimumUnits", 1))
+ }
+ if s.ScalingPolicyUpdate != nil {
+ if err := s.ScalingPolicyUpdate.Validate(); err != nil {
+ invalidParams.AddNested("ScalingPolicyUpdate", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAutoScalingDisabled sets the AutoScalingDisabled field's value.
+func (s *AutoScalingSettingsUpdate) SetAutoScalingDisabled(v bool) *AutoScalingSettingsUpdate {
+ s.AutoScalingDisabled = &v
+ return s
+}
+
+// SetAutoScalingRoleArn sets the AutoScalingRoleArn field's value.
+func (s *AutoScalingSettingsUpdate) SetAutoScalingRoleArn(v string) *AutoScalingSettingsUpdate {
+ s.AutoScalingRoleArn = &v
+ return s
+}
+
+// SetMaximumUnits sets the MaximumUnits field's value.
+func (s *AutoScalingSettingsUpdate) SetMaximumUnits(v int64) *AutoScalingSettingsUpdate {
+ s.MaximumUnits = &v
+ return s
+}
+
+// SetMinimumUnits sets the MinimumUnits field's value.
+func (s *AutoScalingSettingsUpdate) SetMinimumUnits(v int64) *AutoScalingSettingsUpdate {
+ s.MinimumUnits = &v
+ return s
+}
+
+// SetScalingPolicyUpdate sets the ScalingPolicyUpdate field's value.
+func (s *AutoScalingSettingsUpdate) SetScalingPolicyUpdate(v *AutoScalingPolicyUpdate) *AutoScalingSettingsUpdate {
+ s.ScalingPolicyUpdate = v
+ return s
+}
+
+// Represents the properties of a target tracking scaling policy.
+type AutoScalingTargetTrackingScalingPolicyConfigurationDescription struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether scale in by the target tracking policy is disabled. If
+ // the value is true, scale in is disabled and the target tracking policy won't
+ // remove capacity from the scalable resource. Otherwise, scale in is enabled
+ // and the target tracking policy can remove capacity from the scalable resource.
+ // The default value is false.
+ DisableScaleIn *bool `type:"boolean"`
+
+ // The amount of time, in seconds, after a scale in activity completes before
+ // another scale in activity can start. The cooldown period is used to block
+ // subsequent scale in requests until it has expired. You should scale in conservatively
+ // to protect your application's availability. However, if another alarm triggers
+ // a scale out policy during the cooldown period after a scale-in, application
+ // autoscaling scales out your scalable target immediately.
+ ScaleInCooldown *int64 `type:"integer"`
+
+ // The amount of time, in seconds, after a scale out activity completes before
+ // another scale out activity can start. While the cooldown period is in effect,
+ // the capacity that has been added by the previous scale out event that initiated
+ // the cooldown is calculated as part of the desired capacity for the next scale
+ // out. You should continuously (but not excessively) scale out.
+ ScaleOutCooldown *int64 `type:"integer"`
+
+ // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108
+ // (Base 10) or 2^-360 to 2^360 (Base 2).
+ //
+ // TargetValue is a required field
+ TargetValue *float64 `type:"double" required:"true"`
+}
+
+// String returns the string representation
+func (s AutoScalingTargetTrackingScalingPolicyConfigurationDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoScalingTargetTrackingScalingPolicyConfigurationDescription) GoString() string {
+ return s.String()
+}
+
+// SetDisableScaleIn sets the DisableScaleIn field's value.
+func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetDisableScaleIn(v bool) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription {
+ s.DisableScaleIn = &v
+ return s
+}
+
+// SetScaleInCooldown sets the ScaleInCooldown field's value.
+func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetScaleInCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription {
+ s.ScaleInCooldown = &v
+ return s
+}
+
+// SetScaleOutCooldown sets the ScaleOutCooldown field's value.
+func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetScaleOutCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription {
+ s.ScaleOutCooldown = &v
+ return s
+}
+
+// SetTargetValue sets the TargetValue field's value.
+func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetTargetValue(v float64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription {
+ s.TargetValue = &v
+ return s
+}
+
+// Represents the settings of a target tracking scaling policy that will be
+// modified.
+type AutoScalingTargetTrackingScalingPolicyConfigurationUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether scale in by the target tracking policy is disabled. If
+ // the value is true, scale in is disabled and the target tracking policy won't
+ // remove capacity from the scalable resource. Otherwise, scale in is enabled
+ // and the target tracking policy can remove capacity from the scalable resource.
+ // The default value is false.
+ DisableScaleIn *bool `type:"boolean"`
+
+ // The amount of time, in seconds, after a scale in activity completes before
+ // another scale in activity can start. The cooldown period is used to block
+ // subsequent scale in requests until it has expired. You should scale in conservatively
+ // to protect your application's availability. However, if another alarm triggers
+ // a scale out policy during the cooldown period after a scale-in, application
+ // autoscaling scales out your scalable target immediately.
+ ScaleInCooldown *int64 `type:"integer"`
+
+ // The amount of time, in seconds, after a scale out activity completes before
+ // another scale out activity can start. While the cooldown period is in effect,
+ // the capacity that has been added by the previous scale out event that initiated
+ // the cooldown is calculated as part of the desired capacity for the next scale
+ // out. You should continuously (but not excessively) scale out.
+ ScaleOutCooldown *int64 `type:"integer"`
+
+ // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108
+ // (Base 10) or 2^-360 to 2^360 (Base 2).
+ //
+ // TargetValue is a required field
+ TargetValue *float64 `type:"double" required:"true"`
+}
+
+// String returns the string representation
+func (s AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AutoScalingTargetTrackingScalingPolicyConfigurationUpdate"}
+ if s.TargetValue == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetValue"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDisableScaleIn sets the DisableScaleIn field's value.
+func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetDisableScaleIn(v bool) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate {
+ s.DisableScaleIn = &v
+ return s
+}
+
+// SetScaleInCooldown sets the ScaleInCooldown field's value.
+func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetScaleInCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate {
+ s.ScaleInCooldown = &v
+ return s
+}
+
+// SetScaleOutCooldown sets the ScaleOutCooldown field's value.
+func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetScaleOutCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate {
+ s.ScaleOutCooldown = &v
+ return s
+}
+
+// SetTargetValue sets the TargetValue field's value.
+func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetTargetValue(v float64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate {
+ s.TargetValue = &v
+ return s
+}
+
+// Contains the description of the backup created for the table.
+type BackupDescription struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the details of the backup created for the table.
+ BackupDetails *BackupDetails `type:"structure"`
+
+ // Contains the details of the table when the backup was created.
+ SourceTableDetails *SourceTableDetails `type:"structure"`
+
+ // Contains the details of the features enabled on the table when the backup
+ // was created. For example, LSIs, GSIs, streams, TTL.
+ SourceTableFeatureDetails *SourceTableFeatureDetails `type:"structure"`
+}
+
+// String returns the string representation
+func (s BackupDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BackupDescription) GoString() string {
+ return s.String()
+}
+
+// SetBackupDetails sets the BackupDetails field's value.
+func (s *BackupDescription) SetBackupDetails(v *BackupDetails) *BackupDescription {
+ s.BackupDetails = v
+ return s
+}
+
+// SetSourceTableDetails sets the SourceTableDetails field's value.
+func (s *BackupDescription) SetSourceTableDetails(v *SourceTableDetails) *BackupDescription {
+ s.SourceTableDetails = v
+ return s
+}
+
+// SetSourceTableFeatureDetails sets the SourceTableFeatureDetails field's value.
+func (s *BackupDescription) SetSourceTableFeatureDetails(v *SourceTableFeatureDetails) *BackupDescription {
+ s.SourceTableFeatureDetails = v
+ return s
+}
+
+// Contains the details of the backup created for the table.
+type BackupDetails struct {
+ _ struct{} `type:"structure"`
+
+ // ARN associated with the backup.
+ //
+ // BackupArn is a required field
+ BackupArn *string `min:"37" type:"string" required:"true"`
+
+ // Time at which the backup was created. This is the request time of the backup.
+ //
+ // BackupCreationDateTime is a required field
+ BackupCreationDateTime *time.Time `type:"timestamp" required:"true"`
+
+ // Time at which the automatic on-demand backup created by DynamoDB will expire.
+ // This SYSTEM on-demand backup expires automatically 35 days after its creation.
+ BackupExpiryDateTime *time.Time `type:"timestamp"`
+
+ // Name of the requested backup.
+ //
+ // BackupName is a required field
+ BackupName *string `min:"3" type:"string" required:"true"`
+
+ // Size of the backup in bytes.
+ BackupSizeBytes *int64 `type:"long"`
+
+ // Backup can be in one of the following states: CREATING, ACTIVE, DELETED.
+ //
+ // BackupStatus is a required field
+ BackupStatus *string `type:"string" required:"true" enum:"BackupStatus"`
+
+ // BackupType:
+ //
+ // * USER - You create and manage these using the on-demand backup feature.
+ //
+ // * SYSTEM - If you delete a table with point-in-time recovery enabled,
+ // a SYSTEM backup is automatically created and is retained for 35 days (at
+ // no additional cost). System backups allow you to restore the deleted table
+ // to the state it was in just before the point of deletion.
+ //
+ // * AWS_BACKUP - On-demand backup created by you from AWS Backup service.
+ //
+ // BackupType is a required field
+ BackupType *string `type:"string" required:"true" enum:"BackupType"`
+}
+
+// String returns the string representation
+func (s BackupDetails) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BackupDetails) GoString() string {
+ return s.String()
+}
+
+// SetBackupArn sets the BackupArn field's value.
+func (s *BackupDetails) SetBackupArn(v string) *BackupDetails {
+ s.BackupArn = &v
+ return s
+}
+
+// SetBackupCreationDateTime sets the BackupCreationDateTime field's value.
+func (s *BackupDetails) SetBackupCreationDateTime(v time.Time) *BackupDetails {
+ s.BackupCreationDateTime = &v
+ return s
+}
+
+// SetBackupExpiryDateTime sets the BackupExpiryDateTime field's value.
+func (s *BackupDetails) SetBackupExpiryDateTime(v time.Time) *BackupDetails {
+ s.BackupExpiryDateTime = &v
+ return s
+}
+
+// SetBackupName sets the BackupName field's value.
+func (s *BackupDetails) SetBackupName(v string) *BackupDetails {
+ s.BackupName = &v
+ return s
+}
+
+// SetBackupSizeBytes sets the BackupSizeBytes field's value.
+func (s *BackupDetails) SetBackupSizeBytes(v int64) *BackupDetails {
+ s.BackupSizeBytes = &v
+ return s
+}
+
+// SetBackupStatus sets the BackupStatus field's value.
+func (s *BackupDetails) SetBackupStatus(v string) *BackupDetails {
+ s.BackupStatus = &v
+ return s
+}
+
+// SetBackupType sets the BackupType field's value.
+func (s *BackupDetails) SetBackupType(v string) *BackupDetails {
+ s.BackupType = &v
+ return s
+}
+
+// Contains details for the backup.
+type BackupSummary struct {
+ _ struct{} `type:"structure"`
+
+ // ARN associated with the backup.
+ BackupArn *string `min:"37" type:"string"`
+
+ // Time at which the backup was created.
+ BackupCreationDateTime *time.Time `type:"timestamp"`
+
+ // Time at which the automatic on-demand backup created by DynamoDB will expire.
+ // This SYSTEM on-demand backup expires automatically 35 days after its creation.
+ BackupExpiryDateTime *time.Time `type:"timestamp"`
+
+ // Name of the specified backup.
+ BackupName *string `min:"3" type:"string"`
+
+ // Size of the backup in bytes.
+ BackupSizeBytes *int64 `type:"long"`
+
+ // Backup can be in one of the following states: CREATING, ACTIVE, DELETED.
+ BackupStatus *string `type:"string" enum:"BackupStatus"`
+
+ // BackupType:
+ //
+ // * USER - You create and manage these using the on-demand backup feature.
+ //
+ // * SYSTEM - If you delete a table with point-in-time recovery enabled,
+ // a SYSTEM backup is automatically created and is retained for 35 days (at
+ // no additional cost). System backups allow you to restore the deleted table
+ // to the state it was in just before the point of deletion.
+ //
+ // * AWS_BACKUP - On-demand backup created by you from AWS Backup service.
+ BackupType *string `type:"string" enum:"BackupType"`
+
+ // ARN associated with the table.
+ TableArn *string `type:"string"`
+
+ // Unique identifier for the table.
+ TableId *string `type:"string"`
+
+ // Name of the table.
+ TableName *string `min:"3" type:"string"`
+}
+
+// String returns the string representation
+func (s BackupSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BackupSummary) GoString() string {
+ return s.String()
+}
+
+// SetBackupArn sets the BackupArn field's value.
+func (s *BackupSummary) SetBackupArn(v string) *BackupSummary {
+ s.BackupArn = &v
+ return s
+}
+
+// SetBackupCreationDateTime sets the BackupCreationDateTime field's value.
+func (s *BackupSummary) SetBackupCreationDateTime(v time.Time) *BackupSummary {
+ s.BackupCreationDateTime = &v
+ return s
+}
+
+// SetBackupExpiryDateTime sets the BackupExpiryDateTime field's value.
+func (s *BackupSummary) SetBackupExpiryDateTime(v time.Time) *BackupSummary {
+ s.BackupExpiryDateTime = &v
+ return s
+}
+
+// SetBackupName sets the BackupName field's value.
+func (s *BackupSummary) SetBackupName(v string) *BackupSummary {
+ s.BackupName = &v
+ return s
+}
+
+// SetBackupSizeBytes sets the BackupSizeBytes field's value.
+func (s *BackupSummary) SetBackupSizeBytes(v int64) *BackupSummary {
+ s.BackupSizeBytes = &v
+ return s
+}
+
+// SetBackupStatus sets the BackupStatus field's value.
+func (s *BackupSummary) SetBackupStatus(v string) *BackupSummary {
+ s.BackupStatus = &v
+ return s
+}
+
+// SetBackupType sets the BackupType field's value.
+func (s *BackupSummary) SetBackupType(v string) *BackupSummary {
+ s.BackupType = &v
+ return s
+}
+
+// SetTableArn sets the TableArn field's value.
+func (s *BackupSummary) SetTableArn(v string) *BackupSummary {
+ s.TableArn = &v
+ return s
+}
+
+// SetTableId sets the TableId field's value.
+func (s *BackupSummary) SetTableId(v string) *BackupSummary {
+ s.TableId = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *BackupSummary) SetTableName(v string) *BackupSummary {
+ s.TableName = &v
+ return s
+}
+
+// Represents the input of a BatchGetItem operation.
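+//
+// A minimal sketch of such an input, fetching one item from one table; the
+// table, key, and attribute names are placeholders:
+//
+//    input := &BatchGetItemInput{
+//        RequestItems: map[string]*KeysAndAttributes{
+//            "Music": {
+//                Keys: []map[string]*AttributeValue{
+//                    {"Artist": {S: aws.String("No One You Know")}},
+//                },
+//                ProjectionExpression: aws.String("Artist, SongTitle"),
+//            },
+//        },
+//    }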
+type BatchGetItemInput struct {
+ _ struct{} `type:"structure"`
+
+ // A map of one or more table names and, for each table, a map that describes
+ // one or more items to retrieve from that table. Each table name can be used
+ // only once per BatchGetItem request.
+ //
+ // Each element in the map of items to retrieve consists of the following:
+ //
+ // * ConsistentRead - If true, a strongly consistent read is used; if false
+ // (the default), an eventually consistent read is used.
+ //
+ // * ExpressionAttributeNames - One or more substitution tokens for attribute
+ // names in the ProjectionExpression parameter. The following are some use
+ // cases for using ExpressionAttributeNames: To access an attribute whose
+ // name conflicts with a DynamoDB reserved word. To create a placeholder
+ // for repeating occurrences of an attribute name in an expression. To prevent
+ // special characters in an attribute name from being misinterpreted in an
+ // expression. Use the # character in an expression to dereference an attribute
+ // name. For example, consider the following attribute name: Percentile The
+ // name of this attribute conflicts with a reserved word, so it cannot be
+ // used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could
+ // specify the following for ExpressionAttributeNames: {"#P":"Percentile"}
+ // You could then use this substitution in an expression, as in this example:
+ // #P = :val Tokens that begin with the : character are expression attribute
+ // values, which are placeholders for the actual value at runtime. For more
+ // information about expression attribute names, see Accessing Item Attributes
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // * Keys - An array of primary key attribute values that define specific
+ // items in the table. For each primary key, you must provide all of the
+ // key attributes. For example, with a simple primary key, you only need
+ // to provide the partition key value. For a composite key, you must provide
+ // both the partition key value and the sort key value.
+ //
+ // * ProjectionExpression - A string that identifies one or more attributes
+ // to retrieve from the table. These attributes can include scalars, sets,
+ // or elements of a JSON document. The attributes in the expression must
+ // be separated by commas. If no attribute names are specified, then all
+ // attributes are returned. If any of the requested attributes are not found,
+ // they do not appear in the result. For more information, see Accessing
+ // Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // * AttributesToGet - This is a legacy parameter. Use ProjectionExpression
+ // instead. For more information, see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // RequestItems is a required field
+ RequestItems map[string]*KeysAndAttributes `min:"1" type:"map" required:"true"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed. Note that some operations, such as GetItem and
+ // BatchGetItem, do not access any indexes at all. In these cases, specifying
+ // INDEXES will only return ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+}
+
+// String returns the string representation
+func (s BatchGetItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchGetItemInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BatchGetItemInput"}
+ if s.RequestItems == nil {
+ invalidParams.Add(request.NewErrParamRequired("RequestItems"))
+ }
+ if s.RequestItems != nil && len(s.RequestItems) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RequestItems", 1))
+ }
+ if s.RequestItems != nil {
+ for i, v := range s.RequestItems {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RequestItems", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRequestItems sets the RequestItems field's value.
+func (s *BatchGetItemInput) SetRequestItems(v map[string]*KeysAndAttributes) *BatchGetItemInput {
+ s.RequestItems = v
+ return s
+}
+
+// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
+func (s *BatchGetItemInput) SetReturnConsumedCapacity(v string) *BatchGetItemInput {
+ s.ReturnConsumedCapacity = &v
+ return s
+}
+
+// Represents the output of a BatchGetItem operation.
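+//
+// A minimal sketch of draining UnprocessedKeys by resubmitting them; real code
+// should accumulate each response's Responses map and back off between calls:
+//
+//    out, err := client.BatchGetItem(input)
+//    for err == nil && len(out.UnprocessedKeys) > 0 {
+//        input.RequestItems = out.UnprocessedKeys
+//        out, err = client.BatchGetItem(input)
+//    }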
+type BatchGetItemOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The read capacity units consumed by the entire BatchGetItem operation.
+ //
+ // Each element consists of:
+ //
+ // * TableName - The table that consumed the provisioned throughput.
+ //
+ // * CapacityUnits - The total number of capacity units consumed.
+ ConsumedCapacity []*ConsumedCapacity `type:"list"`
+
+ // A map of table name to a list of items. Each object in Responses consists
+ // of a table name, along with a map of attribute data consisting of the data
+ // type and attribute value.
+ Responses map[string][]map[string]*AttributeValue `type:"map"`
+
+ // A map of tables and their respective keys that were not processed with the
+ // current response. The UnprocessedKeys value is in the same form as RequestItems,
+ // so the value can be provided directly to a subsequent BatchGetItem operation.
+ // For more information, see RequestItems in the Request Parameters section.
+ //
+ // Each element consists of:
+ //
+ // * Keys - An array of primary key attribute values that define specific
+ // items in the table.
+ //
+ // * ProjectionExpression - One or more attributes to be retrieved from the
+ // table or index. By default, all attributes are returned. If a requested
+ // attribute is not found, it does not appear in the result.
+ //
+ // * ConsistentRead - The consistency of a read operation. If set to true,
+ // then a strongly consistent read is used; otherwise, an eventually consistent
+ // read is used.
+ //
+ // If there are no unprocessed keys remaining, the response contains an empty
+ // UnprocessedKeys map.
+ UnprocessedKeys map[string]*KeysAndAttributes `min:"1" type:"map"`
+}
+
+// String returns the string representation
+func (s BatchGetItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetItemOutput) GoString() string {
+ return s.String()
+}
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.
+func (s *BatchGetItemOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchGetItemOutput {
+ s.ConsumedCapacity = v
+ return s
+}
+
+// SetResponses sets the Responses field's value.
+func (s *BatchGetItemOutput) SetResponses(v map[string][]map[string]*AttributeValue) *BatchGetItemOutput {
+ s.Responses = v
+ return s
+}
+
+// SetUnprocessedKeys sets the UnprocessedKeys field's value.
+func (s *BatchGetItemOutput) SetUnprocessedKeys(v map[string]*KeysAndAttributes) *BatchGetItemOutput {
+ s.UnprocessedKeys = v
+ return s
+}
+
+// Represents the input of a BatchWriteItem operation.
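+//
+// A minimal sketch of such an input, mixing a put and a delete against one
+// table; all names and values are placeholders:
+//
+//    input := &BatchWriteItemInput{
+//        RequestItems: map[string][]*WriteRequest{
+//            "Music": {
+//                {PutRequest: &PutRequest{Item: map[string]*AttributeValue{
+//                    "Artist": {S: aws.String("No One You Know")},
+//                }}},
+//                {DeleteRequest: &DeleteRequest{Key: map[string]*AttributeValue{
+//                    "Artist": {S: aws.String("The Acme Band")},
+//                }}},
+//            },
+//        },
+//    }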
+type BatchWriteItemInput struct {
+ _ struct{} `type:"structure"`
+
+ // A map of one or more table names and, for each table, a list of operations
+ // to be performed (DeleteRequest or PutRequest). Each element in the map consists
+ // of the following:
+ //
+ // * DeleteRequest - Perform a DeleteItem operation on the specified item.
+ // The item to be deleted is identified by a Key subelement: Key - A map
+ // of primary key attribute values that uniquely identify the item. Each
+ // entry in this map consists of an attribute name and an attribute value.
+ // For each primary key, you must provide all of the key attributes. For
+ // example, with a simple primary key, you only need to provide a value for
+ // the partition key. For a composite primary key, you must provide values
+ // for both the partition key and the sort key.
+ //
+ // * PutRequest - Perform a PutItem operation on the specified item. The
+ // item to be put is identified by an Item subelement: Item - A map of attributes
+ // and their values. Each entry in this map consists of an attribute name
+ // and an attribute value. Attribute values must not be null; string and
+ // binary type attributes must have lengths greater than zero; and set type
+ // attributes must not be empty. Requests that contain empty values are rejected
+ // with a ValidationException exception. If you specify any attributes that
+ // are part of an index key, then the data types for those attributes must
+ // match those of the schema in the table's attribute definition.
+ //
+ // RequestItems is a required field
+ RequestItems map[string][]*WriteRequest `min:"1" type:"map" required:"true"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed. Note that some operations, such as GetItem and
+ // BatchGetItem, do not access any indexes at all. In these cases, specifying
+ // INDEXES will only return ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // Determines whether item collection metrics are returned. If set to SIZE,
+ // the response includes statistics about item collections, if any, that were
+ // modified during the operation. If set to NONE (the default), no statistics
+ // are returned.
+ ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+}
+
+// String returns the string representation
+func (s BatchWriteItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchWriteItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchWriteItemInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BatchWriteItemInput"}
+ if s.RequestItems == nil {
+ invalidParams.Add(request.NewErrParamRequired("RequestItems"))
+ }
+ if s.RequestItems != nil && len(s.RequestItems) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RequestItems", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRequestItems sets the RequestItems field's value.
+func (s *BatchWriteItemInput) SetRequestItems(v map[string][]*WriteRequest) *BatchWriteItemInput {
+ s.RequestItems = v
+ return s
+}
+
+// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
+func (s *BatchWriteItemInput) SetReturnConsumedCapacity(v string) *BatchWriteItemInput {
+ s.ReturnConsumedCapacity = &v
+ return s
+}
+
+// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value.
+func (s *BatchWriteItemInput) SetReturnItemCollectionMetrics(v string) *BatchWriteItemInput {
+ s.ReturnItemCollectionMetrics = &v
+ return s
+}
+
+// Represents the output of a BatchWriteItem operation.
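+//
+// A minimal sketch of resubmitting UnprocessedItems until the map is empty;
+// real code should use exponential backoff between calls:
+//
+//    out, err := client.BatchWriteItem(input)
+//    for err == nil && len(out.UnprocessedItems) > 0 {
+//        input.RequestItems = out.UnprocessedItems
+//        out, err = client.BatchWriteItem(input)
+//    }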
+type BatchWriteItemOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The capacity units consumed by the entire BatchWriteItem operation.
+ //
+ // Each element consists of:
+ //
+ // * TableName - The table that consumed the provisioned throughput.
+ //
+ // * CapacityUnits - The total number of capacity units consumed.
+ ConsumedCapacity []*ConsumedCapacity `type:"list"`
+
+ // A list of tables that were processed by BatchWriteItem and, for each table,
+ // information about any item collections that were affected by individual DeleteItem
+ // or PutItem operations.
+ //
+ // Each entry consists of the following subelements:
+ //
+ // * ItemCollectionKey - The partition key value of the item collection.
+ // This is the same as the partition key value of the item.
+ //
+ // * SizeEstimateRangeGB - An estimate of item collection size, expressed
+ // in GB. This is a two-element array containing a lower bound and an upper
+ // bound for the estimate. The estimate includes the size of all the items
+ // in the table, plus the size of all attributes projected into all of the
+ // local secondary indexes on the table. Use this estimate to measure whether
+ // a local secondary index is approaching its size limit. The estimate is
+ // subject to change over time; therefore, do not rely on the precision or
+ // accuracy of the estimate.
+ ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"`
+
+ // A map of tables and requests against those tables that were not processed.
+ // The UnprocessedItems value is in the same form as RequestItems, so you can
+ // provide this value directly to a subsequent BatchWriteItem operation (see
+ // the resubmission sketch after the setters below). For more information, see
+ // RequestItems in the Request Parameters section.
+ //
+ // Each UnprocessedItems entry consists of a table name and, for that table,
+ // a list of operations to perform (DeleteRequest or PutRequest).
+ //
+ // * DeleteRequest - Perform a DeleteItem operation on the specified item.
+ // The item to be deleted is identified by a Key subelement: Key - A map
+ // of primary key attribute values that uniquely identify the item. Each
+ // entry in this map consists of an attribute name and an attribute value.
+ //
+ // * PutRequest - Perform a PutItem operation on the specified item. The
+ // item to be put is identified by an Item subelement: Item - A map of attributes
+ // and their values. Each entry in this map consists of an attribute name
+ // and an attribute value. Attribute values must not be null; string and
+ // binary type attributes must have lengths greater than zero; and set type
+ // attributes must not be empty. Requests that contain empty values will
+ // be rejected with a ValidationException exception. If you specify any attributes
+ // that are part of an index key, then the data types for those attributes
+ // must match those of the schema in the table's attribute definition.
+ //
+ // If there are no unprocessed items remaining, the response contains an empty
+ // UnprocessedItems map.
+ UnprocessedItems map[string][]*WriteRequest `min:"1" type:"map"`
+}
+
+// String returns the string representation
+func (s BatchWriteItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchWriteItemOutput) GoString() string {
+ return s.String()
+}
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.
+func (s *BatchWriteItemOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchWriteItemOutput {
+ s.ConsumedCapacity = v
+ return s
+}
+
+// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value.
+func (s *BatchWriteItemOutput) SetItemCollectionMetrics(v map[string][]*ItemCollectionMetrics) *BatchWriteItemOutput {
+ s.ItemCollectionMetrics = v
+ return s
+}
+
+// SetUnprocessedItems sets the UnprocessedItems field's value.
+func (s *BatchWriteItemOutput) SetUnprocessedItems(v map[string][]*WriteRequest) *BatchWriteItemOutput {
+ s.UnprocessedItems = v
+ return s
+}
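+
+// exampleResubmitUnprocessed is an illustrative sketch of the resubmission
+// pattern described for UnprocessedItems above: each call's leftovers become
+// the next call's RequestItems. Real code should also back off between
+// attempts, as the service documentation recommends.
+func exampleResubmitUnprocessed(svc *DynamoDB, items map[string][]*WriteRequest) error {
+ for len(items) > 0 {
+ out, err := svc.BatchWriteItem(&BatchWriteItemInput{RequestItems: items})
+ if err != nil {
+ return err
+ }
+ items = out.UnprocessedItems
+ }
+ return nil
+}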
+
+// Contains the details for the read/write capacity mode.
+type BillingModeSummary struct {
+ _ struct{} `type:"structure"`
+
+ // Controls how you are charged for read and write throughput and how you manage
+ // capacity. This setting can be changed later.
+ //
+ // * PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend
+ // using PROVISIONED for predictable workloads.
+ //
+ // * PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST.
+ // We recommend using PAY_PER_REQUEST for unpredictable workloads.
+ BillingMode *string `type:"string" enum:"BillingMode"`
+
+ // Represents the time when PAY_PER_REQUEST was last set as the read/write capacity
+ // mode.
+ LastUpdateToPayPerRequestDateTime *time.Time `type:"timestamp"`
+}
+
+// String returns the string representation
+func (s BillingModeSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BillingModeSummary) GoString() string {
+ return s.String()
+}
+
+// SetBillingMode sets the BillingMode field's value.
+func (s *BillingModeSummary) SetBillingMode(v string) *BillingModeSummary {
+ s.BillingMode = &v
+ return s
+}
+
+// SetLastUpdateToPayPerRequestDateTime sets the LastUpdateToPayPerRequestDateTime field's value.
+func (s *BillingModeSummary) SetLastUpdateToPayPerRequestDateTime(v time.Time) *BillingModeSummary {
+ s.LastUpdateToPayPerRequestDateTime = &v
+ return s
+}
+
+// An ordered list of errors, one for each item in the request, describing the
+// error (if any) that caused the transaction to get cancelled. The values of
+// the list are ordered according to the ordering of the TransactWriteItems
+// request parameter. If no error occurred for the associated item, an error
+// with a Null code and Null message will be present.
+type CancellationReason struct {
+ _ struct{} `type:"structure"`
+
+ // Status code for the result of the cancelled transaction.
+ Code *string `type:"string"`
+
+ // Item in the request which caused the transaction to get cancelled.
+ Item map[string]*AttributeValue `type:"map"`
+
+ // Cancellation reason message description.
+ Message *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CancellationReason) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CancellationReason) GoString() string {
+ return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *CancellationReason) SetCode(v string) *CancellationReason {
+ s.Code = &v
+ return s
+}
+
+// SetItem sets the Item field's value.
+func (s *CancellationReason) SetItem(v map[string]*AttributeValue) *CancellationReason {
+ s.Item = v
+ return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *CancellationReason) SetMessage(v string) *CancellationReason {
+ s.Message = &v
+ return s
+}
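+
+// exampleReportCancellations is an illustrative sketch of walking the ordered
+// CancellationReason list described above; how the slice is obtained from a
+// cancelled TransactWriteItems error is SDK-version specific and out of scope.
+func exampleReportCancellations(reasons []*CancellationReason) {
+ for i, r := range reasons {
+ if r == nil || r.Code == nil {
+ continue // per the doc above, non-failing items carry a Null code
+ }
+ fmt.Printf("item %d failed: %s (%s)\n", i,
+ aws.StringValue(r.Code), aws.StringValue(r.Message))
+ }
+}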
+
+// Represents the amount of provisioned throughput capacity consumed on a table
+// or an index.
+type Capacity struct {
+ _ struct{} `type:"structure"`
+
+ // The total number of capacity units consumed on a table or an index.
+ CapacityUnits *float64 `type:"double"`
+
+ // The total number of read capacity units consumed on a table or an index.
+ ReadCapacityUnits *float64 `type:"double"`
+
+ // The total number of write capacity units consumed on a table or an index.
+ WriteCapacityUnits *float64 `type:"double"`
+}
+
+// String returns the string representation
+func (s Capacity) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Capacity) GoString() string {
+ return s.String()
+}
+
+// SetCapacityUnits sets the CapacityUnits field's value.
+func (s *Capacity) SetCapacityUnits(v float64) *Capacity {
+ s.CapacityUnits = &v
+ return s
+}
+
+// SetReadCapacityUnits sets the ReadCapacityUnits field's value.
+func (s *Capacity) SetReadCapacityUnits(v float64) *Capacity {
+ s.ReadCapacityUnits = &v
+ return s
+}
+
+// SetWriteCapacityUnits sets the WriteCapacityUnits field's value.
+func (s *Capacity) SetWriteCapacityUnits(v float64) *Capacity {
+ s.WriteCapacityUnits = &v
+ return s
+}
+
+// Represents the selection criteria for a Query or Scan operation:
+//
+// * For a Query operation, Condition is used for specifying the KeyConditions
+// to use when querying a table or an index. For KeyConditions, only the
+// following comparison operators are supported: EQ | LE | LT | GE | GT |
+// BEGINS_WITH | BETWEEN. Condition is also used in a QueryFilter, which
+// evaluates the query results and returns only the desired values.
+//
+// * For a Scan operation, Condition is used in a ScanFilter, which evaluates
+// the scan results and returns only the desired values.
+type Condition struct {
+ _ struct{} `type:"structure"`
+
+ // One or more values to evaluate against the supplied attribute. The number
+ // of values in the list depends on the ComparisonOperator being used.
+ //
+ // For type Number, value comparisons are numeric.
+ //
+ // String value comparisons for greater than, equals, or less than are based
+ // on ASCII character code values. For example, a is greater than A, and a is
+ // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters
+ // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters).
+ //
+ // For Binary, DynamoDB treats each byte of the binary data as unsigned when
+ // it compares binary values.
+ AttributeValueList []*AttributeValue `type:"list"`
+
+ // A comparator for evaluating attributes. For example, equals, greater than,
+ // less than, etc.
+ //
+ // The following comparison operators are available:
+ //
+ // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS |
+ // BEGINS_WITH | IN | BETWEEN
+ //
+ // The following are descriptions of each comparison operator.
+ //
+ // * EQ : Equal. EQ is supported for all data types, including lists and
+ // maps. AttributeValueList can contain only one AttributeValue element of
+ // type String, Number, Binary, String Set, Number Set, or Binary Set. If
+ // an item contains an AttributeValue element of a different type than the
+ // one provided in the request, the value does not match. For example, {"S":"6"}
+ // does not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2",
+ // "1"]}.
+ //
+ // * NE : Not equal. NE is supported for all data types, including lists
+ // and maps. AttributeValueList can contain only one AttributeValue of type
+ // String, Number, Binary, String Set, Number Set, or Binary Set. If an item
+ // contains an AttributeValue of a different type than the one provided in
+ // the request, the value does not match. For example, {"S":"6"} does not
+ // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}.
+ //
+ // * LE : Less than or equal. AttributeValueList can contain only one AttributeValue
+ // element of type String, Number, or Binary (not a set type). If an item
+ // contains an AttributeValue element of a different type than the one provided
+ // in the request, the value does not match. For example, {"S":"6"} does
+ // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2",
+ // "1"]}.
+ //
+ // * LT : Less than. AttributeValueList can contain only one AttributeValue
+ // of type String, Number, or Binary (not a set type). If an item contains
+ // an AttributeValue element of a different type than the one provided in
+ // the request, the value does not match. For example, {"S":"6"} does not
+ // equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2",
+ // "1"]}.
+ //
+ // * GE : Greater than or equal. AttributeValueList can contain only one
+ // AttributeValue element of type String, Number, or Binary (not a set type).
+ // If an item contains an AttributeValue element of a different type than
+ // the one provided in the request, the value does not match. For example,
+ // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to
+ // {"NS":["6", "2", "1"]}.
+ //
+ // * GT : Greater than. AttributeValueList can contain only one AttributeValue
+ // element of type String, Number, or Binary (not a set type). If an item
+ // contains an AttributeValue element of a different type than the one provided
+ // in the request, the value does not match. For example, {"S":"6"} does
+ // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2",
+ // "1"]}.
+ //
+ // * NOT_NULL : The attribute exists. NOT_NULL is supported for all data
+ // types, including lists and maps. This operator tests for the existence
+ // of an attribute, not its data type. If the data type of attribute "a"
+ // is null, and you evaluate it using NOT_NULL, the result is a Boolean true.
+ // This result is because the attribute "a" exists; its data type is not
+ // relevant to the NOT_NULL comparison operator.
+ //
+ // * NULL : The attribute does not exist. NULL is supported for all data
+ // types, including lists and maps. This operator tests for the nonexistence
+ // of an attribute, not its data type. If the data type of attribute "a"
+ // is null, and you evaluate it using NULL, the result is a Boolean false.
+ // This is because the attribute "a" exists; its data type is not relevant
+ // to the NULL comparison operator.
+ //
+ // * CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList
+ // can contain only one AttributeValue element of type String, Number, or
+ // Binary (not a set type). If the target attribute of the comparison is
+ // of type String, then the operator checks for a substring match. If the
+ // target attribute of the comparison is of type Binary, then the operator
+ // looks for a subsequence of the target that matches the input. If the target
+ // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator
+ // evaluates to true if it finds an exact match with any member of the set.
+ // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can
+ // be a list; however, "b" cannot be a set, a map, or a list.
+ //
+ // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a
+ // value in a set. AttributeValueList can contain only one AttributeValue
+ // element of type String, Number, or Binary (not a set type). If the target
+ // attribute of the comparison is a String, then the operator checks for
+ // the absence of a substring match. If the target attribute of the comparison
+ // is Binary, then the operator checks for the absence of a subsequence of
+ // the target that matches the input. If the target attribute of the comparison
+ // is a set ("SS", "NS", or "BS"), then the operator evaluates to true if
+ // it does not find an exact match with any member of the set. NOT_CONTAINS
+ // is supported for lists: When evaluating "a NOT CONTAINS b", "a" can be
+ // a list; however, "b" cannot be a set, a map, or a list.
+ //
+ // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only
+ // one AttributeValue of type String or Binary (not a Number or a set type).
+ // The target attribute of the comparison must be of type String or Binary
+ // (not a Number or a set type).
+ //
+ // * IN : Checks for matching elements in a list. AttributeValueList can
+ // contain one or more AttributeValue elements of type String, Number, or
+ // Binary. These attributes are compared against an existing attribute of
+ // an item. If any elements of the input are equal to the item attribute,
+ // the expression evaluates to true.
+ //
+ // * BETWEEN : Greater than or equal to the first value, and less than or
+ // equal to the second value. AttributeValueList must contain two AttributeValue
+ // elements of the same type, either String, Number, or Binary (not a set
+ // type). A target attribute matches if the target value is greater than,
+ // or equal to, the first element and less than, or equal to, the second
+ // element. If an item contains an AttributeValue element of a different
+ // type than the one provided in the request, the value does not match. For
+ // example, {"S":"6"} does not compare to {"N":"6"}. Also, {"N":"6"} does
+ // not compare to {"NS":["6", "2", "1"]}
+ //
+ // For usage examples of AttributeValueList and ComparisonOperator, see Legacy
+ // Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // ComparisonOperator is a required field
+ ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"`
+}
+
+// String returns the string representation
+func (s Condition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Condition) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Condition) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Condition"}
+ if s.ComparisonOperator == nil {
+ invalidParams.Add(request.NewErrParamRequired("ComparisonOperator"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributeValueList sets the AttributeValueList field's value.
+func (s *Condition) SetAttributeValueList(v []*AttributeValue) *Condition {
+ s.AttributeValueList = v
+ return s
+}
+
+// SetComparisonOperator sets the ComparisonOperator field's value.
+func (s *Condition) SetComparisonOperator(v string) *Condition {
+ s.ComparisonOperator = &v
+ return s
+}
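+
+// exampleBeginsWithCondition is an illustrative sketch of a legacy Condition,
+// usable in KeyConditions or a QueryFilter as described above: it matches
+// sort-key values beginning with "2019". New code would normally use a
+// KeyConditionExpression instead.
+func exampleBeginsWithCondition() *Condition {
+ return new(Condition).
+ SetComparisonOperator(ComparisonOperatorBeginsWith).
+ SetAttributeValueList([]*AttributeValue{{S: aws.String("2019")}})
+}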
+
+// Represents a request to perform a check that an item exists or to check
+// the condition of specific attributes of the item.
+type ConditionCheck struct {
+ _ struct{} `type:"structure"`
+
+ // A condition that must be satisfied in order for a conditional update to succeed.
+ //
+ // ConditionExpression is a required field
+ ConditionExpression *string `type:"string" required:"true"`
+
+ // One or more substitution tokens for attribute names in an expression.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+ // The primary key of the item to be checked. Each element consists of an attribute
+ // name and a value for that attribute.
+ //
+ // Key is a required field
+ Key map[string]*AttributeValue `type:"map" required:"true"`
+
+ // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the
+ // ConditionCheck condition fails. For ReturnValuesOnConditionCheckFailure,
+ // the valid values are: NONE and ALL_OLD.
+ ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
+
+ // Name of the table for the check item request.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ConditionCheck) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConditionCheck) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ConditionCheck) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ConditionCheck"}
+ if s.ConditionExpression == nil {
+ invalidParams.Add(request.NewErrParamRequired("ConditionExpression"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetConditionExpression sets the ConditionExpression field's value.
+func (s *ConditionCheck) SetConditionExpression(v string) *ConditionCheck {
+ s.ConditionExpression = &v
+ return s
+}
+
+// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
+func (s *ConditionCheck) SetExpressionAttributeNames(v map[string]*string) *ConditionCheck {
+ s.ExpressionAttributeNames = v
+ return s
+}
+
+// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
+func (s *ConditionCheck) SetExpressionAttributeValues(v map[string]*AttributeValue) *ConditionCheck {
+ s.ExpressionAttributeValues = v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ConditionCheck) SetKey(v map[string]*AttributeValue) *ConditionCheck {
+ s.Key = v
+ return s
+}
+
+// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value.
+func (s *ConditionCheck) SetReturnValuesOnConditionCheckFailure(v string) *ConditionCheck {
+ s.ReturnValuesOnConditionCheckFailure = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *ConditionCheck) SetTableName(v string) *ConditionCheck {
+ s.TableName = &v
+ return s
+}
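+
+// exampleConditionCheck is an illustrative sketch of a ConditionCheck entry
+// for a TransactWriteItems request: the transaction succeeds only if the
+// hypothetical "Music" item exists.
+func exampleConditionCheck() *TransactWriteItem {
+ return &TransactWriteItem{
+ ConditionCheck: &ConditionCheck{
+ TableName: aws.String("Music"),
+ Key: map[string]*AttributeValue{
+ "Artist": {S: aws.String("No One You Know")},
+ },
+ ConditionExpression: aws.String("attribute_exists(Artist)"),
+ },
+ }
+}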
+
+// The capacity units consumed by an operation. The data returned includes the
+// total provisioned throughput consumed, along with statistics for the table
+// and any indexes involved in the operation. ConsumedCapacity is only returned
+// if the request asked for it. For more information, see Provisioned Throughput
+// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+// in the Amazon DynamoDB Developer Guide.
+type ConsumedCapacity struct {
+ _ struct{} `type:"structure"`
+
+ // The total number of capacity units consumed by the operation.
+ CapacityUnits *float64 `type:"double"`
+
+ // The amount of throughput consumed on each global index affected by the operation.
+ GlobalSecondaryIndexes map[string]*Capacity `type:"map"`
+
+ // The amount of throughput consumed on each local index affected by the operation.
+ LocalSecondaryIndexes map[string]*Capacity `type:"map"`
+
+ // The total number of read capacity units consumed by the operation.
+ ReadCapacityUnits *float64 `type:"double"`
+
+ // The amount of throughput consumed on the table affected by the operation.
+ Table *Capacity `type:"structure"`
+
+ // The name of the table that was affected by the operation.
+ TableName *string `min:"3" type:"string"`
+
+ // The total number of write capacity units consumed by the operation.
+ WriteCapacityUnits *float64 `type:"double"`
+}
+
+// String returns the string representation
+func (s ConsumedCapacity) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConsumedCapacity) GoString() string {
+ return s.String()
+}
+
+// SetCapacityUnits sets the CapacityUnits field's value.
+func (s *ConsumedCapacity) SetCapacityUnits(v float64) *ConsumedCapacity {
+ s.CapacityUnits = &v
+ return s
+}
+
+// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
+func (s *ConsumedCapacity) SetGlobalSecondaryIndexes(v map[string]*Capacity) *ConsumedCapacity {
+ s.GlobalSecondaryIndexes = v
+ return s
+}
+
+// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value.
+func (s *ConsumedCapacity) SetLocalSecondaryIndexes(v map[string]*Capacity) *ConsumedCapacity {
+ s.LocalSecondaryIndexes = v
+ return s
+}
+
+// SetReadCapacityUnits sets the ReadCapacityUnits field's value.
+func (s *ConsumedCapacity) SetReadCapacityUnits(v float64) *ConsumedCapacity {
+ s.ReadCapacityUnits = &v
+ return s
+}
+
+// SetTable sets the Table field's value.
+func (s *ConsumedCapacity) SetTable(v *Capacity) *ConsumedCapacity {
+ s.Table = v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *ConsumedCapacity) SetTableName(v string) *ConsumedCapacity {
+ s.TableName = &v
+ return s
+}
+
+// SetWriteCapacityUnits sets the WriteCapacityUnits field's value.
+func (s *ConsumedCapacity) SetWriteCapacityUnits(v float64) *ConsumedCapacity {
+ s.WriteCapacityUnits = &v
+ return s
+}
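+
+// exampleLogConsumedCapacity is an illustrative sketch of reading the fields
+// above. ConsumedCapacity is nil unless the request asked for it via
+// ReturnConsumedCapacity.
+func exampleLogConsumedCapacity(cc *ConsumedCapacity) {
+ if cc == nil {
+ return
+ }
+ fmt.Printf("table %s consumed %.1f capacity units\n",
+ aws.StringValue(cc.TableName), aws.Float64Value(cc.CapacityUnits))
+ for name, idx := range cc.GlobalSecondaryIndexes {
+ fmt.Printf(" index %s consumed %.1f capacity units\n",
+ name, aws.Float64Value(idx.CapacityUnits))
+ }
+}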
+
+// Represents the continuous backups and point in time recovery settings on
+// the table.
+type ContinuousBackupsDescription struct {
+ _ struct{} `type:"structure"`
+
+ // ContinuousBackupsStatus can be one of the following states: ENABLED, DISABLED
+ //
+ // ContinuousBackupsStatus is a required field
+ ContinuousBackupsStatus *string `type:"string" required:"true" enum:"ContinuousBackupsStatus"`
+
+ // The description of the point in time recovery settings applied to the table.
+ PointInTimeRecoveryDescription *PointInTimeRecoveryDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s ContinuousBackupsDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContinuousBackupsDescription) GoString() string {
+ return s.String()
+}
+
+// SetContinuousBackupsStatus sets the ContinuousBackupsStatus field's value.
+func (s *ContinuousBackupsDescription) SetContinuousBackupsStatus(v string) *ContinuousBackupsDescription {
+ s.ContinuousBackupsStatus = &v
+ return s
+}
+
+// SetPointInTimeRecoveryDescription sets the PointInTimeRecoveryDescription field's value.
+func (s *ContinuousBackupsDescription) SetPointInTimeRecoveryDescription(v *PointInTimeRecoveryDescription) *ContinuousBackupsDescription {
+ s.PointInTimeRecoveryDescription = v
+ return s
+}
+
+type CreateBackupInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specified name for the backup.
+ //
+ // BackupName is a required field
+ BackupName *string `min:"3" type:"string" required:"true"`
+
+ // The name of the table.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateBackupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBackupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateBackupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateBackupInput"}
+ if s.BackupName == nil {
+ invalidParams.Add(request.NewErrParamRequired("BackupName"))
+ }
+ if s.BackupName != nil && len(*s.BackupName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("BackupName", 3))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBackupName sets the BackupName field's value.
+func (s *CreateBackupInput) SetBackupName(v string) *CreateBackupInput {
+ s.BackupName = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *CreateBackupInput) SetTableName(v string) *CreateBackupInput {
+ s.TableName = &v
+ return s
+}
+
+type CreateBackupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the details of the backup created for the table.
+ BackupDetails *BackupDetails `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateBackupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBackupOutput) GoString() string {
+ return s.String()
+}
+
+// SetBackupDetails sets the BackupDetails field's value.
+func (s *CreateBackupOutput) SetBackupDetails(v *BackupDetails) *CreateBackupOutput {
+ s.BackupDetails = v
+ return s
+}
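+
+// exampleCreateBackup is an illustrative sketch of an on-demand backup
+// request using the input and output types above; the table and backup names
+// are hypothetical.
+func exampleCreateBackup(svc *DynamoDB) (*CreateBackupOutput, error) {
+ return svc.CreateBackup(&CreateBackupInput{
+ TableName: aws.String("Music"),
+ BackupName: aws.String("Music-backup-20190101"),
+ })
+}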
+
+// Represents a new global secondary index to be added to an existing table.
+type CreateGlobalSecondaryIndexAction struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index to be created.
+ //
+ // IndexName is a required field
+ IndexName *string `min:"3" type:"string" required:"true"`
+
+ // The key schema for the global secondary index.
+ //
+ // KeySchema is a required field
+ KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
+
+ // Represents attributes that are copied (projected) from the table into an
+ // index. These are in addition to the primary key attributes and index key
+ // attributes, which are automatically projected.
+ //
+ // Projection is a required field
+ Projection *Projection `type:"structure" required:"true"`
+
+ // Represents the provisioned throughput settings for the specified global secondary
+ // index.
+ //
+ // For current minimum and maximum provisioned throughput values, see Limits
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateGlobalSecondaryIndexAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateGlobalSecondaryIndexAction) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateGlobalSecondaryIndexAction) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateGlobalSecondaryIndexAction"}
+ if s.IndexName == nil {
+ invalidParams.Add(request.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+ }
+ if s.KeySchema == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeySchema"))
+ }
+ if s.KeySchema != nil && len(s.KeySchema) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1))
+ }
+ if s.Projection == nil {
+ invalidParams.Add(request.NewErrParamRequired("Projection"))
+ }
+ if s.KeySchema != nil {
+ for i, v := range s.KeySchema {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.Projection != nil {
+ if err := s.Projection.Validate(); err != nil {
+ invalidParams.AddNested("Projection", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.ProvisionedThroughput != nil {
+ if err := s.ProvisionedThroughput.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *CreateGlobalSecondaryIndexAction) SetIndexName(v string) *CreateGlobalSecondaryIndexAction {
+ s.IndexName = &v
+ return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *CreateGlobalSecondaryIndexAction) SetKeySchema(v []*KeySchemaElement) *CreateGlobalSecondaryIndexAction {
+ s.KeySchema = v
+ return s
+}
+
+// SetProjection sets the Projection field's value.
+func (s *CreateGlobalSecondaryIndexAction) SetProjection(v *Projection) *CreateGlobalSecondaryIndexAction {
+ s.Projection = v
+ return s
+}
+
+// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
+func (s *CreateGlobalSecondaryIndexAction) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateGlobalSecondaryIndexAction {
+ s.ProvisionedThroughput = v
+ return s
+}
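+
+// exampleAddGlobalSecondaryIndex is an illustrative sketch of using the action
+// above in an UpdateTable call. The index, table, and attribute names are
+// hypothetical; the new key attribute must also appear in AttributeDefinitions.
+func exampleAddGlobalSecondaryIndex(svc *DynamoDB) (*UpdateTableOutput, error) {
+ return svc.UpdateTable(&UpdateTableInput{
+ TableName: aws.String("Music"),
+ AttributeDefinitions: []*AttributeDefinition{
+ {AttributeName: aws.String("Genre"), AttributeType: aws.String(ScalarAttributeTypeS)},
+ },
+ GlobalSecondaryIndexUpdates: []*GlobalSecondaryIndexUpdate{{
+ Create: &CreateGlobalSecondaryIndexAction{
+ IndexName: aws.String("GenreIndex"),
+ KeySchema: []*KeySchemaElement{
+ {AttributeName: aws.String("Genre"), KeyType: aws.String(KeyTypeHash)},
+ },
+ Projection: &Projection{ProjectionType: aws.String(ProjectionTypeAll)},
+ ProvisionedThroughput: &ProvisionedThroughput{
+ ReadCapacityUnits: aws.Int64(5),
+ WriteCapacityUnits: aws.Int64(5),
+ },
+ },
+ }},
+ })
+}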
+
+type CreateGlobalTableInput struct {
+ _ struct{} `type:"structure"`
+
+ // The global table name.
+ //
+ // GlobalTableName is a required field
+ GlobalTableName *string `min:"3" type:"string" required:"true"`
+
+ // The Regions where the global table needs to be created.
+ //
+ // ReplicationGroup is a required field
+ ReplicationGroup []*Replica `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateGlobalTableInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateGlobalTableInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateGlobalTableInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateGlobalTableInput"}
+ if s.GlobalTableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("GlobalTableName"))
+ }
+ if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3))
+ }
+ if s.ReplicationGroup == nil {
+ invalidParams.Add(request.NewErrParamRequired("ReplicationGroup"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGlobalTableName sets the GlobalTableName field's value.
+func (s *CreateGlobalTableInput) SetGlobalTableName(v string) *CreateGlobalTableInput {
+ s.GlobalTableName = &v
+ return s
+}
+
+// SetReplicationGroup sets the ReplicationGroup field's value.
+func (s *CreateGlobalTableInput) SetReplicationGroup(v []*Replica) *CreateGlobalTableInput {
+ s.ReplicationGroup = v
+ return s
+}
+
+type CreateGlobalTableOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the details of the global table.
+ GlobalTableDescription *GlobalTableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateGlobalTableOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateGlobalTableOutput) GoString() string {
+ return s.String()
+}
+
+// SetGlobalTableDescription sets the GlobalTableDescription field's value.
+func (s *CreateGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *CreateGlobalTableOutput {
+ s.GlobalTableDescription = v
+ return s
+}
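+
+// exampleCreateGlobalTable is an illustrative sketch of the input type above:
+// a global table spanning two Regions. A table named "Music" must already
+// exist in each Region; all names here are hypothetical.
+func exampleCreateGlobalTable(svc *DynamoDB) (*CreateGlobalTableOutput, error) {
+ return svc.CreateGlobalTable(&CreateGlobalTableInput{
+ GlobalTableName: aws.String("Music"),
+ ReplicationGroup: []*Replica{
+ {RegionName: aws.String("us-east-1")},
+ {RegionName: aws.String("eu-west-1")},
+ },
+ })
+}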
+
+// Represents a replica to be added.
+type CreateReplicaAction struct {
+ _ struct{} `type:"structure"`
+
+ // The Region of the replica to be added.
+ //
+ // RegionName is a required field
+ RegionName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateReplicaAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateReplicaAction) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateReplicaAction) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateReplicaAction"}
+ if s.RegionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RegionName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRegionName sets the RegionName field's value.
+func (s *CreateReplicaAction) SetRegionName(v string) *CreateReplicaAction {
+ s.RegionName = &v
+ return s
+}
+
+// Represents the input of a CreateTable operation.
+type CreateTableInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of attributes that describe the key schema for the table and indexes.
+ //
+ // AttributeDefinitions is a required field
+ AttributeDefinitions []*AttributeDefinition `type:"list" required:"true"`
+
+ // Controls how you are charged for read and write throughput and how you manage
+ // capacity. This setting can be changed later.
+ //
+ // * PROVISIONED - Sets the billing mode to PROVISIONED. We recommend using
+ // PROVISIONED for predictable workloads.
+ //
+ // * PAY_PER_REQUEST - Sets the billing mode to PAY_PER_REQUEST. We recommend
+ // using PAY_PER_REQUEST for unpredictable workloads.
+ BillingMode *string `type:"string" enum:"BillingMode"`
+
+ // One or more global secondary indexes (the maximum is 20) to be created on
+ // the table. Each global secondary index in the array includes the following:
+ //
+ // * IndexName - The name of the global secondary index. Must be unique only
+ // for this table.
+ //
+ // * KeySchema - Specifies the key schema for the global secondary index.
+ //
+ // * Projection - Specifies attributes that are copied (projected) from the
+ // table into the index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected. Each attribute
+ // specification is composed of: ProjectionType - One of the following: KEYS_ONLY
+ // - Only the index and primary keys are projected into the index. INCLUDE
+ // - Only the specified table attributes are projected into the index. The
+ // list of projected attributes is in NonKeyAttributes. ALL - All of the
+ // table attributes are projected into the index. NonKeyAttributes - A list
+ // of one or more non-key attribute names that are projected into the secondary
+ // index. The total count of attributes provided in NonKeyAttributes, summed
+ // across all of the secondary indexes, must not exceed 100. If you project
+ // the same attribute into two different indexes, this counts as two distinct
+ // attributes when determining the total.
+ //
+ // * ProvisionedThroughput - The provisioned throughput settings for the
+ // global secondary index, consisting of read and write capacity units.
+ GlobalSecondaryIndexes []*GlobalSecondaryIndex `type:"list"`
+
+ // Specifies the attributes that make up the primary key for a table or an index.
+ // The attributes in KeySchema must also be defined in the AttributeDefinitions
+ // array. For more information, see Data Model (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // Each KeySchemaElement in the array is composed of:
+ //
+ // * AttributeName - The name of this key attribute.
+ //
+ // * KeyType - The role that the key attribute will assume: HASH - partition
+ // key RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from the DynamoDB usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // For a simple primary key (partition key), you must provide exactly one element
+ // with a KeyType of HASH.
+ //
+ // For a composite primary key (partition key and sort key), you must provide
+ // exactly two elements, in this order: The first element must have a KeyType
+ // of HASH, and the second element must have a KeyType of RANGE.
+ //
+ // For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // KeySchema is a required field
+ KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
+
+ // One or more local secondary indexes (the maximum is 5) to be created on the
+ // table. Each index is scoped to a given partition key value. There is a 10
+ // GB size limit per partition key value; otherwise, the size of a local secondary
+ // index is unconstrained.
+ //
+ // Each local secondary index in the array includes the following:
+ //
+ // * IndexName - The name of the local secondary index. Must be unique only
+ // for this table.
+ //
+ // * KeySchema - Specifies the key schema for the local secondary index.
+ // The key schema must begin with the same partition key as the table.
+ //
+ // * Projection - Specifies attributes that are copied (projected) from the
+ // table into the index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected. Each attribute
+ // specification is composed of: ProjectionType - One of the following: KEYS_ONLY
+ // - Only the index and primary keys are projected into the index. INCLUDE
+ // - Only the specified table attributes are projected into the index. The
+ // list of projected attributes is in NonKeyAttributes. ALL - All of the
+ // table attributes are projected into the index. NonKeyAttributes - A list
+ // of one or more non-key attribute names that are projected into the secondary
+ // index. The total count of attributes provided in NonKeyAttributes, summed
+ // across all of the secondary indexes, must not exceed 100. If you project
+ // the same attribute into two different indexes, this counts as two distinct
+ // attributes when determining the total.
+ LocalSecondaryIndexes []*LocalSecondaryIndex `type:"list"`
+
+ // Represents the provisioned throughput settings for a specified table or index.
+ // The settings can be modified using the UpdateTable operation.
+ //
+ // If you set BillingMode as PROVISIONED, you must specify this property. If
+ // you set BillingMode as PAY_PER_REQUEST, you cannot specify this property.
+ //
+ // For current minimum and maximum provisioned throughput values, see Limits
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
+
+ // Represents the settings used to enable server-side encryption.
+ SSESpecification *SSESpecification `type:"structure"`
+
+ // The settings for DynamoDB Streams on the table. These settings consist of:
+ //
+ // * StreamEnabled - Indicates whether DynamoDB Streams is to be enabled
+ // (true) or disabled (false).
+ //
+ // * StreamViewType - When an item in the table is modified, StreamViewType
+ // determines what information is written to the table's stream. Valid values
+ // for StreamViewType are: KEYS_ONLY - Only the key attributes of the modified
+ // item are written to the stream. NEW_IMAGE - The entire item, as it appears
+ // after it was modified, is written to the stream. OLD_IMAGE - The entire
+ // item, as it appeared before it was modified, is written to the stream.
+ // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item
+ // are written to the stream.
+ StreamSpecification *StreamSpecification `type:"structure"`
+
+ // The name of the table to create.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+
+ // A list of key-value pairs to label the table. For more information, see Tagging
+ // for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html).
+ Tags []*Tag `type:"list"`
+}
+
+// String returns the string representation
+func (s CreateTableInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTableInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateTableInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateTableInput"}
+ if s.AttributeDefinitions == nil {
+ invalidParams.Add(request.NewErrParamRequired("AttributeDefinitions"))
+ }
+ if s.KeySchema == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeySchema"))
+ }
+ if s.KeySchema != nil && len(s.KeySchema) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+ if s.AttributeDefinitions != nil {
+ for i, v := range s.AttributeDefinitions {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.GlobalSecondaryIndexes != nil {
+ for i, v := range s.GlobalSecondaryIndexes {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.KeySchema != nil {
+ for i, v := range s.KeySchema {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.LocalSecondaryIndexes != nil {
+ for i, v := range s.LocalSecondaryIndexes {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexes", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.ProvisionedThroughput != nil {
+ if err := s.ProvisionedThroughput.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributeDefinitions sets the AttributeDefinitions field's value.
+func (s *CreateTableInput) SetAttributeDefinitions(v []*AttributeDefinition) *CreateTableInput {
+ s.AttributeDefinitions = v
+ return s
+}
+
+// SetBillingMode sets the BillingMode field's value.
+func (s *CreateTableInput) SetBillingMode(v string) *CreateTableInput {
+ s.BillingMode = &v
+ return s
+}
+
+// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
+func (s *CreateTableInput) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndex) *CreateTableInput {
+ s.GlobalSecondaryIndexes = v
+ return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *CreateTableInput) SetKeySchema(v []*KeySchemaElement) *CreateTableInput {
+ s.KeySchema = v
+ return s
+}
+
+// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value.
+func (s *CreateTableInput) SetLocalSecondaryIndexes(v []*LocalSecondaryIndex) *CreateTableInput {
+ s.LocalSecondaryIndexes = v
+ return s
+}
+
+// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
+func (s *CreateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateTableInput {
+ s.ProvisionedThroughput = v
+ return s
+}
+
+// SetSSESpecification sets the SSESpecification field's value.
+func (s *CreateTableInput) SetSSESpecification(v *SSESpecification) *CreateTableInput {
+ s.SSESpecification = v
+ return s
+}
+
+// SetStreamSpecification sets the StreamSpecification field's value.
+func (s *CreateTableInput) SetStreamSpecification(v *StreamSpecification) *CreateTableInput {
+ s.StreamSpecification = v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *CreateTableInput) SetTableName(v string) *CreateTableInput {
+ s.TableName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateTableInput) SetTags(v []*Tag) *CreateTableInput {
+ s.Tags = v
+ return s
+}
+
+// Represents the output of a CreateTable operation.
+type CreateTableOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the properties of the table.
+ TableDescription *TableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateTableOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTableOutput) GoString() string {
+ return s.String()
+}
+
+// SetTableDescription sets the TableDescription field's value.
+func (s *CreateTableOutput) SetTableDescription(v *TableDescription) *CreateTableOutput {
+ s.TableDescription = v
+ return s
+}
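+
+// exampleCreateTable is an illustrative sketch of the composite-key table
+// described above: partition key Artist, sort key SongTitle, and provisioned
+// throughput. All names and capacity figures are hypothetical.
+func exampleCreateTable(svc *DynamoDB) (*CreateTableOutput, error) {
+ return svc.CreateTable(&CreateTableInput{
+ TableName: aws.String("Music"),
+ AttributeDefinitions: []*AttributeDefinition{
+ {AttributeName: aws.String("Artist"), AttributeType: aws.String(ScalarAttributeTypeS)},
+ {AttributeName: aws.String("SongTitle"), AttributeType: aws.String(ScalarAttributeTypeS)},
+ },
+ KeySchema: []*KeySchemaElement{
+ {AttributeName: aws.String("Artist"), KeyType: aws.String(KeyTypeHash)},
+ {AttributeName: aws.String("SongTitle"), KeyType: aws.String(KeyTypeRange)},
+ },
+ ProvisionedThroughput: &ProvisionedThroughput{
+ ReadCapacityUnits: aws.Int64(5),
+ WriteCapacityUnits: aws.Int64(5),
+ },
+ })
+}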
+
+// Represents a request to perform a DeleteItem operation.
+type Delete struct {
+ _ struct{} `type:"structure"`
+
+ // A condition that must be satisfied in order for a conditional delete to succeed.
+ ConditionExpression *string `type:"string"`
+
+ // One or more substitution tokens for attribute names in an expression.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+ // The primary key of the item to be deleted. Each element consists of an attribute
+ // name and a value for that attribute.
+ //
+ // Key is a required field
+ Key map[string]*AttributeValue `type:"map" required:"true"`
+
+ // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the
+ // Delete condition fails. For ReturnValuesOnConditionCheckFailure, the valid
+ // values are: NONE and ALL_OLD.
+ ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
+
+ // Name of the table in which the item to be deleted resides.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Delete) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Delete) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Delete) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Delete"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetConditionExpression sets the ConditionExpression field's value.
+func (s *Delete) SetConditionExpression(v string) *Delete {
+ s.ConditionExpression = &v
+ return s
+}
+
+// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
+func (s *Delete) SetExpressionAttributeNames(v map[string]*string) *Delete {
+ s.ExpressionAttributeNames = v
+ return s
+}
+
+// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
+func (s *Delete) SetExpressionAttributeValues(v map[string]*AttributeValue) *Delete {
+ s.ExpressionAttributeValues = v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Delete) SetKey(v map[string]*AttributeValue) *Delete {
+ s.Key = v
+ return s
+}
+
+// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value.
+func (s *Delete) SetReturnValuesOnConditionCheckFailure(v string) *Delete {
+ s.ReturnValuesOnConditionCheckFailure = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *Delete) SetTableName(v string) *Delete {
+ s.TableName = &v
+ return s
+}
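+
+// exampleTransactDelete is an illustrative sketch of a conditional Delete
+// entry for TransactWriteItems, requesting the old item back if the condition
+// fails. Table, key, and attribute names are hypothetical.
+func exampleTransactDelete() *TransactWriteItem {
+ return &TransactWriteItem{
+ Delete: &Delete{
+ TableName: aws.String("Music"),
+ Key: map[string]*AttributeValue{
+ "Artist": {S: aws.String("The Acme Band")},
+ },
+ ConditionExpression: aws.String("attribute_exists(Artist)"),
+ ReturnValuesOnConditionCheckFailure: aws.String(ReturnValuesOnConditionCheckFailureAllOld),
+ },
+ }
+}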
+
+type DeleteBackupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN associated with the backup.
+ //
+ // BackupArn is a required field
+ BackupArn *string `min:"37" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBackupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBackupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBackupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBackupInput"}
+ if s.BackupArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("BackupArn"))
+ }
+ if s.BackupArn != nil && len(*s.BackupArn) < 37 {
+ invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBackupArn sets the BackupArn field's value.
+func (s *DeleteBackupInput) SetBackupArn(v string) *DeleteBackupInput {
+ s.BackupArn = &v
+ return s
+}
+
+type DeleteBackupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the description of the backup created for the table.
+ BackupDescription *BackupDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBackupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBackupOutput) GoString() string {
+ return s.String()
+}
+
+// SetBackupDescription sets the BackupDescription field's value.
+func (s *DeleteBackupOutput) SetBackupDescription(v *BackupDescription) *DeleteBackupOutput {
+ s.BackupDescription = v
+ return s
+}
+
+// Represents a global secondary index to be deleted from an existing table.
+type DeleteGlobalSecondaryIndexAction struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index to be deleted.
+ //
+ // IndexName is a required field
+ IndexName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteGlobalSecondaryIndexAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteGlobalSecondaryIndexAction) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteGlobalSecondaryIndexAction) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteGlobalSecondaryIndexAction"}
+ if s.IndexName == nil {
+ invalidParams.Add(request.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *DeleteGlobalSecondaryIndexAction) SetIndexName(v string) *DeleteGlobalSecondaryIndexAction {
+ s.IndexName = &v
+ return s
+}
+
+// Represents the input of a DeleteItem operation.
+type DeleteItemInput struct {
+ _ struct{} `type:"structure"`
+
+ // A condition that must be satisfied in order for a conditional DeleteItem
+ // to succeed.
+ //
+ // An expression can contain any of the following:
+ //
+ // * Functions: attribute_exists | attribute_not_exists | attribute_type
+ // | contains | begins_with | size These function names are case-sensitive.
+ //
+ // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
+ //
+ // * Logical operators: AND | OR | NOT
+ //
+ // For more information about condition expressions, see Condition Expressions
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConditionExpression *string `type:"string"`
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more information,
+ // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more information,
+ // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html)
+ // in the Amazon DynamoDB Developer Guide.
+ Expected map[string]*ExpectedAttributeValue `type:"map"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // * To access an attribute whose name conflicts with a DynamoDB reserved
+ // word.
+ //
+ // * To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // * To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // * Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be
+ // used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // * {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // * #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see Specifying Item Attributes
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute
+ // value. For example, suppose that you wanted to check whether the value of
+ // the ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+ // }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see Condition Expressions
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+ // A map of attribute names to AttributeValue objects, representing the primary
+ // key of the item to delete.
+ //
+ // For the primary key, you must provide all of the attributes. For example,
+ // with a simple primary key, you only need to provide a value for the partition
+ // key. For a composite primary key, you must provide values for both the partition
+ // key and the sort key.
+ //
+ // Key is a required field
+ Key map[string]*AttributeValue `type:"map" required:"true"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed. Note that some operations, such as GetItem and
+ // BatchGetItem, do not access any indexes at all. In these cases, specifying
+ // INDEXES will only return ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+	// Determines whether item collection metrics are returned. If set to SIZE,
+	// the response includes statistics about item collections, if any, that were
+	// modified during the operation. If set to NONE (the default), no statistics
+	// are returned.
+ ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+
+ // Use ReturnValues if you want to get the item attributes as they appeared
+ // before they were deleted. For DeleteItem, the valid values are:
+ //
+ // * NONE - If ReturnValues is not specified, or if its value is NONE, then
+ // nothing is returned. (This setting is the default for ReturnValues.)
+ //
+ // * ALL_OLD - The content of the old item is returned.
+ //
+ // The ReturnValues parameter is used by several DynamoDB operations; however,
+ // DeleteItem does not recognize any values other than NONE or ALL_OLD.
+ ReturnValues *string `type:"string" enum:"ReturnValue"`
+
+ // The name of the table from which to delete the item.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteItemInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteItemInput"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetConditionExpression sets the ConditionExpression field's value.
+func (s *DeleteItemInput) SetConditionExpression(v string) *DeleteItemInput {
+ s.ConditionExpression = &v
+ return s
+}
+
+// SetConditionalOperator sets the ConditionalOperator field's value.
+func (s *DeleteItemInput) SetConditionalOperator(v string) *DeleteItemInput {
+ s.ConditionalOperator = &v
+ return s
+}
+
+// SetExpected sets the Expected field's value.
+func (s *DeleteItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *DeleteItemInput {
+ s.Expected = v
+ return s
+}
+
+// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
+func (s *DeleteItemInput) SetExpressionAttributeNames(v map[string]*string) *DeleteItemInput {
+ s.ExpressionAttributeNames = v
+ return s
+}
+
+// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
+func (s *DeleteItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *DeleteItemInput {
+ s.ExpressionAttributeValues = v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteItemInput) SetKey(v map[string]*AttributeValue) *DeleteItemInput {
+ s.Key = v
+ return s
+}
+
+// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
+func (s *DeleteItemInput) SetReturnConsumedCapacity(v string) *DeleteItemInput {
+ s.ReturnConsumedCapacity = &v
+ return s
+}
+
+// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value.
+func (s *DeleteItemInput) SetReturnItemCollectionMetrics(v string) *DeleteItemInput {
+ s.ReturnItemCollectionMetrics = &v
+ return s
+}
+
+// SetReturnValues sets the ReturnValues field's value.
+func (s *DeleteItemInput) SetReturnValues(v string) *DeleteItemInput {
+ s.ReturnValues = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *DeleteItemInput) SetTableName(v string) *DeleteItemInput {
+ s.TableName = &v
+ return s
+}
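+
+// Illustrative sketch (editorial addition, not generated API code): one way
+// the ConditionExpression, ExpressionAttributeNames, and ExpressionAttributeValues
+// fields documented above might be combined in a conditional delete. The table
+// name "Music" and all attribute names and values are assumptions.
+func exampleDeleteItemSketch(svc *DynamoDB) error {
+	// #P substitutes for the reserved word "Percentile"; :min is resolved
+	// from ExpressionAttributeValues at runtime.
+	percentile := "Percentile"
+	input := (&DeleteItemInput{}).
+		SetTableName("Music").
+		SetKey(map[string]*AttributeValue{
+			"Artist": (&AttributeValue{}).SetS("No One You Know"),
+		}).
+		SetConditionExpression("#P >= :min").
+		SetExpressionAttributeNames(map[string]*string{"#P": &percentile}).
+		SetExpressionAttributeValues(map[string]*AttributeValue{
+			":min": (&AttributeValue{}).SetN("50"),
+		}).
+		SetReturnValues("ALL_OLD")
+	out, err := svc.DeleteItem(input)
+	if err != nil {
+		return err
+	}
+	// With ReturnValues set to ALL_OLD, Attributes holds the item as it
+	// appeared before the delete.
+	fmt.Println(out.Attributes)
+	return nil
+}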
+
+// Represents the output of a DeleteItem operation.
+type DeleteItemOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A map of attribute names to AttributeValue objects, representing the item
+ // as it appeared before the DeleteItem operation. This map appears in the response
+ // only if ReturnValues was specified as ALL_OLD in the request.
+ Attributes map[string]*AttributeValue `type:"map"`
+
+ // The capacity units consumed by the DeleteItem operation. The data returned
+ // includes the total provisioned throughput consumed, along with statistics
+ // for the table and any indexes involved in the operation. ConsumedCapacity
+ // is only returned if the ReturnConsumedCapacity parameter was specified. For
+ // more information, see Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConsumedCapacity *ConsumedCapacity `type:"structure"`
+
+ // Information about item collections, if any, that were affected by the DeleteItem
+ // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics
+ // parameter was specified. If the table does not have any local secondary indexes,
+ // this information is not returned in the response.
+ //
+ // Each ItemCollectionMetrics element consists of:
+ //
+ // * ItemCollectionKey - The partition key value of the item collection.
+ // This is the same as the partition key value of the item itself.
+ //
+ // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
+ // This value is a two-element array containing a lower bound and an upper
+ // bound for the estimate. The estimate includes the size of all the items
+ // in the table, plus the size of all attributes projected into all of the
+ // local secondary indexes on that table. Use this estimate to measure whether
+ // a local secondary index is approaching its size limit. The estimate is
+ // subject to change over time; therefore, do not rely on the precision or
+ // accuracy of the estimate.
+ ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteItemOutput) GoString() string {
+ return s.String()
+}
+
+// SetAttributes sets the Attributes field's value.
+func (s *DeleteItemOutput) SetAttributes(v map[string]*AttributeValue) *DeleteItemOutput {
+ s.Attributes = v
+ return s
+}
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.
+func (s *DeleteItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *DeleteItemOutput {
+ s.ConsumedCapacity = v
+ return s
+}
+
+// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value.
+func (s *DeleteItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *DeleteItemOutput {
+ s.ItemCollectionMetrics = v
+ return s
+}
+
+// Represents a replica to be removed.
+type DeleteReplicaAction struct {
+ _ struct{} `type:"structure"`
+
+	// The Region of the replica to be removed.
+ //
+ // RegionName is a required field
+ RegionName *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteReplicaAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteReplicaAction) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteReplicaAction) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteReplicaAction"}
+ if s.RegionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RegionName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRegionName sets the RegionName field's value.
+func (s *DeleteReplicaAction) SetRegionName(v string) *DeleteReplicaAction {
+ s.RegionName = &v
+ return s
+}
+
+// Represents a request to perform a DeleteItem operation on an item.
+type DeleteRequest struct {
+ _ struct{} `type:"structure"`
+
+ // A map of attribute name to attribute values, representing the primary key
+ // of the item to delete. All of the table's primary key attributes must be
+ // specified, and their data types must match those of the table's key schema.
+ //
+ // Key is a required field
+ Key map[string]*AttributeValue `type:"map" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRequest) GoString() string {
+ return s.String()
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteRequest) SetKey(v map[string]*AttributeValue) *DeleteRequest {
+ s.Key = v
+ return s
+}
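+
+// Illustrative sketch (editorial addition): a DeleteRequest is not sent on its
+// own; it is wrapped in a WriteRequest inside a BatchWriteItemInput's
+// RequestItems map. The table name "Products" and the key are assumptions.
+func exampleBatchDeleteSketch(svc *DynamoDB) error {
+	del := (&DeleteRequest{}).SetKey(map[string]*AttributeValue{
+		"Id": (&AttributeValue{}).SetN("101"),
+	})
+	input := (&BatchWriteItemInput{}).SetRequestItems(map[string][]*WriteRequest{
+		"Products": {(&WriteRequest{}).SetDeleteRequest(del)},
+	})
+	_, err := svc.BatchWriteItem(input)
+	return err
+}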
+
+// Represents the input of a DeleteTable operation.
+type DeleteTableInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the table to delete.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteTableInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTableInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteTableInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteTableInput"}
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTableName sets the TableName field's value.
+func (s *DeleteTableInput) SetTableName(v string) *DeleteTableInput {
+ s.TableName = &v
+ return s
+}
+
+// Represents the output of a DeleteTable operation.
+type DeleteTableOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the properties of a table.
+ TableDescription *TableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteTableOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteTableOutput) GoString() string {
+ return s.String()
+}
+
+// SetTableDescription sets the TableDescription field's value.
+func (s *DeleteTableOutput) SetTableDescription(v *TableDescription) *DeleteTableOutput {
+ s.TableDescription = v
+ return s
+}
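+
+// Illustrative sketch (editorial addition): deleting a table and inspecting
+// the returned TableDescription. The table name is an assumption.
+func exampleDeleteTableSketch(svc *DynamoDB) error {
+	out, err := svc.DeleteTable((&DeleteTableInput{}).SetTableName("Music"))
+	if err != nil {
+		return err
+	}
+	// The table typically reports a DELETING status while removal proceeds.
+	if td := out.TableDescription; td != nil && td.TableStatus != nil {
+		fmt.Println(*td.TableStatus)
+	}
+	return nil
+}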
+
+type DescribeBackupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) associated with the backup.
+ //
+ // BackupArn is a required field
+ BackupArn *string `min:"37" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeBackupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeBackupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeBackupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeBackupInput"}
+ if s.BackupArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("BackupArn"))
+ }
+ if s.BackupArn != nil && len(*s.BackupArn) < 37 {
+ invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBackupArn sets the BackupArn field's value.
+func (s *DescribeBackupInput) SetBackupArn(v string) *DescribeBackupInput {
+ s.BackupArn = &v
+ return s
+}
+
+type DescribeBackupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the description of the backup created for the table.
+ BackupDescription *BackupDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeBackupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeBackupOutput) GoString() string {
+ return s.String()
+}
+
+// SetBackupDescription sets the BackupDescription field's value.
+func (s *DescribeBackupOutput) SetBackupDescription(v *BackupDescription) *DescribeBackupOutput {
+ s.BackupDescription = v
+ return s
+}
+
+type DescribeContinuousBackupsInput struct {
+ _ struct{} `type:"structure"`
+
+	// The name of the table for which to check the continuous backups and
+	// point-in-time recovery settings.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeContinuousBackupsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeContinuousBackupsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeContinuousBackupsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeContinuousBackupsInput"}
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTableName sets the TableName field's value.
+func (s *DescribeContinuousBackupsInput) SetTableName(v string) *DescribeContinuousBackupsInput {
+ s.TableName = &v
+ return s
+}
+
+type DescribeContinuousBackupsOutput struct {
+ _ struct{} `type:"structure"`
+
+	// Represents the continuous backups and point-in-time recovery settings on
+	// the table.
+ ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeContinuousBackupsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeContinuousBackupsOutput) GoString() string {
+ return s.String()
+}
+
+// SetContinuousBackupsDescription sets the ContinuousBackupsDescription field's value.
+func (s *DescribeContinuousBackupsOutput) SetContinuousBackupsDescription(v *ContinuousBackupsDescription) *DescribeContinuousBackupsOutput {
+ s.ContinuousBackupsDescription = v
+ return s
+}
+
+type DescribeEndpointsInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeEndpointsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEndpointsInput) GoString() string {
+ return s.String()
+}
+
+type DescribeEndpointsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // List of endpoints.
+ //
+ // Endpoints is a required field
+ Endpoints []*Endpoint `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeEndpointsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeEndpointsOutput) GoString() string {
+ return s.String()
+}
+
+// SetEndpoints sets the Endpoints field's value.
+func (s *DescribeEndpointsOutput) SetEndpoints(v []*Endpoint) *DescribeEndpointsOutput {
+ s.Endpoints = v
+ return s
+}
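+
+// Illustrative sketch (editorial addition): DescribeEndpoints takes an empty
+// input; each returned Endpoint carries an address and a cache TTL in minutes.
+func exampleDescribeEndpointsSketch(svc *DynamoDB) error {
+	out, err := svc.DescribeEndpoints(&DescribeEndpointsInput{})
+	if err != nil {
+		return err
+	}
+	for _, ep := range out.Endpoints {
+		fmt.Printf("%s (cache for %d minutes)\n", *ep.Address, *ep.CachePeriodInMinutes)
+	}
+	return nil
+}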
+
+type DescribeGlobalTableInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global table.
+ //
+ // GlobalTableName is a required field
+ GlobalTableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeGlobalTableInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeGlobalTableInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeGlobalTableInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableInput"}
+ if s.GlobalTableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("GlobalTableName"))
+ }
+ if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGlobalTableName sets the GlobalTableName field's value.
+func (s *DescribeGlobalTableInput) SetGlobalTableName(v string) *DescribeGlobalTableInput {
+ s.GlobalTableName = &v
+ return s
+}
+
+type DescribeGlobalTableOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the details of the global table.
+ GlobalTableDescription *GlobalTableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeGlobalTableOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeGlobalTableOutput) GoString() string {
+ return s.String()
+}
+
+// SetGlobalTableDescription sets the GlobalTableDescription field's value.
+func (s *DescribeGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *DescribeGlobalTableOutput {
+ s.GlobalTableDescription = v
+ return s
+}
+
+type DescribeGlobalTableSettingsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global table to describe.
+ //
+ // GlobalTableName is a required field
+ GlobalTableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeGlobalTableSettingsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeGlobalTableSettingsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeGlobalTableSettingsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableSettingsInput"}
+ if s.GlobalTableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("GlobalTableName"))
+ }
+ if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGlobalTableName sets the GlobalTableName field's value.
+func (s *DescribeGlobalTableSettingsInput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsInput {
+ s.GlobalTableName = &v
+ return s
+}
+
+type DescribeGlobalTableSettingsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global table.
+ GlobalTableName *string `min:"3" type:"string"`
+
+ // The Region-specific settings for the global table.
+ ReplicaSettings []*ReplicaSettingsDescription `type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeGlobalTableSettingsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeGlobalTableSettingsOutput) GoString() string {
+ return s.String()
+}
+
+// SetGlobalTableName sets the GlobalTableName field's value.
+func (s *DescribeGlobalTableSettingsOutput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsOutput {
+ s.GlobalTableName = &v
+ return s
+}
+
+// SetReplicaSettings sets the ReplicaSettings field's value.
+func (s *DescribeGlobalTableSettingsOutput) SetReplicaSettings(v []*ReplicaSettingsDescription) *DescribeGlobalTableSettingsOutput {
+ s.ReplicaSettings = v
+ return s
+}
+
+// Represents the input of a DescribeLimits operation. Has no content.
+type DescribeLimitsInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeLimitsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLimitsInput) GoString() string {
+ return s.String()
+}
+
+// Represents the output of a DescribeLimits operation.
+type DescribeLimitsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum total read capacity units that your account allows you to provision
+ // across all of your tables in this Region.
+ AccountMaxReadCapacityUnits *int64 `min:"1" type:"long"`
+
+ // The maximum total write capacity units that your account allows you to provision
+ // across all of your tables in this Region.
+ AccountMaxWriteCapacityUnits *int64 `min:"1" type:"long"`
+
+ // The maximum read capacity units that your account allows you to provision
+ // for a new table that you are creating in this Region, including the read
+ // capacity units provisioned for its global secondary indexes (GSIs).
+ TableMaxReadCapacityUnits *int64 `min:"1" type:"long"`
+
+ // The maximum write capacity units that your account allows you to provision
+ // for a new table that you are creating in this Region, including the write
+ // capacity units provisioned for its global secondary indexes (GSIs).
+ TableMaxWriteCapacityUnits *int64 `min:"1" type:"long"`
+}
+
+// String returns the string representation
+func (s DescribeLimitsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeLimitsOutput) GoString() string {
+ return s.String()
+}
+
+// SetAccountMaxReadCapacityUnits sets the AccountMaxReadCapacityUnits field's value.
+func (s *DescribeLimitsOutput) SetAccountMaxReadCapacityUnits(v int64) *DescribeLimitsOutput {
+ s.AccountMaxReadCapacityUnits = &v
+ return s
+}
+
+// SetAccountMaxWriteCapacityUnits sets the AccountMaxWriteCapacityUnits field's value.
+func (s *DescribeLimitsOutput) SetAccountMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput {
+ s.AccountMaxWriteCapacityUnits = &v
+ return s
+}
+
+// SetTableMaxReadCapacityUnits sets the TableMaxReadCapacityUnits field's value.
+func (s *DescribeLimitsOutput) SetTableMaxReadCapacityUnits(v int64) *DescribeLimitsOutput {
+ s.TableMaxReadCapacityUnits = &v
+ return s
+}
+
+// SetTableMaxWriteCapacityUnits sets the TableMaxWriteCapacityUnits field's value.
+func (s *DescribeLimitsOutput) SetTableMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput {
+ s.TableMaxWriteCapacityUnits = &v
+ return s
+}
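+
+// Illustrative sketch (editorial addition): reading the account-level and
+// per-table capacity limits for the current Region.
+func exampleDescribeLimitsSketch(svc *DynamoDB) error {
+	out, err := svc.DescribeLimits(&DescribeLimitsInput{})
+	if err != nil {
+		return err
+	}
+	fmt.Printf("account max RCU/WCU: %d/%d, table max RCU/WCU: %d/%d\n",
+		*out.AccountMaxReadCapacityUnits, *out.AccountMaxWriteCapacityUnits,
+		*out.TableMaxReadCapacityUnits, *out.TableMaxWriteCapacityUnits)
+	return nil
+}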
+
+// Represents the input of a DescribeTable operation.
+type DescribeTableInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the table to describe.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeTableInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTableInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeTableInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeTableInput"}
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTableName sets the TableName field's value.
+func (s *DescribeTableInput) SetTableName(v string) *DescribeTableInput {
+ s.TableName = &v
+ return s
+}
+
+// Represents the output of a DescribeTable operation.
+type DescribeTableOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The properties of the table.
+ Table *TableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeTableOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTableOutput) GoString() string {
+ return s.String()
+}
+
+// SetTable sets the Table field's value.
+func (s *DescribeTableOutput) SetTable(v *TableDescription) *DescribeTableOutput {
+ s.Table = v
+ return s
+}
+
+type DescribeTimeToLiveInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the table to be described.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeTimeToLiveInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTimeToLiveInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeTimeToLiveInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeTimeToLiveInput"}
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTableName sets the TableName field's value.
+func (s *DescribeTimeToLiveInput) SetTableName(v string) *DescribeTimeToLiveInput {
+ s.TableName = &v
+ return s
+}
+
+type DescribeTimeToLiveOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The description of the Time to Live (TTL) status on the specified table.
+ TimeToLiveDescription *TimeToLiveDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeTimeToLiveOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTimeToLiveOutput) GoString() string {
+ return s.String()
+}
+
+// SetTimeToLiveDescription sets the TimeToLiveDescription field's value.
+func (s *DescribeTimeToLiveOutput) SetTimeToLiveDescription(v *TimeToLiveDescription) *DescribeTimeToLiveOutput {
+ s.TimeToLiveDescription = v
+ return s
+}
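+
+// Illustrative sketch (editorial addition): checking the TTL status of a
+// table. The table name is an assumption; TimeToLiveStatus reports values
+// such as ENABLED or DISABLED.
+func exampleDescribeTimeToLiveSketch(svc *DynamoDB) error {
+	out, err := svc.DescribeTimeToLive((&DescribeTimeToLiveInput{}).SetTableName("Music"))
+	if err != nil {
+		return err
+	}
+	if d := out.TimeToLiveDescription; d != nil && d.TimeToLiveStatus != nil {
+		fmt.Println(*d.TimeToLiveStatus)
+	}
+	return nil
+}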
+
+// Information about an endpoint.
+type Endpoint struct {
+ _ struct{} `type:"structure"`
+
+ // IP address of the endpoint.
+ //
+ // Address is a required field
+ Address *string `type:"string" required:"true"`
+
+ // Endpoint cache time to live (TTL) value.
+ //
+ // CachePeriodInMinutes is a required field
+ CachePeriodInMinutes *int64 `type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s Endpoint) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Endpoint) GoString() string {
+ return s.String()
+}
+
+// SetAddress sets the Address field's value.
+func (s *Endpoint) SetAddress(v string) *Endpoint {
+ s.Address = &v
+ return s
+}
+
+// SetCachePeriodInMinutes sets the CachePeriodInMinutes field's value.
+func (s *Endpoint) SetCachePeriodInMinutes(v int64) *Endpoint {
+ s.CachePeriodInMinutes = &v
+ return s
+}
+
+// Represents a condition to be compared with an attribute value. This condition
+// can be used with DeleteItem, PutItem, or UpdateItem operations; if the comparison
+// evaluates to true, the operation succeeds; if not, the operation fails. You
+// can use ExpectedAttributeValue in one of two different ways:
+//
+// * Use AttributeValueList to specify one or more values to compare against
+// an attribute. Use ComparisonOperator to specify how you want to perform
+// the comparison. If the comparison evaluates to true, then the conditional
+// operation succeeds.
+//
+// * Use Value to specify a value that DynamoDB will compare against an attribute.
+// If the values match, then ExpectedAttributeValue evaluates to true and
+// the conditional operation succeeds. Optionally, you can also set Exists
+// to false, indicating that you do not expect to find the attribute value
+// in the table. In this case, the conditional operation succeeds only if
+// the comparison evaluates to false.
+//
+// Value and Exists are incompatible with AttributeValueList and ComparisonOperator.
+// Note that if you use both sets of parameters at once, DynamoDB will return
+// a ValidationException.
+type ExpectedAttributeValue struct {
+ _ struct{} `type:"structure"`
+
+ // One or more values to evaluate against the supplied attribute. The number
+ // of values in the list depends on the ComparisonOperator being used.
+ //
+ // For type Number, value comparisons are numeric.
+ //
+ // String value comparisons for greater than, equals, or less than are based
+ // on ASCII character code values. For example, a is greater than A, and a is
+ // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters
+ // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters).
+ //
+ // For Binary, DynamoDB treats each byte of the binary data as unsigned when
+ // it compares binary values.
+ //
+ // For information on specifying data types in JSON, see JSON Data Format (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html)
+ // in the Amazon DynamoDB Developer Guide.
+ AttributeValueList []*AttributeValue `type:"list"`
+
+ // A comparator for evaluating attributes in the AttributeValueList. For example,
+ // equals, greater than, less than, etc.
+ //
+ // The following comparison operators are available:
+ //
+ // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS |
+ // BEGINS_WITH | IN | BETWEEN
+ //
+ // The following are descriptions of each comparison operator.
+ //
+ // * EQ : Equal. EQ is supported for all data types, including lists and
+ // maps. AttributeValueList can contain only one AttributeValue element of
+ // type String, Number, Binary, String Set, Number Set, or Binary Set. If
+ // an item contains an AttributeValue element of a different type than the
+ // one provided in the request, the value does not match. For example, {"S":"6"}
+ // does not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2",
+ // "1"]}.
+ //
+ // * NE : Not equal. NE is supported for all data types, including lists
+ // and maps. AttributeValueList can contain only one AttributeValue of type
+ // String, Number, Binary, String Set, Number Set, or Binary Set. If an item
+ // contains an AttributeValue of a different type than the one provided in
+ // the request, the value does not match. For example, {"S":"6"} does not
+ // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}.
+ //
+ // * LE : Less than or equal. AttributeValueList can contain only one AttributeValue
+ // element of type String, Number, or Binary (not a set type). If an item
+ // contains an AttributeValue element of a different type than the one provided
+ // in the request, the value does not match. For example, {"S":"6"} does
+ // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2",
+ // "1"]}.
+ //
+ // * LT : Less than. AttributeValueList can contain only one AttributeValue
+ // of type String, Number, or Binary (not a set type). If an item contains
+ // an AttributeValue element of a different type than the one provided in
+ // the request, the value does not match. For example, {"S":"6"} does not
+ // equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2",
+ // "1"]}.
+ //
+ // * GE : Greater than or equal. AttributeValueList can contain only one
+ // AttributeValue element of type String, Number, or Binary (not a set type).
+ // If an item contains an AttributeValue element of a different type than
+ // the one provided in the request, the value does not match. For example,
+ // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to
+ // {"NS":["6", "2", "1"]}.
+ //
+ // * GT : Greater than. AttributeValueList can contain only one AttributeValue
+ // element of type String, Number, or Binary (not a set type). If an item
+ // contains an AttributeValue element of a different type than the one provided
+ // in the request, the value does not match. For example, {"S":"6"} does
+ // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2",
+ // "1"]}.
+ //
+ // * NOT_NULL : The attribute exists. NOT_NULL is supported for all data
+ // types, including lists and maps. This operator tests for the existence
+ // of an attribute, not its data type. If the data type of attribute "a"
+ // is null, and you evaluate it using NOT_NULL, the result is a Boolean true.
+ // This result is because the attribute "a" exists; its data type is not
+ // relevant to the NOT_NULL comparison operator.
+ //
+ // * NULL : The attribute does not exist. NULL is supported for all data
+ // types, including lists and maps. This operator tests for the nonexistence
+ // of an attribute, not its data type. If the data type of attribute "a"
+ // is null, and you evaluate it using NULL, the result is a Boolean false.
+ // This is because the attribute "a" exists; its data type is not relevant
+ // to the NULL comparison operator.
+ //
+ // * CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList
+ // can contain only one AttributeValue element of type String, Number, or
+ // Binary (not a set type). If the target attribute of the comparison is
+ // of type String, then the operator checks for a substring match. If the
+ // target attribute of the comparison is of type Binary, then the operator
+ // looks for a subsequence of the target that matches the input. If the target
+ // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator
+ // evaluates to true if it finds an exact match with any member of the set.
+ // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can
+ // be a list; however, "b" cannot be a set, a map, or a list.
+ //
+ // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a
+ // value in a set. AttributeValueList can contain only one AttributeValue
+ // element of type String, Number, or Binary (not a set type). If the target
+ // attribute of the comparison is a String, then the operator checks for
+ // the absence of a substring match. If the target attribute of the comparison
+ // is Binary, then the operator checks for the absence of a subsequence of
+ // the target that matches the input. If the target attribute of the comparison
+ // is a set ("SS", "NS", or "BS"), then the operator evaluates to true if
+ // it does not find an exact match with any member of the set. NOT_CONTAINS
+ // is supported for lists: When evaluating "a NOT CONTAINS b", "a" can be
+ // a list; however, "b" cannot be a set, a map, or a list.
+ //
+ // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only
+ // one AttributeValue of type String or Binary (not a Number or a set type).
+ // The target attribute of the comparison must be of type String or Binary
+ // (not a Number or a set type).
+ //
+ // * IN : Checks for matching elements in a list. AttributeValueList can
+ // contain one or more AttributeValue elements of type String, Number, or
+ // Binary. These attributes are compared against an existing attribute of
+ // an item. If any elements of the input are equal to the item attribute,
+ // the expression evaluates to true.
+ //
+ // * BETWEEN : Greater than or equal to the first value, and less than or
+ // equal to the second value. AttributeValueList must contain two AttributeValue
+ // elements of the same type, either String, Number, or Binary (not a set
+ // type). A target attribute matches if the target value is greater than,
+ // or equal to, the first element and less than, or equal to, the second
+ // element. If an item contains an AttributeValue element of a different
+ // type than the one provided in the request, the value does not match. For
+ // example, {"S":"6"} does not compare to {"N":"6"}. Also, {"N":"6"} does
+ // not compare to {"NS":["6", "2", "1"]}
+ ComparisonOperator *string `type:"string" enum:"ComparisonOperator"`
+
+ // Causes DynamoDB to evaluate the value before attempting a conditional operation:
+ //
+ // * If Exists is true, DynamoDB will check to see if that attribute value
+ // already exists in the table. If it is found, then the operation succeeds.
+ // If it is not found, the operation fails with a ConditionCheckFailedException.
+ //
+ // * If Exists is false, DynamoDB assumes that the attribute value does not
+ // exist in the table. If in fact the value does not exist, then the assumption
+ // is valid and the operation succeeds. If the value is found, despite the
+ // assumption that it does not exist, the operation fails with a ConditionCheckFailedException.
+ //
+ // The default setting for Exists is true. If you supply a Value all by itself,
+ // DynamoDB assumes the attribute exists: You don't have to set Exists to true,
+ // because it is implied.
+ //
+ // DynamoDB returns a ValidationException if:
+ //
+ // * Exists is true but there is no Value to check. (You expect a value to
+ // exist, but don't specify what that value is.)
+ //
+ // * Exists is false but you also provide a Value. (You cannot expect an
+ // attribute to have a value, while also expecting it not to exist.)
+ Exists *bool `type:"boolean"`
+
+ // Represents the data for the expected attribute.
+ //
+ // Each attribute value is described as a name-value pair. The name is the data
+ // type, and the value is the data itself.
+ //
+ // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes)
+ // in the Amazon DynamoDB Developer Guide.
+ Value *AttributeValue `type:"structure"`
+}
+
+// String returns the string representation
+func (s ExpectedAttributeValue) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ExpectedAttributeValue) GoString() string {
+ return s.String()
+}
+
+// SetAttributeValueList sets the AttributeValueList field's value.
+func (s *ExpectedAttributeValue) SetAttributeValueList(v []*AttributeValue) *ExpectedAttributeValue {
+ s.AttributeValueList = v
+ return s
+}
+
+// SetComparisonOperator sets the ComparisonOperator field's value.
+func (s *ExpectedAttributeValue) SetComparisonOperator(v string) *ExpectedAttributeValue {
+ s.ComparisonOperator = &v
+ return s
+}
+
+// SetExists sets the Exists field's value.
+func (s *ExpectedAttributeValue) SetExists(v bool) *ExpectedAttributeValue {
+ s.Exists = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *ExpectedAttributeValue) SetValue(v *AttributeValue) *ExpectedAttributeValue {
+ s.Value = v
+ return s
+}
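+
+// Illustrative sketch (editorial addition): the legacy Value/Exists form of
+// ExpectedAttributeValue described above. Setting Exists to false asserts the
+// attribute must be absent; supplying Value alone implies Exists is true.
+// Table, key, and attribute names are assumptions.
+func exampleExpectedSketch(svc *DynamoDB) error {
+	input := (&DeleteItemInput{}).
+		SetTableName("Music").
+		SetKey(map[string]*AttributeValue{
+			"Artist": (&AttributeValue{}).SetS("No One You Know"),
+		}).
+		SetExpected(map[string]*ExpectedAttributeValue{
+			// Succeed only while the item has no Lock attribute.
+			"Lock": (&ExpectedAttributeValue{}).SetExists(false),
+		})
+	_, err := svc.DeleteItem(input)
+	return err
+}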
+
+// Specifies an item and related attribute values to retrieve in a TransactGetItem
+// object.
+type Get struct {
+ _ struct{} `type:"structure"`
+
+ // One or more substitution tokens for attribute names in the ProjectionExpression
+ // parameter.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // A map of attribute names to AttributeValue objects that specifies the primary
+ // key of the item to retrieve.
+ //
+ // Key is a required field
+ Key map[string]*AttributeValue `type:"map" required:"true"`
+
+ // A string that identifies one or more attributes of the specified item to
+ // retrieve from the table. The attributes in the expression must be separated
+ // by commas. If no attribute names are specified, then all attributes of the
+ // specified item are returned. If any of the requested attributes are not found,
+ // they do not appear in the result.
+ ProjectionExpression *string `type:"string"`
+
+ // The name of the table from which to retrieve the specified item.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Get) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Get) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Get) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Get"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
+func (s *Get) SetExpressionAttributeNames(v map[string]*string) *Get {
+ s.ExpressionAttributeNames = v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Get) SetKey(v map[string]*AttributeValue) *Get {
+ s.Key = v
+ return s
+}
+
+// SetProjectionExpression sets the ProjectionExpression field's value.
+func (s *Get) SetProjectionExpression(v string) *Get {
+ s.ProjectionExpression = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *Get) SetTableName(v string) *Get {
+ s.TableName = &v
+ return s
+}
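+
+// Illustrative sketch (editorial addition): a Get is wrapped in a
+// TransactGetItem and submitted via TransactGetItems. Table, key, and
+// projected attribute names are assumptions.
+func exampleTransactGetSketch(svc *DynamoDB) error {
+	get := (&Get{}).
+		SetTableName("Music").
+		SetKey(map[string]*AttributeValue{
+			"Artist": (&AttributeValue{}).SetS("No One You Know"),
+		}).
+		SetProjectionExpression("AlbumTitle")
+	input := (&TransactGetItemsInput{}).SetTransactItems([]*TransactGetItem{
+		(&TransactGetItem{}).SetGet(get),
+	})
+	out, err := svc.TransactGetItems(input)
+	if err != nil {
+		return err
+	}
+	// Responses come back in the same order as the requested items.
+	for _, r := range out.Responses {
+		fmt.Println(r.Item)
+	}
+	return nil
+}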
+
+// Represents the input of a GetItem operation.
+type GetItemInput struct {
+ _ struct{} `type:"structure"`
+
+ // This is a legacy parameter. Use ProjectionExpression instead. For more information,
+ // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html)
+ // in the Amazon DynamoDB Developer Guide.
+ AttributesToGet []*string `min:"1" type:"list"`
+
+ // Determines the read consistency model: If set to true, then the operation
+ // uses strongly consistent reads; otherwise, the operation uses eventually
+ // consistent reads.
+ ConsistentRead *bool `type:"boolean"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // * To access an attribute whose name conflicts with a DynamoDB reserved
+ // word.
+ //
+ // * To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // * To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // * Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be
+ // used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // * {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // * #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see Specifying Item Attributes
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // A map of attribute names to AttributeValue objects, representing the primary
+ // key of the item to retrieve.
+ //
+ // For the primary key, you must provide all of the attributes. For example,
+ // with a simple primary key, you only need to provide a value for the partition
+ // key. For a composite primary key, you must provide values for both the partition
+ // key and the sort key.
+ //
+ // Key is a required field
+ Key map[string]*AttributeValue `type:"map" required:"true"`
+
+ // A string that identifies one or more attributes to retrieve from the table.
+ // These attributes can include scalars, sets, or elements of a JSON document.
+ // The attributes in the expression must be separated by commas.
+ //
+ // If no attribute names are specified, then all attributes are returned. If
+ // any of the requested attributes are not found, they do not appear in the
+ // result.
+ //
+ // For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ProjectionExpression *string `type:"string"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed. Note that some operations, such as GetItem and
+ // BatchGetItem, do not access any indexes at all. In these cases, specifying
+ // INDEXES will only return ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // The name of the table containing the requested item.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetItemInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetItemInput"}
+ if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributesToGet sets the AttributesToGet field's value.
+func (s *GetItemInput) SetAttributesToGet(v []*string) *GetItemInput {
+ s.AttributesToGet = v
+ return s
+}
+
+// SetConsistentRead sets the ConsistentRead field's value.
+func (s *GetItemInput) SetConsistentRead(v bool) *GetItemInput {
+ s.ConsistentRead = &v
+ return s
+}
+
+// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
+func (s *GetItemInput) SetExpressionAttributeNames(v map[string]*string) *GetItemInput {
+ s.ExpressionAttributeNames = v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetItemInput) SetKey(v map[string]*AttributeValue) *GetItemInput {
+ s.Key = v
+ return s
+}
+
+// SetProjectionExpression sets the ProjectionExpression field's value.
+func (s *GetItemInput) SetProjectionExpression(v string) *GetItemInput {
+ s.ProjectionExpression = &v
+ return s
+}
+
+// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
+func (s *GetItemInput) SetReturnConsumedCapacity(v string) *GetItemInput {
+ s.ReturnConsumedCapacity = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *GetItemInput) SetTableName(v string) *GetItemInput {
+ s.TableName = &v
+ return s
+}
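+
+// Illustrative sketch (editorial addition): a strongly consistent GetItem that
+// projects two attributes, using an ExpressionAttributeNames substitution for
+// the reserved word "Size". Table, key, and attribute names are assumptions.
+func exampleGetItemSketch(svc *DynamoDB) error {
+	size := "Size"
+	input := (&GetItemInput{}).
+		SetTableName("Music").
+		SetKey(map[string]*AttributeValue{
+			"Artist": (&AttributeValue{}).SetS("No One You Know"),
+		}).
+		SetConsistentRead(true).
+		SetProjectionExpression("AlbumTitle, #S").
+		SetExpressionAttributeNames(map[string]*string{"#S": &size})
+	out, err := svc.GetItem(input)
+	if err != nil {
+		return err
+	}
+	// Item is empty when no matching item exists; requested attributes that
+	// are absent simply do not appear in the map.
+	fmt.Println(out.Item)
+	return nil
+}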
+
+// Represents the output of a GetItem operation.
+type GetItemOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The capacity units consumed by the GetItem operation. The data returned includes
+ // the total provisioned throughput consumed, along with statistics for the
+ // table and any indexes involved in the operation. ConsumedCapacity is only
+ // returned if the ReturnConsumedCapacity parameter was specified. For more
+ // information, see Read/Write Capacity Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConsumedCapacity *ConsumedCapacity `type:"structure"`
+
+ // A map of attribute names to AttributeValue objects, as specified by ProjectionExpression.
+ Item map[string]*AttributeValue `type:"map"`
+}
+
+// String returns the string representation
+func (s GetItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetItemOutput) GoString() string {
+ return s.String()
+}
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.
+func (s *GetItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *GetItemOutput {
+ s.ConsumedCapacity = v
+ return s
+}
+
+// SetItem sets the Item field's value.
+func (s *GetItemOutput) SetItem(v map[string]*AttributeValue) *GetItemOutput {
+ s.Item = v
+ return s
+}
+
+// Represents the properties of a global secondary index.
+type GlobalSecondaryIndex struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index. The name must be unique among all
+ // other indexes on this table.
+ //
+ // IndexName is a required field
+ IndexName *string `min:"3" type:"string" required:"true"`
+
+ // The complete key schema for a global secondary index, which consists of one
+ // or more pairs of attribute names and key types:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB' usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // KeySchema is a required field
+ KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected.
+ //
+ // Projection is a required field
+ Projection *Projection `type:"structure" required:"true"`
+
+ // Represents the provisioned throughput settings for the specified global secondary
+ // index.
+ //
+ // For current minimum and maximum provisioned throughput values, see Limits
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
+}
+
+// String returns the string representation
+func (s GlobalSecondaryIndex) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalSecondaryIndex) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GlobalSecondaryIndex) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndex"}
+ if s.IndexName == nil {
+ invalidParams.Add(request.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+ }
+ if s.KeySchema == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeySchema"))
+ }
+ if s.KeySchema != nil && len(s.KeySchema) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1))
+ }
+ if s.Projection == nil {
+ invalidParams.Add(request.NewErrParamRequired("Projection"))
+ }
+ if s.KeySchema != nil {
+ for i, v := range s.KeySchema {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.Projection != nil {
+ if err := s.Projection.Validate(); err != nil {
+ invalidParams.AddNested("Projection", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.ProvisionedThroughput != nil {
+ if err := s.ProvisionedThroughput.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *GlobalSecondaryIndex) SetIndexName(v string) *GlobalSecondaryIndex {
+ s.IndexName = &v
+ return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *GlobalSecondaryIndex) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndex {
+ s.KeySchema = v
+ return s
+}
+
+// SetProjection sets the Projection field's value.
+func (s *GlobalSecondaryIndex) SetProjection(v *Projection) *GlobalSecondaryIndex {
+ s.Projection = v
+ return s
+}
+
+// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
+func (s *GlobalSecondaryIndex) SetProvisionedThroughput(v *ProvisionedThroughput) *GlobalSecondaryIndex {
+ s.ProvisionedThroughput = v
+ return s
+}
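+
+// Illustrative sketch (editorial addition): assembling a GlobalSecondaryIndex
+// with a partition-key-only schema, a KEYS_ONLY projection, and provisioned
+// throughput, as might be passed to CreateTable. The index and attribute names
+// and the capacity figures are assumptions.
+func exampleGlobalSecondaryIndexSketch() *GlobalSecondaryIndex {
+	return (&GlobalSecondaryIndex{}).
+		SetIndexName("GenreIndex").
+		SetKeySchema([]*KeySchemaElement{
+			(&KeySchemaElement{}).SetAttributeName("Genre").SetKeyType("HASH"),
+		}).
+		SetProjection((&Projection{}).SetProjectionType("KEYS_ONLY")).
+		SetProvisionedThroughput((&ProvisionedThroughput{}).
+			SetReadCapacityUnits(5).SetWriteCapacityUnits(5))
+}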
+
+// Represents the properties of a global secondary index.
+type GlobalSecondaryIndexDescription struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the index is currently backfilling. Backfilling is the
+ // process of reading items from the table and determining whether they can
+ // be added to the index. (Not all items will qualify: For example, a partition
+ // key cannot have any duplicate values.) If an item can be added to the index,
+ // DynamoDB will do so. After all items have been processed, the backfilling
+ // operation is complete and Backfilling is false.
+ //
+ // For indexes that were created during a CreateTable operation, the Backfilling
+ // attribute does not appear in the DescribeTable output.
+ Backfilling *bool `type:"boolean"`
+
+ // The Amazon Resource Name (ARN) that uniquely identifies the index.
+ IndexArn *string `type:"string"`
+
+ // The name of the global secondary index.
+ IndexName *string `min:"3" type:"string"`
+
+ // The total size of the specified index, in bytes. DynamoDB updates this value
+ // approximately every six hours. Recent changes might not be reflected in this
+ // value.
+ IndexSizeBytes *int64 `type:"long"`
+
+ // The current state of the global secondary index:
+ //
+ // * CREATING - The index is being created.
+ //
+ // * UPDATING - The index is being updated.
+ //
+ // * DELETING - The index is being deleted.
+ //
+ // * ACTIVE - The index is ready for use.
+ IndexStatus *string `type:"string" enum:"IndexStatus"`
+
+ // The number of items in the specified index. DynamoDB updates this value approximately
+ // every six hours. Recent changes might not be reflected in this value.
+ ItemCount *int64 `type:"long"`
+
+ // The complete key schema for a global secondary index, which consists of one
+ // or more pairs of attribute names and key types:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB' usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ KeySchema []*KeySchemaElement `min:"1" type:"list"`
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected.
+ Projection *Projection `type:"structure"`
+
+ // Represents the provisioned throughput settings for the specified global secondary
+ // index.
+ //
+ // For current minimum and maximum provisioned throughput values, see Limits
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s GlobalSecondaryIndexDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalSecondaryIndexDescription) GoString() string {
+ return s.String()
+}
+
+// SetBackfilling sets the Backfilling field's value.
+func (s *GlobalSecondaryIndexDescription) SetBackfilling(v bool) *GlobalSecondaryIndexDescription {
+ s.Backfilling = &v
+ return s
+}
+
+// SetIndexArn sets the IndexArn field's value.
+func (s *GlobalSecondaryIndexDescription) SetIndexArn(v string) *GlobalSecondaryIndexDescription {
+ s.IndexArn = &v
+ return s
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *GlobalSecondaryIndexDescription) SetIndexName(v string) *GlobalSecondaryIndexDescription {
+ s.IndexName = &v
+ return s
+}
+
+// SetIndexSizeBytes sets the IndexSizeBytes field's value.
+func (s *GlobalSecondaryIndexDescription) SetIndexSizeBytes(v int64) *GlobalSecondaryIndexDescription {
+ s.IndexSizeBytes = &v
+ return s
+}
+
+// SetIndexStatus sets the IndexStatus field's value.
+func (s *GlobalSecondaryIndexDescription) SetIndexStatus(v string) *GlobalSecondaryIndexDescription {
+ s.IndexStatus = &v
+ return s
+}
+
+// SetItemCount sets the ItemCount field's value.
+func (s *GlobalSecondaryIndexDescription) SetItemCount(v int64) *GlobalSecondaryIndexDescription {
+ s.ItemCount = &v
+ return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *GlobalSecondaryIndexDescription) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndexDescription {
+ s.KeySchema = v
+ return s
+}
+
+// SetProjection sets the Projection field's value.
+func (s *GlobalSecondaryIndexDescription) SetProjection(v *Projection) *GlobalSecondaryIndexDescription {
+ s.Projection = v
+ return s
+}
+
+// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
+func (s *GlobalSecondaryIndexDescription) SetProvisionedThroughput(v *ProvisionedThroughputDescription) *GlobalSecondaryIndexDescription {
+ s.ProvisionedThroughput = v
+ return s
+}
+
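+// A sketch of inspecting index state from a DescribeTable response; desc is a
+// hypothetical *TableDescription whose GlobalSecondaryIndexes field holds these
+// descriptions.
+//
+//	for _, idx := range desc.GlobalSecondaryIndexes {
+//		if idx.Backfilling != nil && *idx.Backfilling {
+//			// the index is still being populated
+//		}
+//		if idx.IndexStatus != nil && *idx.IndexStatus == "ACTIVE" {
+//			// the index is ready for use
+//		}
+//	}
+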
+// Represents the properties of a global secondary index for the table when
+// the backup was created.
+type GlobalSecondaryIndexInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index.
+ IndexName *string `min:"3" type:"string"`
+
+ // The complete key schema for a global secondary index, which consists of one
+ // or more pairs of attribute names and key types:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB' usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ KeySchema []*KeySchemaElement `min:"1" type:"list"`
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected.
+ Projection *Projection `type:"structure"`
+
+ // Represents the provisioned throughput settings for the specified global secondary
+ // index.
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
+}
+
+// String returns the string representation
+func (s GlobalSecondaryIndexInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalSecondaryIndexInfo) GoString() string {
+ return s.String()
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *GlobalSecondaryIndexInfo) SetIndexName(v string) *GlobalSecondaryIndexInfo {
+ s.IndexName = &v
+ return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *GlobalSecondaryIndexInfo) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndexInfo {
+ s.KeySchema = v
+ return s
+}
+
+// SetProjection sets the Projection field's value.
+func (s *GlobalSecondaryIndexInfo) SetProjection(v *Projection) *GlobalSecondaryIndexInfo {
+ s.Projection = v
+ return s
+}
+
+// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
+func (s *GlobalSecondaryIndexInfo) SetProvisionedThroughput(v *ProvisionedThroughput) *GlobalSecondaryIndexInfo {
+ s.ProvisionedThroughput = v
+ return s
+}
+
+// Represents one of the following:
+//
+// * A new global secondary index to be added to an existing table.
+//
+// * New provisioned throughput parameters for an existing global secondary
+// index.
+//
+// * An existing global secondary index to be removed from an existing table.
+type GlobalSecondaryIndexUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // The parameters required for creating a global secondary index on an existing
+ // table:
+ //
+ // * IndexName
+ //
+ // * KeySchema
+ //
+ // * AttributeDefinitions
+ //
+ // * Projection
+ //
+ // * ProvisionedThroughput
+ Create *CreateGlobalSecondaryIndexAction `type:"structure"`
+
+ // The name of an existing global secondary index to be removed.
+ Delete *DeleteGlobalSecondaryIndexAction `type:"structure"`
+
+ // The name of an existing global secondary index, along with new provisioned
+ // throughput settings to be applied to that index.
+ Update *UpdateGlobalSecondaryIndexAction `type:"structure"`
+}
+
+// String returns the string representation
+func (s GlobalSecondaryIndexUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalSecondaryIndexUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GlobalSecondaryIndexUpdate) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndexUpdate"}
+ if s.Create != nil {
+ if err := s.Create.Validate(); err != nil {
+ invalidParams.AddNested("Create", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Delete != nil {
+ if err := s.Delete.Validate(); err != nil {
+ invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Update != nil {
+ if err := s.Update.Validate(); err != nil {
+ invalidParams.AddNested("Update", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCreate sets the Create field's value.
+func (s *GlobalSecondaryIndexUpdate) SetCreate(v *CreateGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate {
+ s.Create = v
+ return s
+}
+
+// SetDelete sets the Delete field's value.
+func (s *GlobalSecondaryIndexUpdate) SetDelete(v *DeleteGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate {
+ s.Delete = v
+ return s
+}
+
+// SetUpdate sets the Update field's value.
+func (s *GlobalSecondaryIndexUpdate) SetUpdate(v *UpdateGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate {
+ s.Update = v
+ return s
+}
+
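+// A minimal sketch: exactly one of Create, Update, or Delete is set on each
+// GlobalSecondaryIndexUpdate passed to UpdateTable. The index name is
+// hypothetical, and DeleteGlobalSecondaryIndexAction's setter is defined
+// earlier in this file.
+//
+//	upd := (&GlobalSecondaryIndexUpdate{}).SetDelete(
+//		(&DeleteGlobalSecondaryIndexAction{}).SetIndexName("TitleIndex"))
+//	if err := upd.Validate(); err != nil {
+//		// handle invalid parameters
+//	}
+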
+// Represents the properties of a global table.
+type GlobalTable struct {
+ _ struct{} `type:"structure"`
+
+ // The global table name.
+ GlobalTableName *string `min:"3" type:"string"`
+
+ // The regions where the global table has replicas.
+ ReplicationGroup []*Replica `type:"list"`
+}
+
+// String returns the string representation
+func (s GlobalTable) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalTable) GoString() string {
+ return s.String()
+}
+
+// SetGlobalTableName sets the GlobalTableName field's value.
+func (s *GlobalTable) SetGlobalTableName(v string) *GlobalTable {
+ s.GlobalTableName = &v
+ return s
+}
+
+// SetReplicationGroup sets the ReplicationGroup field's value.
+func (s *GlobalTable) SetReplicationGroup(v []*Replica) *GlobalTable {
+ s.ReplicationGroup = v
+ return s
+}
+
+// Contains details about the global table.
+type GlobalTableDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The creation time of the global table.
+ CreationDateTime *time.Time `type:"timestamp"`
+
+ // The unique identifier of the global table.
+ GlobalTableArn *string `type:"string"`
+
+ // The global table name.
+ GlobalTableName *string `min:"3" type:"string"`
+
+ // The current state of the global table:
+ //
+ // * CREATING - The global table is being created.
+ //
+ // * UPDATING - The global table is being updated.
+ //
+ // * DELETING - The global table is being deleted.
+ //
+ // * ACTIVE - The global table is ready for use.
+ GlobalTableStatus *string `type:"string" enum:"GlobalTableStatus"`
+
+ // The regions where the global table has replicas.
+ ReplicationGroup []*ReplicaDescription `type:"list"`
+}
+
+// String returns the string representation
+func (s GlobalTableDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalTableDescription) GoString() string {
+ return s.String()
+}
+
+// SetCreationDateTime sets the CreationDateTime field's value.
+func (s *GlobalTableDescription) SetCreationDateTime(v time.Time) *GlobalTableDescription {
+ s.CreationDateTime = &v
+ return s
+}
+
+// SetGlobalTableArn sets the GlobalTableArn field's value.
+func (s *GlobalTableDescription) SetGlobalTableArn(v string) *GlobalTableDescription {
+ s.GlobalTableArn = &v
+ return s
+}
+
+// SetGlobalTableName sets the GlobalTableName field's value.
+func (s *GlobalTableDescription) SetGlobalTableName(v string) *GlobalTableDescription {
+ s.GlobalTableName = &v
+ return s
+}
+
+// SetGlobalTableStatus sets the GlobalTableStatus field's value.
+func (s *GlobalTableDescription) SetGlobalTableStatus(v string) *GlobalTableDescription {
+ s.GlobalTableStatus = &v
+ return s
+}
+
+// SetReplicationGroup sets the ReplicationGroup field's value.
+func (s *GlobalTableDescription) SetReplicationGroup(v []*ReplicaDescription) *GlobalTableDescription {
+ s.ReplicationGroup = v
+ return s
+}
+
+// Represents the settings of a global secondary index for a global table that
+// will be modified.
+type GlobalTableGlobalSecondaryIndexSettingsUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index. The name must be unique among all
+ // other indexes on this table.
+ //
+ // IndexName is a required field
+ IndexName *string `min:"3" type:"string" required:"true"`
+
+ // AutoScaling settings for managing a global secondary index's write capacity
+ // units.
+ ProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"`
+
+ // The maximum number of writes consumed per second before DynamoDB returns
+ // a ThrottlingException.
+ ProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"`
+}
+
+// String returns the string representation
+func (s GlobalTableGlobalSecondaryIndexSettingsUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalTableGlobalSecondaryIndexSettingsUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GlobalTableGlobalSecondaryIndexSettingsUpdate"}
+ if s.IndexName == nil {
+ invalidParams.Add(request.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+ }
+ if s.ProvisionedWriteCapacityUnits != nil && *s.ProvisionedWriteCapacityUnits < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("ProvisionedWriteCapacityUnits", 1))
+ }
+ if s.ProvisionedWriteCapacityAutoScalingSettingsUpdate != nil {
+ if err := s.ProvisionedWriteCapacityAutoScalingSettingsUpdate.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetIndexName(v string) *GlobalTableGlobalSecondaryIndexSettingsUpdate {
+ s.IndexName = &v
+ return s
+}
+
+// SetProvisionedWriteCapacityAutoScalingSettingsUpdate sets the ProvisionedWriteCapacityAutoScalingSettingsUpdate field's value.
+func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetProvisionedWriteCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *GlobalTableGlobalSecondaryIndexSettingsUpdate {
+ s.ProvisionedWriteCapacityAutoScalingSettingsUpdate = v
+ return s
+}
+
+// SetProvisionedWriteCapacityUnits sets the ProvisionedWriteCapacityUnits field's value.
+func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetProvisionedWriteCapacityUnits(v int64) *GlobalTableGlobalSecondaryIndexSettingsUpdate {
+ s.ProvisionedWriteCapacityUnits = &v
+ return s
+}
+
+// Information about item collections, if any, that were affected by the operation.
+// ItemCollectionMetrics is only returned if the request asked for it. If the
+// table does not have any local secondary indexes, this information is not
+// returned in the response.
+type ItemCollectionMetrics struct {
+ _ struct{} `type:"structure"`
+
+ // The partition key value of the item collection. This value is the same as
+ // the partition key value of the item.
+ ItemCollectionKey map[string]*AttributeValue `type:"map"`
+
+ // An estimate of item collection size, in gigabytes. This value is a two-element
+ // array containing a lower bound and an upper bound for the estimate. The estimate
+ // includes the size of all the items in the table, plus the size of all attributes
+ // projected into all of the local secondary indexes on that table. Use this
+ // estimate to measure whether a local secondary index is approaching its size
+ // limit.
+ //
+ // The estimate is subject to change over time; therefore, do not rely on the
+ // precision or accuracy of the estimate.
+ SizeEstimateRangeGB []*float64 `type:"list"`
+}
+
+// String returns the string representation
+func (s ItemCollectionMetrics) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ItemCollectionMetrics) GoString() string {
+ return s.String()
+}
+
+// SetItemCollectionKey sets the ItemCollectionKey field's value.
+func (s *ItemCollectionMetrics) SetItemCollectionKey(v map[string]*AttributeValue) *ItemCollectionMetrics {
+ s.ItemCollectionKey = v
+ return s
+}
+
+// SetSizeEstimateRangeGB sets the SizeEstimateRangeGB field's value.
+func (s *ItemCollectionMetrics) SetSizeEstimateRangeGB(v []*float64) *ItemCollectionMetrics {
+ s.SizeEstimateRangeGB = v
+ return s
+}
+
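+// A sketch of reading the size estimate returned with a write; m is a
+// hypothetical *ItemCollectionMetrics from a PutItem or UpdateItem response.
+// The 10 GB figure is the documented item collection limit for tables with
+// local secondary indexes.
+//
+//	if m != nil && len(m.SizeEstimateRangeGB) == 2 {
+//		upper := *m.SizeEstimateRangeGB[1] // upper bound of the estimate, in GB
+//		if upper > 8 {
+//			// the item collection is approaching the 10 GB limit
+//		}
+//	}
+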
+// Details for the requested item.
+type ItemResponse struct {
+ _ struct{} `type:"structure"`
+
+ // Map of attribute data consisting of the data type and attribute value.
+ Item map[string]*AttributeValue `type:"map"`
+}
+
+// String returns the string representation
+func (s ItemResponse) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ItemResponse) GoString() string {
+ return s.String()
+}
+
+// SetItem sets the Item field's value.
+func (s *ItemResponse) SetItem(v map[string]*AttributeValue) *ItemResponse {
+ s.Item = v
+ return s
+}
+
+// Represents a single element of a key schema. A key schema specifies the attributes
+// that make up the primary key of a table, or the key attributes of an index.
+//
+// A KeySchemaElement represents exactly one attribute of the primary key. For
+// example, a simple primary key would be represented by one KeySchemaElement
+// (for the partition key). A composite primary key would require one KeySchemaElement
+// for the partition key, and another KeySchemaElement for the sort key.
+//
+// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute).
+// The data type must be one of String, Number, or Binary. The attribute cannot
+// be nested within a List or a Map.
+type KeySchemaElement struct {
+ _ struct{} `type:"structure"`
+
+ // The name of a key attribute.
+ //
+ // AttributeName is a required field
+ AttributeName *string `min:"1" type:"string" required:"true"`
+
+ // The role that this key attribute will assume:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB' usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // KeyType is a required field
+ KeyType *string `type:"string" required:"true" enum:"KeyType"`
+}
+
+// String returns the string representation
+func (s KeySchemaElement) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s KeySchemaElement) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *KeySchemaElement) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "KeySchemaElement"}
+ if s.AttributeName == nil {
+ invalidParams.Add(request.NewErrParamRequired("AttributeName"))
+ }
+ if s.AttributeName != nil && len(*s.AttributeName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1))
+ }
+ if s.KeyType == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeyType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributeName sets the AttributeName field's value.
+func (s *KeySchemaElement) SetAttributeName(v string) *KeySchemaElement {
+ s.AttributeName = &v
+ return s
+}
+
+// SetKeyType sets the KeyType field's value.
+func (s *KeySchemaElement) SetKeyType(v string) *KeySchemaElement {
+ s.KeyType = &v
+ return s
+}
+
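+// A sketch of a composite primary key: one HASH element followed by one RANGE
+// element. The attribute names are hypothetical.
+//
+//	keySchema := []*KeySchemaElement{
+//		(&KeySchemaElement{}).SetAttributeName("Artist").SetKeyType("HASH"),
+//		(&KeySchemaElement{}).SetAttributeName("SongTitle").SetKeyType("RANGE"),
+//	}
+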
+// Represents a set of primary keys and, for each key, the attributes to retrieve
+// from the table.
+//
+// For each primary key, you must provide all of the key attributes. For example,
+// with a simple primary key, you only need to provide the partition key. For
+// a composite primary key, you must provide both the partition key and the
+// sort key.
+type KeysAndAttributes struct {
+ _ struct{} `type:"structure"`
+
+ // This is a legacy parameter. Use ProjectionExpression instead. For more information,
+ // see Legacy Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html)
+ // in the Amazon DynamoDB Developer Guide.
+ AttributesToGet []*string `min:"1" type:"list"`
+
+ // The consistency of a read operation. If set to true, then a strongly consistent
+ // read is used; otherwise, an eventually consistent read is used.
+ ConsistentRead *bool `type:"boolean"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // * To access an attribute whose name conflicts with a DynamoDB reserved
+ // word.
+ //
+ // * To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // * To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // * Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be
+ // used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // * {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // * #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see Accessing Item Attributes
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // The primary key attribute values that define the items and the attributes
+ // associated with the items.
+ //
+ // Keys is a required field
+ Keys []map[string]*AttributeValue `min:"1" type:"list" required:"true"`
+
+ // A string that identifies one or more attributes to retrieve from the table.
+ // These attributes can include scalars, sets, or elements of a JSON document.
+ // The attributes in the ProjectionExpression must be separated by commas.
+ //
+ // If no attribute names are specified, then all attributes will be returned.
+ // If any of the requested attributes are not found, they will not appear in
+ // the result.
+ //
+ // For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ProjectionExpression *string `type:"string"`
+}
+
+// String returns the string representation
+func (s KeysAndAttributes) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s KeysAndAttributes) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *KeysAndAttributes) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "KeysAndAttributes"}
+ if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1))
+ }
+ if s.Keys == nil {
+ invalidParams.Add(request.NewErrParamRequired("Keys"))
+ }
+ if s.Keys != nil && len(s.Keys) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Keys", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributesToGet sets the AttributesToGet field's value.
+func (s *KeysAndAttributes) SetAttributesToGet(v []*string) *KeysAndAttributes {
+ s.AttributesToGet = v
+ return s
+}
+
+// SetConsistentRead sets the ConsistentRead field's value.
+func (s *KeysAndAttributes) SetConsistentRead(v bool) *KeysAndAttributes {
+ s.ConsistentRead = &v
+ return s
+}
+
+// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
+func (s *KeysAndAttributes) SetExpressionAttributeNames(v map[string]*string) *KeysAndAttributes {
+ s.ExpressionAttributeNames = v
+ return s
+}
+
+// SetKeys sets the Keys field's value.
+func (s *KeysAndAttributes) SetKeys(v []map[string]*AttributeValue) *KeysAndAttributes {
+ s.Keys = v
+ return s
+}
+
+// SetProjectionExpression sets the ProjectionExpression field's value.
+func (s *KeysAndAttributes) SetProjectionExpression(v string) *KeysAndAttributes {
+ s.ProjectionExpression = &v
+ return s
+}
+
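+// A sketch of a KeysAndAttributes value for a BatchGetItem request, using the
+// #P substitution described above. The table attributes are hypothetical;
+// aws.String is the SDK's string-pointer helper.
+//
+//	ka := (&KeysAndAttributes{}).
+//		SetKeys([]map[string]*AttributeValue{
+//			{"Artist": {S: aws.String("No One You Know")}},
+//		}).
+//		SetProjectionExpression("#P, Artist").
+//		SetExpressionAttributeNames(map[string]*string{
+//			"#P": aws.String("Percentile"),
+//		})
+//	if err := ka.Validate(); err != nil {
+//		// handle invalid parameters
+//	}
+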
+type ListBackupsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Only backups of the specified type are listed.
+ //
+ // BackupType can be one of:
+ //
+ // * USER - On-demand backup created by you.
+ //
+ // * SYSTEM - On-demand backup automatically created by DynamoDB.
+ //
+ // * ALL - All types of on-demand backups (USER and SYSTEM).
+ BackupType *string `type:"string" enum:"BackupTypeFilter"`
+
+ // The Amazon Resource Name (ARN) of the backup last evaluated when the current
+ // page of results was returned. This is the LastEvaluatedBackupArn from a
+ // previous ListBackups response; specify it here to fetch the next page of
+ // results.
+ ExclusiveStartBackupArn *string `min:"37" type:"string"`
+
+ // Maximum number of backups to return at once.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // The backups from the table specified by TableName are listed.
+ TableName *string `min:"3" type:"string"`
+
+ // Only backups created after this time are listed. TimeRangeLowerBound is inclusive.
+ TimeRangeLowerBound *time.Time `type:"timestamp"`
+
+ // Only backups created before this time are listed. TimeRangeUpperBound is
+ // exclusive.
+ TimeRangeUpperBound *time.Time `type:"timestamp"`
+}
+
+// String returns the string representation
+func (s ListBackupsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBackupsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBackupsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListBackupsInput"}
+ if s.ExclusiveStartBackupArn != nil && len(*s.ExclusiveStartBackupArn) < 37 {
+ invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartBackupArn", 37))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBackupType sets the BackupType field's value.
+func (s *ListBackupsInput) SetBackupType(v string) *ListBackupsInput {
+ s.BackupType = &v
+ return s
+}
+
+// SetExclusiveStartBackupArn sets the ExclusiveStartBackupArn field's value.
+func (s *ListBackupsInput) SetExclusiveStartBackupArn(v string) *ListBackupsInput {
+ s.ExclusiveStartBackupArn = &v
+ return s
+}
+
+// SetLimit sets the Limit field's value.
+func (s *ListBackupsInput) SetLimit(v int64) *ListBackupsInput {
+ s.Limit = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *ListBackupsInput) SetTableName(v string) *ListBackupsInput {
+ s.TableName = &v
+ return s
+}
+
+// SetTimeRangeLowerBound sets the TimeRangeLowerBound field's value.
+func (s *ListBackupsInput) SetTimeRangeLowerBound(v time.Time) *ListBackupsInput {
+ s.TimeRangeLowerBound = &v
+ return s
+}
+
+// SetTimeRangeUpperBound sets the TimeRangeUpperBound field's value.
+func (s *ListBackupsInput) SetTimeRangeUpperBound(v time.Time) *ListBackupsInput {
+ s.TimeRangeUpperBound = &v
+ return s
+}
+
+type ListBackupsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // List of BackupSummary objects.
+ BackupSummaries []*BackupSummary `type:"list"`
+
+ // The ARN of the backup last evaluated when the current page of results was
+ // returned, inclusive of the current page of results. This value may be specified
+ // as the ExclusiveStartBackupArn of a new ListBackups operation in order to
+ // fetch the next page of results.
+ //
+ // If LastEvaluatedBackupArn is empty, then the last page of results has been
+ // processed and there are no more results to be retrieved.
+ //
+ // If LastEvaluatedBackupArn is not empty, this may or may not indicate that
+ // there is more data to be returned. All results are guaranteed to have been
+ // returned if and only if no value for LastEvaluatedBackupArn is returned.
+ LastEvaluatedBackupArn *string `min:"37" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBackupsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBackupsOutput) GoString() string {
+ return s.String()
+}
+
+// SetBackupSummaries sets the BackupSummaries field's value.
+func (s *ListBackupsOutput) SetBackupSummaries(v []*BackupSummary) *ListBackupsOutput {
+ s.BackupSummaries = v
+ return s
+}
+
+// SetLastEvaluatedBackupArn sets the LastEvaluatedBackupArn field's value.
+func (s *ListBackupsOutput) SetLastEvaluatedBackupArn(v string) *ListBackupsOutput {
+ s.LastEvaluatedBackupArn = &v
+ return s
+}
+
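+// A pagination sketch using the LastEvaluatedBackupArn contract described
+// above; svc is a hypothetical DynamoDB client and the table name is made up.
+//
+//	in := (&ListBackupsInput{}).SetTableName("Music").SetLimit(10)
+//	for {
+//		out, err := svc.ListBackups(in)
+//		if err != nil {
+//			break // handle the error
+//		}
+//		// consume out.BackupSummaries
+//		if out.LastEvaluatedBackupArn == nil {
+//			break // the last page has been processed
+//		}
+//		in.SetExclusiveStartBackupArn(*out.LastEvaluatedBackupArn)
+//	}
+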
+type ListGlobalTablesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The first global table name that this operation will evaluate.
+ ExclusiveStartGlobalTableName *string `min:"3" type:"string"`
+
+ // The maximum number of table names to return.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // Lists the global tables in a specific Region.
+ RegionName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListGlobalTablesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListGlobalTablesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListGlobalTablesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListGlobalTablesInput"}
+ if s.ExclusiveStartGlobalTableName != nil && len(*s.ExclusiveStartGlobalTableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartGlobalTableName", 3))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetExclusiveStartGlobalTableName sets the ExclusiveStartGlobalTableName field's value.
+func (s *ListGlobalTablesInput) SetExclusiveStartGlobalTableName(v string) *ListGlobalTablesInput {
+ s.ExclusiveStartGlobalTableName = &v
+ return s
+}
+
+// SetLimit sets the Limit field's value.
+func (s *ListGlobalTablesInput) SetLimit(v int64) *ListGlobalTablesInput {
+ s.Limit = &v
+ return s
+}
+
+// SetRegionName sets the RegionName field's value.
+func (s *ListGlobalTablesInput) SetRegionName(v string) *ListGlobalTablesInput {
+ s.RegionName = &v
+ return s
+}
+
+type ListGlobalTablesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // List of global table names.
+ GlobalTables []*GlobalTable `type:"list"`
+
+ // Last evaluated global table name.
+ LastEvaluatedGlobalTableName *string `min:"3" type:"string"`
+}
+
+// String returns the string representation
+func (s ListGlobalTablesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListGlobalTablesOutput) GoString() string {
+ return s.String()
+}
+
+// SetGlobalTables sets the GlobalTables field's value.
+func (s *ListGlobalTablesOutput) SetGlobalTables(v []*GlobalTable) *ListGlobalTablesOutput {
+ s.GlobalTables = v
+ return s
+}
+
+// SetLastEvaluatedGlobalTableName sets the LastEvaluatedGlobalTableName field's value.
+func (s *ListGlobalTablesOutput) SetLastEvaluatedGlobalTableName(v string) *ListGlobalTablesOutput {
+ s.LastEvaluatedGlobalTableName = &v
+ return s
+}
+
+// Represents the input of a ListTables operation.
+type ListTablesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The first table name that this operation will evaluate. Use the value that
+ // was returned for LastEvaluatedTableName in a previous operation, so that
+ // you can obtain the next page of results.
+ ExclusiveStartTableName *string `min:"3" type:"string"`
+
+ // A maximum number of table names to return. If this parameter is not specified,
+ // the limit is 100.
+ Limit *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ListTablesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTablesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTablesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListTablesInput"}
+ if s.ExclusiveStartTableName != nil && len(*s.ExclusiveStartTableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartTableName", 3))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetExclusiveStartTableName sets the ExclusiveStartTableName field's value.
+func (s *ListTablesInput) SetExclusiveStartTableName(v string) *ListTablesInput {
+ s.ExclusiveStartTableName = &v
+ return s
+}
+
+// SetLimit sets the Limit field's value.
+func (s *ListTablesInput) SetLimit(v int64) *ListTablesInput {
+ s.Limit = &v
+ return s
+}
+
+// Represents the output of a ListTables operation.
+type ListTablesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the last table in the current page of results. Use this value
+ // as the ExclusiveStartTableName in a new request to obtain the next page of
+ // results, until all the table names are returned.
+ //
+ // If you do not receive a LastEvaluatedTableName value in the response, this
+ // means that there are no more table names to be retrieved.
+ LastEvaluatedTableName *string `min:"3" type:"string"`
+
+ // The names of the tables associated with the current account at the current
+ // endpoint. The maximum size of this array is 100.
+ //
+ // If LastEvaluatedTableName also appears in the output, you can use this value
+ // as the ExclusiveStartTableName parameter in a subsequent ListTables request
+ // and obtain the next page of results.
+ TableNames []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ListTablesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTablesOutput) GoString() string {
+ return s.String()
+}
+
+// SetLastEvaluatedTableName sets the LastEvaluatedTableName field's value.
+func (s *ListTablesOutput) SetLastEvaluatedTableName(v string) *ListTablesOutput {
+ s.LastEvaluatedTableName = &v
+ return s
+}
+
+// SetTableNames sets the TableNames field's value.
+func (s *ListTablesOutput) SetTableNames(v []*string) *ListTablesOutput {
+ s.TableNames = v
+ return s
+}
+
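+// The same pagination pattern applies to ListTables, keyed on
+// LastEvaluatedTableName; svc is again a hypothetical DynamoDB client.
+//
+//	in := &ListTablesInput{}
+//	for {
+//		out, err := svc.ListTables(in)
+//		if err != nil {
+//			break // handle the error
+//		}
+//		// consume out.TableNames
+//		if out.LastEvaluatedTableName == nil {
+//			break
+//		}
+//		in.SetExclusiveStartTableName(*out.LastEvaluatedTableName)
+//	}
+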
+type ListTagsOfResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional string that, if supplied, must be copied from the output of a
+ // previous call to ListTagsOfResource. When provided in this manner, this API
+ // fetches the next page of results.
+ NextToken *string `type:"string"`
+
+ // The Amazon DynamoDB resource with tags to be listed. This value is an Amazon
+ // Resource Name (ARN).
+ //
+ // ResourceArn is a required field
+ ResourceArn *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListTagsOfResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsOfResourceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTagsOfResourceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListTagsOfResourceInput"}
+ if s.ResourceArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+ }
+ if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListTagsOfResourceInput) SetNextToken(v string) *ListTagsOfResourceInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *ListTagsOfResourceInput) SetResourceArn(v string) *ListTagsOfResourceInput {
+ s.ResourceArn = &v
+ return s
+}
+
+type ListTagsOfResourceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If this value is returned, there are additional results to be displayed.
+ // To retrieve them, call ListTagsOfResource again, with NextToken set to this
+ // value.
+ NextToken *string `type:"string"`
+
+ // The tags currently associated with the Amazon DynamoDB resource.
+ Tags []*Tag `type:"list"`
+}
+
+// String returns the string representation
+func (s ListTagsOfResourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsOfResourceOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListTagsOfResourceOutput) SetNextToken(v string) *ListTagsOfResourceOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *ListTagsOfResourceOutput) SetTags(v []*Tag) *ListTagsOfResourceOutput {
+ s.Tags = v
+ return s
+}
+
+// Represents the properties of a local secondary index.
+type LocalSecondaryIndex struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the local secondary index. The name must be unique among all
+ // other indexes on this table.
+ //
+ // IndexName is a required field
+ IndexName *string `min:"3" type:"string" required:"true"`
+
+ // The complete key schema for the local secondary index, consisting of one
+ // or more pairs of attribute names and key types:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB' usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // KeySchema is a required field
+ KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
+
+ // Represents attributes that are copied (projected) from the table into the
+ // local secondary index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected.
+ //
+ // Projection is a required field
+ Projection *Projection `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s LocalSecondaryIndex) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LocalSecondaryIndex) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LocalSecondaryIndex) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LocalSecondaryIndex"}
+ if s.IndexName == nil {
+ invalidParams.Add(request.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+ }
+ if s.KeySchema == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeySchema"))
+ }
+ if s.KeySchema != nil && len(s.KeySchema) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1))
+ }
+ if s.Projection == nil {
+ invalidParams.Add(request.NewErrParamRequired("Projection"))
+ }
+ if s.KeySchema != nil {
+ for i, v := range s.KeySchema {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.Projection != nil {
+ if err := s.Projection.Validate(); err != nil {
+ invalidParams.AddNested("Projection", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *LocalSecondaryIndex) SetIndexName(v string) *LocalSecondaryIndex {
+ s.IndexName = &v
+ return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *LocalSecondaryIndex) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndex {
+ s.KeySchema = v
+ return s
+}
+
+// SetProjection sets the Projection field's value.
+func (s *LocalSecondaryIndex) SetProjection(v *Projection) *LocalSecondaryIndex {
+ s.Projection = v
+ return s
+}
+
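+// A sketch of a local secondary index. An LSI uses the table's partition key
+// as its HASH element paired with an alternative RANGE key; the names here are
+// hypothetical.
+//
+//	lsi := (&LocalSecondaryIndex{}).
+//		SetIndexName("AlbumTitleIndex").
+//		SetKeySchema([]*KeySchemaElement{
+//			(&KeySchemaElement{}).SetAttributeName("Artist").SetKeyType("HASH"),
+//			(&KeySchemaElement{}).SetAttributeName("AlbumTitle").SetKeyType("RANGE"),
+//		}).
+//		SetProjection((&Projection{}).SetProjectionType("ALL"))
+//	if err := lsi.Validate(); err != nil {
+//		// handle invalid parameters
+//	}
+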
+// Represents the properties of a local secondary index.
+type LocalSecondaryIndexDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) that uniquely identifies the index.
+ IndexArn *string `type:"string"`
+
+ // Represents the name of the local secondary index.
+ IndexName *string `min:"3" type:"string"`
+
+ // The total size of the specified index, in bytes. DynamoDB updates this value
+ // approximately every six hours. Recent changes might not be reflected in this
+ // value.
+ IndexSizeBytes *int64 `type:"long"`
+
+ // The number of items in the specified index. DynamoDB updates this value approximately
+ // every six hours. Recent changes might not be reflected in this value.
+ ItemCount *int64 `type:"long"`
+
+ // The complete key schema for the local secondary index, consisting of one
+ // or more pairs of attribute names and key types:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB' usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ KeySchema []*KeySchemaElement `min:"1" type:"list"`
+
+ // Represents attributes that are copied (projected) from the table into the
+ // local secondary index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected.
+ Projection *Projection `type:"structure"`
+}
+
+// String returns the string representation
+func (s LocalSecondaryIndexDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LocalSecondaryIndexDescription) GoString() string {
+ return s.String()
+}
+
+// SetIndexArn sets the IndexArn field's value.
+func (s *LocalSecondaryIndexDescription) SetIndexArn(v string) *LocalSecondaryIndexDescription {
+ s.IndexArn = &v
+ return s
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *LocalSecondaryIndexDescription) SetIndexName(v string) *LocalSecondaryIndexDescription {
+ s.IndexName = &v
+ return s
+}
+
+// SetIndexSizeBytes sets the IndexSizeBytes field's value.
+func (s *LocalSecondaryIndexDescription) SetIndexSizeBytes(v int64) *LocalSecondaryIndexDescription {
+ s.IndexSizeBytes = &v
+ return s
+}
+
+// SetItemCount sets the ItemCount field's value.
+func (s *LocalSecondaryIndexDescription) SetItemCount(v int64) *LocalSecondaryIndexDescription {
+ s.ItemCount = &v
+ return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *LocalSecondaryIndexDescription) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndexDescription {
+ s.KeySchema = v
+ return s
+}
+
+// SetProjection sets the Projection field's value.
+func (s *LocalSecondaryIndexDescription) SetProjection(v *Projection) *LocalSecondaryIndexDescription {
+ s.Projection = v
+ return s
+}
+
+// Represents the properties of a local secondary index for the table when the
+// backup was created.
+type LocalSecondaryIndexInfo struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the name of the local secondary index.
+ IndexName *string `min:"3" type:"string"`
+
+ // The complete key schema for a local secondary index, which consists of one
+ // or more pairs of attribute names and key types:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB' usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ KeySchema []*KeySchemaElement `min:"1" type:"list"`
+
+ // Represents attributes that are copied (projected) from the table into the
+ // local secondary index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected.
+ Projection *Projection `type:"structure"`
+}
+
+// String returns the string representation
+func (s LocalSecondaryIndexInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LocalSecondaryIndexInfo) GoString() string {
+ return s.String()
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *LocalSecondaryIndexInfo) SetIndexName(v string) *LocalSecondaryIndexInfo {
+ s.IndexName = &v
+ return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *LocalSecondaryIndexInfo) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndexInfo {
+ s.KeySchema = v
+ return s
+}
+
+// SetProjection sets the Projection field's value.
+func (s *LocalSecondaryIndexInfo) SetProjection(v *Projection) *LocalSecondaryIndexInfo {
+ s.Projection = v
+ return s
+}
+
+// The description of the point-in-time recovery settings applied to the table.
+type PointInTimeRecoveryDescription struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the earliest point in time you can restore your table to. You
+ // can restore your table to any point in time during the last 35 days.
+ EarliestRestorableDateTime *time.Time `type:"timestamp"`
+
+ // LatestRestorableDateTime is typically 5 minutes before the current time.
+ LatestRestorableDateTime *time.Time `type:"timestamp"`
+
+ // The current state of point in time recovery:
+ //
+ // * ENABLING - Point in time recovery is being enabled.
+ //
+ // * ENABLED - Point in time recovery is enabled.
+ //
+ // * DISABLED - Point in time recovery is disabled.
+ PointInTimeRecoveryStatus *string `type:"string" enum:"PointInTimeRecoveryStatus"`
+}
+
+// String returns the string representation
+func (s PointInTimeRecoveryDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PointInTimeRecoveryDescription) GoString() string {
+ return s.String()
+}
+
+// SetEarliestRestorableDateTime sets the EarliestRestorableDateTime field's value.
+func (s *PointInTimeRecoveryDescription) SetEarliestRestorableDateTime(v time.Time) *PointInTimeRecoveryDescription {
+ s.EarliestRestorableDateTime = &v
+ return s
+}
+
+// SetLatestRestorableDateTime sets the LatestRestorableDateTime field's value.
+func (s *PointInTimeRecoveryDescription) SetLatestRestorableDateTime(v time.Time) *PointInTimeRecoveryDescription {
+ s.LatestRestorableDateTime = &v
+ return s
+}
+
+// SetPointInTimeRecoveryStatus sets the PointInTimeRecoveryStatus field's value.
+func (s *PointInTimeRecoveryDescription) SetPointInTimeRecoveryStatus(v string) *PointInTimeRecoveryDescription {
+ s.PointInTimeRecoveryStatus = &v
+ return s
+}
+
+// Represents the settings used to enable point in time recovery.
+type PointInTimeRecoverySpecification struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether point in time recovery is enabled (true) or disabled (false)
+ // on the table.
+ //
+ // PointInTimeRecoveryEnabled is a required field
+ PointInTimeRecoveryEnabled *bool `type:"boolean" required:"true"`
+}
+
+// String returns the string representation
+func (s PointInTimeRecoverySpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PointInTimeRecoverySpecification) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PointInTimeRecoverySpecification) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PointInTimeRecoverySpecification"}
+ if s.PointInTimeRecoveryEnabled == nil {
+ invalidParams.Add(request.NewErrParamRequired("PointInTimeRecoveryEnabled"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPointInTimeRecoveryEnabled sets the PointInTimeRecoveryEnabled field's value.
+func (s *PointInTimeRecoverySpecification) SetPointInTimeRecoveryEnabled(v bool) *PointInTimeRecoverySpecification {
+ s.PointInTimeRecoveryEnabled = &v
+ return s
+}
+
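+// A sketch of enabling point in time recovery. The specification would
+// typically be passed, together with a table name, to the
+// UpdateContinuousBackups operation defined elsewhere in this file.
+//
+//	spec := (&PointInTimeRecoverySpecification{}).SetPointInTimeRecoveryEnabled(true)
+//	if err := spec.Validate(); err != nil {
+//		// handle invalid parameters
+//	}
+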
+// Represents attributes that are copied (projected) from the table into an
+// index. These are in addition to the primary key attributes and index key
+// attributes, which are automatically projected.
+type Projection struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the non-key attribute names that will be projected into the index.
+ //
+ // For local secondary indexes, the total count of NonKeyAttributes, summed
+ // across all of the local secondary indexes, must not exceed 20. If you project
+ // the same attribute into two different indexes, this counts as two distinct
+ // attributes when determining the total.
+ NonKeyAttributes []*string `min:"1" type:"list"`
+
+ // The set of attributes that are projected into the index:
+ //
+ // * KEYS_ONLY - Only the index and primary keys are projected into the index.
+ //
+ // * INCLUDE - Only the specified table attributes are projected into the
+ // index. The list of projected attributes are in NonKeyAttributes.
+ //
+ // * ALL - All of the table attributes are projected into the index.
+ ProjectionType *string `type:"string" enum:"ProjectionType"`
+}
+
+// String returns the string representation
+func (s Projection) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Projection) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Projection) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Projection"}
+ if s.NonKeyAttributes != nil && len(s.NonKeyAttributes) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("NonKeyAttributes", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetNonKeyAttributes sets the NonKeyAttributes field's value.
+func (s *Projection) SetNonKeyAttributes(v []*string) *Projection {
+ s.NonKeyAttributes = v
+ return s
+}
+
+// SetProjectionType sets the ProjectionType field's value.
+func (s *Projection) SetProjectionType(v string) *Projection {
+ s.ProjectionType = &v
+ return s
+}
+
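+// A sketch of an INCLUDE projection, where NonKeyAttributes names the extra
+// attributes copied into the index. Attribute names are hypothetical; aws.String
+// is the SDK's string-pointer helper.
+//
+//	p := (&Projection{}).
+//		SetProjectionType("INCLUDE").
+//		SetNonKeyAttributes([]*string{aws.String("Genre"), aws.String("Year")})
+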
+// Represents the provisioned throughput settings for a specified table or index.
+// The settings can be modified using the UpdateTable operation.
+//
+// For current minimum and maximum provisioned throughput values, see Limits
+// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+// in the Amazon DynamoDB Developer Guide.
+type ProvisionedThroughput struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum number of strongly consistent reads consumed per second before
+ // DynamoDB returns a ThrottlingException. For more information, see Specifying
+ // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // If read/write capacity mode is PAY_PER_REQUEST, the value is set to 0.
+ //
+ // ReadCapacityUnits is a required field
+ ReadCapacityUnits *int64 `min:"1" type:"long" required:"true"`
+
+ // The maximum number of writes consumed per second before DynamoDB returns
+ // a ThrottlingException. For more information, see Specifying Read and Write
+ // Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // If read/write capacity mode is PAY_PER_REQUEST, the value is set to 0.
+ //
+ // WriteCapacityUnits is a required field
+ WriteCapacityUnits *int64 `min:"1" type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s ProvisionedThroughput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ProvisionedThroughput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ProvisionedThroughput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ProvisionedThroughput"}
+ if s.ReadCapacityUnits == nil {
+ invalidParams.Add(request.NewErrParamRequired("ReadCapacityUnits"))
+ }
+ if s.ReadCapacityUnits != nil && *s.ReadCapacityUnits < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("ReadCapacityUnits", 1))
+ }
+ if s.WriteCapacityUnits == nil {
+ invalidParams.Add(request.NewErrParamRequired("WriteCapacityUnits"))
+ }
+ if s.WriteCapacityUnits != nil && *s.WriteCapacityUnits < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("WriteCapacityUnits", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetReadCapacityUnits sets the ReadCapacityUnits field's value.
+func (s *ProvisionedThroughput) SetReadCapacityUnits(v int64) *ProvisionedThroughput {
+ s.ReadCapacityUnits = &v
+ return s
+}
+
+// SetWriteCapacityUnits sets the WriteCapacityUnits field's value.
+func (s *ProvisionedThroughput) SetWriteCapacityUnits(v int64) *ProvisionedThroughput {
+ s.WriteCapacityUnits = &v
+ return s
+}
+
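+// A sketch of the client-side minimums enforced by Validate: both capacity
+// fields are required and must be at least 1.
+//
+//	pt := (&ProvisionedThroughput{}).
+//		SetReadCapacityUnits(0). // below the minimum of 1
+//		SetWriteCapacityUnits(5)
+//	err := pt.Validate() // non-nil: ReadCapacityUnits fails its minimum check
+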
+// Represents the provisioned throughput settings for the table, consisting
+// of read and write capacity units, along with data about increases and decreases.
+type ProvisionedThroughputDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The date and time of the last provisioned throughput decrease for this table.
+ LastDecreaseDateTime *time.Time `type:"timestamp"`
+
+ // The date and time of the last provisioned throughput increase for this table.
+ LastIncreaseDateTime *time.Time `type:"timestamp"`
+
+ // The number of provisioned throughput decreases for this table during this
+ // UTC calendar day. For current maximums on provisioned throughput decreases,
+ // see Limits (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+ // in the Amazon DynamoDB Developer Guide.
+ NumberOfDecreasesToday *int64 `min:"1" type:"long"`
+
+ // The maximum number of strongly consistent reads consumed per second before
+ // DynamoDB returns a ThrottlingException. Eventually consistent reads require
+ // less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits
+ // per second provides 100 eventually consistent ReadCapacityUnits per second.
+ ReadCapacityUnits *int64 `type:"long"`
+
+ // The maximum number of writes consumed per second before DynamoDB returns
+ // a ThrottlingException.
+ WriteCapacityUnits *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s ProvisionedThroughputDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ProvisionedThroughputDescription) GoString() string {
+ return s.String()
+}
+
+// SetLastDecreaseDateTime sets the LastDecreaseDateTime field's value.
+func (s *ProvisionedThroughputDescription) SetLastDecreaseDateTime(v time.Time) *ProvisionedThroughputDescription {
+ s.LastDecreaseDateTime = &v
+ return s
+}
+
+// SetLastIncreaseDateTime sets the LastIncreaseDateTime field's value.
+func (s *ProvisionedThroughputDescription) SetLastIncreaseDateTime(v time.Time) *ProvisionedThroughputDescription {
+ s.LastIncreaseDateTime = &v
+ return s
+}
+
+// SetNumberOfDecreasesToday sets the NumberOfDecreasesToday field's value.
+func (s *ProvisionedThroughputDescription) SetNumberOfDecreasesToday(v int64) *ProvisionedThroughputDescription {
+ s.NumberOfDecreasesToday = &v
+ return s
+}
+
+// SetReadCapacityUnits sets the ReadCapacityUnits field's value.
+func (s *ProvisionedThroughputDescription) SetReadCapacityUnits(v int64) *ProvisionedThroughputDescription {
+ s.ReadCapacityUnits = &v
+ return s
+}
+
+// SetWriteCapacityUnits sets the WriteCapacityUnits field's value.
+func (s *ProvisionedThroughputDescription) SetWriteCapacityUnits(v int64) *ProvisionedThroughputDescription {
+ s.WriteCapacityUnits = &v
+ return s
+}
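+
+// Editor's illustrative sketch, not generated code: description structs such
+// as this one come back from calls like DescribeTable, so every field is a
+// pointer that may be nil. The aws value helpers dereference safely, returning
+// the zero value for a nil pointer.
+func exampleReadThroughputDescription(d *ProvisionedThroughputDescription) (read, write int64) {
+ return aws.Int64Value(d.ReadCapacityUnits), aws.Int64Value(d.WriteCapacityUnits)
+}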
+
+// Represents a request to perform a PutItem operation.
+type Put struct {
+ _ struct{} `type:"structure"`
+
+ // A condition that must be satisfied in order for a conditional update to succeed.
+ ConditionExpression *string `type:"string"`
+
+ // One or more substitution tokens for attribute names in an expression.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+ // A map of attribute name to attribute values, representing the primary key
+ // of the item to be written by PutItem. All of the table's primary key attributes
+ // must be specified, and their data types must match those of the table's key
+ // schema. If any attributes are present in the item that are part of an index
+ // key schema for the table, their types must match the index key schema.
+ //
+ // Item is a required field
+ Item map[string]*AttributeValue `type:"map" required:"true"`
+
+ // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the
+ // Put condition fails. For ReturnValuesOnConditionCheckFailure, the valid values
+ // are: NONE and ALL_OLD.
+ ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
+
+ // Name of the table in which to write the item.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Put) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Put) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Put) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Put"}
+ if s.Item == nil {
+ invalidParams.Add(request.NewErrParamRequired("Item"))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetConditionExpression sets the ConditionExpression field's value.
+func (s *Put) SetConditionExpression(v string) *Put {
+ s.ConditionExpression = &v
+ return s
+}
+
+// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
+func (s *Put) SetExpressionAttributeNames(v map[string]*string) *Put {
+ s.ExpressionAttributeNames = v
+ return s
+}
+
+// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
+func (s *Put) SetExpressionAttributeValues(v map[string]*AttributeValue) *Put {
+ s.ExpressionAttributeValues = v
+ return s
+}
+
+// SetItem sets the Item field's value.
+func (s *Put) SetItem(v map[string]*AttributeValue) *Put {
+ s.Item = v
+ return s
+}
+
+// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value.
+func (s *Put) SetReturnValuesOnConditionCheckFailure(v string) *Put {
+ s.ReturnValuesOnConditionCheckFailure = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *Put) SetTableName(v string) *Put {
+ s.TableName = &v
+ return s
+}
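+
+// Editor's illustrative sketch, not generated code: Put is consumed by
+// TransactWriteItems as one element of a transaction. The table name "Orders"
+// and key attribute "OrderId" are hypothetical, and svc is an existing
+// DynamoDB client.
+func exampleTransactionalPut(svc *DynamoDB) error {
+ put := (&Put{}).
+ SetTableName("Orders").
+ SetItem(map[string]*AttributeValue{
+ "OrderId": {S: aws.String("order-123")},
+ }).
+ // Fail the whole transaction if this order already exists.
+ SetConditionExpression("attribute_not_exists(OrderId)")
+ _, err := svc.TransactWriteItems(&TransactWriteItemsInput{
+ TransactItems: []*TransactWriteItem{{Put: put}},
+ })
+ return err
+}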
+
+// Represents the input of a PutItem operation.
+type PutItemInput struct {
+ _ struct{} `type:"structure"`
+
+ // A condition that must be satisfied in order for a conditional PutItem operation
+ // to succeed.
+ //
+ // An expression can contain any of the following:
+ //
+ // * Functions: attribute_exists | attribute_not_exists | attribute_type
+ // | contains | begins_with | size These function names are case-sensitive.
+ //
+ // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
+ //
+ // * Logical operators: AND | OR | NOT
+ //
+ // For more information on condition expressions, see Condition Expressions
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConditionExpression *string `type:"string"`
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more information,
+ // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more information,
+ // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html)
+ // in the Amazon DynamoDB Developer Guide.
+ Expected map[string]*ExpectedAttributeValue `type:"map"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // * To access an attribute whose name conflicts with a DynamoDB reserved
+ // word.
+ //
+ // * To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // * To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // * Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be
+ // used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // * {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // * #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see Specifying Item Attributes
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute
+ // value. For example, suppose that you wanted to check whether the value of
+ // the ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+ // }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see Condition Expressions
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+ // A map of attribute name/value pairs, one for each attribute. Only the primary
+ // key attributes are required; you can optionally provide other attribute name-value
+ // pairs for the item.
+ //
+ // You must provide all of the attributes for the primary key. For example,
+ // with a simple primary key, you only need to provide a value for the partition
+ // key. For a composite primary key, you must provide values for both the
+ // partition key and the sort key.
+ //
+ // If you specify any attributes that are part of an index key, then the data
+ // types for those attributes must match those of the schema in the table's
+ // attribute definition.
+ //
+ // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // Each element in the Item map is an AttributeValue object.
+ //
+ // Item is a required field
+ Item map[string]*AttributeValue `type:"map" required:"true"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed. Note that some operations, such as GetItem and
+ // BatchGetItem, do not access any indexes at all. In these cases, specifying
+ // INDEXES will only return ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // Determines whether item collection metrics are returned. If set to SIZE,
+ // the response includes statistics about item collections, if any, that were
+ // modified during the operation. If set to NONE (the default), no statistics
+ // are returned.
+ ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+
+ // Use ReturnValues if you want to get the item attributes as they appeared
+ // before they were updated with the PutItem request. For PutItem, the valid
+ // values are:
+ //
+ // * NONE - If ReturnValues is not specified, or if its value is NONE, then
+ // nothing is returned. (This setting is the default for ReturnValues.)
+ //
+ // * ALL_OLD - If PutItem overwrote an attribute name-value pair, then the
+ // content of the old item is returned.
+ //
+ // The ReturnValues parameter is used by several DynamoDB operations; however,
+ // PutItem does not recognize any values other than NONE or ALL_OLD.
+ ReturnValues *string `type:"string" enum:"ReturnValue"`
+
+ // The name of the table to contain the item.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutItemInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutItemInput"}
+ if s.Item == nil {
+ invalidParams.Add(request.NewErrParamRequired("Item"))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetConditionExpression sets the ConditionExpression field's value.
+func (s *PutItemInput) SetConditionExpression(v string) *PutItemInput {
+ s.ConditionExpression = &v
+ return s
+}
+
+// SetConditionalOperator sets the ConditionalOperator field's value.
+func (s *PutItemInput) SetConditionalOperator(v string) *PutItemInput {
+ s.ConditionalOperator = &v
+ return s
+}
+
+// SetExpected sets the Expected field's value.
+func (s *PutItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *PutItemInput {
+ s.Expected = v
+ return s
+}
+
+// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
+func (s *PutItemInput) SetExpressionAttributeNames(v map[string]*string) *PutItemInput {
+ s.ExpressionAttributeNames = v
+ return s
+}
+
+// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
+func (s *PutItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *PutItemInput {
+ s.ExpressionAttributeValues = v
+ return s
+}
+
+// SetItem sets the Item field's value.
+func (s *PutItemInput) SetItem(v map[string]*AttributeValue) *PutItemInput {
+ s.Item = v
+ return s
+}
+
+// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
+func (s *PutItemInput) SetReturnConsumedCapacity(v string) *PutItemInput {
+ s.ReturnConsumedCapacity = &v
+ return s
+}
+
+// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value.
+func (s *PutItemInput) SetReturnItemCollectionMetrics(v string) *PutItemInput {
+ s.ReturnItemCollectionMetrics = &v
+ return s
+}
+
+// SetReturnValues sets the ReturnValues field's value.
+func (s *PutItemInput) SetReturnValues(v string) *PutItemInput {
+ s.ReturnValues = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *PutItemInput) SetTableName(v string) *PutItemInput {
+ s.TableName = &v
+ return s
+}
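+
+// Editor's illustrative sketch, not generated code: a conditional PutItem that
+// exercises ExpressionAttributeNames and ExpressionAttributeValues as described
+// above. The table ("Products") and its attributes are hypothetical; #n
+// substitutes the reserved word Size and :min carries the comparison value.
+func exampleConditionalPutItem(svc *DynamoDB) (*PutItemOutput, error) {
+ input := (&PutItemInput{}).
+ SetTableName("Products").
+ SetItem(map[string]*AttributeValue{
+ "ProductId": {S: aws.String("p-1")},
+ "Size": {N: aws.String("42")},
+ }).
+ SetConditionExpression("attribute_not_exists(ProductId) OR #n < :min").
+ SetExpressionAttributeNames(map[string]*string{"#n": aws.String("Size")}).
+ SetExpressionAttributeValues(map[string]*AttributeValue{
+ ":min": {N: aws.String("42")},
+ }).
+ // Return the previous item, if one was overwritten.
+ SetReturnValues("ALL_OLD")
+ if err := input.Validate(); err != nil {
+ return nil, err
+ }
+ return svc.PutItem(input)
+}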
+
+// Represents the output of a PutItem operation.
+type PutItemOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The attribute values as they appeared before the PutItem operation, but only
+ // if ReturnValues is specified as ALL_OLD in the request. Each element consists
+ // of an attribute name and an attribute value.
+ Attributes map[string]*AttributeValue `type:"map"`
+
+ // The capacity units consumed by the PutItem operation. The data returned includes
+ // the total provisioned throughput consumed, along with statistics for the
+ // table and any indexes involved in the operation. ConsumedCapacity is only
+ // returned if the ReturnConsumedCapacity parameter was specified. For more
+ // information, see Read/Write Capacity Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConsumedCapacity *ConsumedCapacity `type:"structure"`
+
+ // Information about item collections, if any, that were affected by the PutItem
+ // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics
+ // parameter was specified. If the table does not have any local secondary indexes,
+ // this information is not returned in the response.
+ //
+ // Each ItemCollectionMetrics element consists of:
+ //
+ // * ItemCollectionKey - The partition key value of the item collection.
+ // This is the same as the partition key value of the item itself.
+ //
+ // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
+ // This value is a two-element array containing a lower bound and an upper
+ // bound for the estimate. The estimate includes the size of all the items
+ // in the table, plus the size of all attributes projected into all of the
+ // local secondary indexes on that table. Use this estimate to measure whether
+ // a local secondary index is approaching its size limit. The estimate is
+ // subject to change over time; therefore, do not rely on the precision or
+ // accuracy of the estimate.
+ ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutItemOutput) GoString() string {
+ return s.String()
+}
+
+// SetAttributes sets the Attributes field's value.
+func (s *PutItemOutput) SetAttributes(v map[string]*AttributeValue) *PutItemOutput {
+ s.Attributes = v
+ return s
+}
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.
+func (s *PutItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *PutItemOutput {
+ s.ConsumedCapacity = v
+ return s
+}
+
+// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value.
+func (s *PutItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *PutItemOutput {
+ s.ItemCollectionMetrics = v
+ return s
+}
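+
+// Editor's illustrative sketch, not generated code: ConsumedCapacity on the
+// output is populated only when ReturnConsumedCapacity was set on the request,
+// so it must be nil-checked before use.
+func examplePutItemCapacity(out *PutItemOutput) float64 {
+ if out.ConsumedCapacity == nil {
+ return 0
+ }
+ return aws.Float64Value(out.ConsumedCapacity.CapacityUnits)
+}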
+
+// Represents a request to perform a PutItem operation on an item.
+type PutRequest struct {
+ _ struct{} `type:"structure"`
+
+ // A map of attribute name to attribute values, representing the primary key
+ // of an item to be processed by PutItem. All of the table's primary key attributes
+ // must be specified, and their data types must match those of the table's key
+ // schema. If any attributes are present in the item that are part of an index
+ // key schema for the table, their types must match the index key schema.
+ //
+ // Item is a required field
+ Item map[string]*AttributeValue `type:"map" required:"true"`
+}
+
+// String returns the string representation
+func (s PutRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutRequest) GoString() string {
+ return s.String()
+}
+
+// SetItem sets the Item field's value.
+func (s *PutRequest) SetItem(v map[string]*AttributeValue) *PutRequest {
+ s.Item = v
+ return s
+}
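+
+// Editor's illustrative sketch, not generated code: PutRequest entries are
+// submitted through BatchWriteItem, keyed by table name. The "Music" table and
+// its attributes are hypothetical; a production caller would also retry any
+// UnprocessedItems returned in the output.
+func exampleBatchPut(svc *DynamoDB) error {
+ _, err := svc.BatchWriteItem(&BatchWriteItemInput{
+ RequestItems: map[string][]*WriteRequest{
+ "Music": {
+ {PutRequest: &PutRequest{Item: map[string]*AttributeValue{
+ "Artist": {S: aws.String("No One You Know")},
+ "SongTitle": {S: aws.String("Call Me Today")},
+ }}},
+ },
+ },
+ })
+ return err
+}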
+
+// Represents the input of a Query operation.
+type QueryInput struct {
+ _ struct{} `type:"structure"`
+
+ // This is a legacy parameter. Use ProjectionExpression instead. For more information,
+ // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html)
+ // in the Amazon DynamoDB Developer Guide.
+ AttributesToGet []*string `min:"1" type:"list"`
+
+ // This is a legacy parameter. Use FilterExpression instead. For more information,
+ // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
+
+ // Determines the read consistency model: If set to true, then the operation
+ // uses strongly consistent reads; otherwise, the operation uses eventually
+ // consistent reads.
+ //
+ // Strongly consistent reads are not supported on global secondary indexes.
+ // If you query a global secondary index with ConsistentRead set to true, you
+ // will receive a ValidationException.
+ ConsistentRead *bool `type:"boolean"`
+
+ // The primary key of the first item that this operation will evaluate. Use
+ // the value that was returned for LastEvaluatedKey in the previous operation.
+ //
+ // The data type for ExclusiveStartKey must be String, Number, or Binary. No
+ // set data types are allowed.
+ ExclusiveStartKey map[string]*AttributeValue `type:"map"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // * To access an attribute whose name conflicts with a DynamoDB reserved
+ // word.
+ //
+ // * To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // * To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // * Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be
+ // used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // * {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // * #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see Specifying Item Attributes
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute
+ // value. For example, suppose that you wanted to check whether the value of
+ // the ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+ // }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see Specifying Conditions
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+ // A string that contains conditions that DynamoDB applies after the Query operation,
+ // but before the data is returned to you. Items that do not satisfy the FilterExpression
+ // criteria are not returned.
+ //
+ // A FilterExpression does not allow key attributes. You cannot define a filter
+ // expression based on a partition key or a sort key.
+ //
+ // A FilterExpression is applied after the items have already been read; the
+ // process of filtering does not consume any additional read capacity units.
+ //
+ // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults)
+ // in the Amazon DynamoDB Developer Guide.
+ FilterExpression *string `type:"string"`
+
+ // The name of an index to query. This index can be any local secondary index
+ // or global secondary index on the table. Note that if you use the IndexName
+ // parameter, you must also provide TableName.
+ IndexName *string `min:"3" type:"string"`
+
+ // The condition that specifies the key values for items to be retrieved by
+ // the Query action.
+ //
+ // The condition must perform an equality test on a single partition key value.
+ //
+ // The condition can optionally perform one of several comparison tests on a
+ // single sort key value. This allows Query to retrieve one item with a given
+ // partition key value and sort key value, or several items that have the same
+ // partition key value but different sort key values.
+ //
+ // The partition key equality test is required, and must be specified in the
+ // following format:
+ //
+ // partitionKeyName = :partitionkeyval
+ //
+ // If you also want to provide a condition for the sort key, it must be combined
+ // using AND with the condition for the partition key. Following is an example, using
+ // the = comparison operator for the sort key:
+ //
+ // partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval
+ //
+ // Valid comparisons for the sort key condition are as follows:
+ //
+ // * sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval.
+ //
+ // * sortKeyName < :sortkeyval - true if the sort key value is less than
+ // :sortkeyval.
+ //
+ // * sortKeyName <= :sortkeyval - true if the sort key value is less than
+ // or equal to :sortkeyval.
+ //
+ // * sortKeyName > :sortkeyval - true if the sort key value is greater than
+ // :sortkeyval.
+ //
+ // * sortKeyName >= :sortkeyval - true if the sort key value is greater than
+ // or equal to :sortkeyval.
+ //
+ // * sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort
+ // key value is greater than or equal to :sortkeyval1, and less than or equal
+ // to :sortkeyval2.
+ //
+ // * begins_with ( sortKeyName, :sortkeyval ) - true if the sort key value
+ // begins with a particular operand. (You cannot use this function with a
+ // sort key that is of type Number.) Note that the function name begins_with
+ // is case-sensitive.
+ //
+ // Use the ExpressionAttributeValues parameter to replace tokens such as :partitionval
+ // and :sortval with actual values at runtime.
+ //
+ // You can optionally use the ExpressionAttributeNames parameter to replace
+ // the names of the partition key and sort key with placeholder tokens. This
+ // option might be necessary if an attribute name conflicts with a DynamoDB
+ // reserved word. For example, the following KeyConditionExpression parameter
+ // causes an error because Size is a reserved word:
+ //
+ // * Size = :myval
+ //
+ // To work around this, define a placeholder (such as #S) to represent the attribute
+ // name Size. KeyConditionExpression then is as follows:
+ //
+ // * #S = :myval
+ //
+ // For a list of reserved words, see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // For more information on ExpressionAttributeNames and ExpressionAttributeValues,
+ // see Using Placeholders for Attribute Names and Values (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html)
+ // in the Amazon DynamoDB Developer Guide.
+ KeyConditionExpression *string `type:"string"`
+
+ // This is a legacy parameter. Use KeyConditionExpression instead. For more
+ // information, see KeyConditions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.KeyConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ KeyConditions map[string]*Condition `type:"map"`
+
+ // The maximum number of items to evaluate (not necessarily the number of matching
+ // items). If DynamoDB processes the number of items up to the limit while processing
+ // the results, it stops the operation and returns the matching values up to
+ // that point, and a key in LastEvaluatedKey to apply in a subsequent operation,
+ // so that you can pick up where you left off. Also, if the processed dataset
+ // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation
+ // and returns the matching values up to the limit, and a key in LastEvaluatedKey
+ // to apply in a subsequent operation to continue the operation. For more information,
+ // see Query and Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html)
+ // in the Amazon DynamoDB Developer Guide.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // A string that identifies one or more attributes to retrieve from the table.
+ // These attributes can include scalars, sets, or elements of a JSON document.
+ // The attributes in the expression must be separated by commas.
+ //
+ // If no attribute names are specified, then all attributes will be returned.
+ // If any of the requested attributes are not found, they will not appear in
+ // the result.
+ //
+ // For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ProjectionExpression *string `type:"string"`
+
+ // This is a legacy parameter. Use FilterExpression instead. For more information,
+ // see QueryFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.QueryFilter.html)
+ // in the Amazon DynamoDB Developer Guide.
+ QueryFilter map[string]*Condition `type:"map"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed. Note that some operations, such as GetItem and
+ // BatchGetItem, do not access any indexes at all. In these cases, specifying
+ // INDEXES will only return ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // Specifies the order for index traversal: If true (default), the traversal
+ // is performed in ascending order; if false, the traversal is performed in
+ // descending order.
+ //
+ // Items with the same partition key value are stored in sorted order by sort
+ // key. If the sort key data type is Number, the results are stored in numeric
+ // order. For type String, the results are stored in order of UTF-8 bytes. For
+ // type Binary, DynamoDB treats each byte of the binary data as unsigned.
+ //
+ // If ScanIndexForward is true, DynamoDB returns the results in the order in
+ // which they are stored (by sort key value). This is the default behavior.
+ // If ScanIndexForward is false, DynamoDB reads the results in reverse order
+ // by sort key value, and then returns the results to the client.
+ ScanIndexForward *bool `type:"boolean"`
+
+ // The attributes to be returned in the result. You can retrieve all item attributes,
+ // specific item attributes, the count of matching items, or in the case of
+ // an index, some or all of the attributes projected into the index.
+ //
+ // * ALL_ATTRIBUTES - Returns all of the item attributes from the specified
+ // table or index. If you query a local secondary index, then for each matching
+ // item in the index, DynamoDB fetches the entire item from the parent table.
+ // If the index is configured to project all item attributes, then all of
+ // the data can be obtained from the local secondary index, and no fetching
+ // is required.
+ //
+ // * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves
+ // all attributes that have been projected into the index. If the index is
+ // configured to project all attributes, this return value is equivalent
+ // to specifying ALL_ATTRIBUTES.
+ //
+ // * COUNT - Returns the number of matching items, rather than the matching
+ // items themselves.
+ //
+ // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet.
+ // This return value is equivalent to specifying AttributesToGet without
+ // specifying any value for Select. If you query or scan a local secondary
+ // index and request only attributes that are projected into that index,
+ // the operation will read only the index and not the table. If any of the
+ // requested attributes are not projected into the local secondary index,
+ // DynamoDB fetches each of these attributes from the parent table. This
+ // extra fetching incurs additional throughput cost and latency. If you query
+ // or scan a global secondary index, you can only request attributes that
+ // are projected into the index. Global secondary index queries cannot fetch
+ // attributes from the parent table.
+ //
+ // If neither Select nor AttributesToGet are specified, DynamoDB defaults to
+ // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when
+ // accessing an index. You cannot use both Select and AttributesToGet together
+ // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES.
+ // (This usage is equivalent to specifying AttributesToGet without any value
+ // for Select.)
+ //
+ // If you use the ProjectionExpression parameter, then the value for Select
+ // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an
+ // error.
+ Select *string `type:"string" enum:"Select"`
+
+ // The name of the table containing the requested items.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s QueryInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueryInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *QueryInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "QueryInput"}
+ if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+ if s.KeyConditions != nil {
+ for i, v := range s.KeyConditions {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeyConditions", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.QueryFilter != nil {
+ for i, v := range s.QueryFilter {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueryFilter", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributesToGet sets the AttributesToGet field's value.
+func (s *QueryInput) SetAttributesToGet(v []*string) *QueryInput {
+ s.AttributesToGet = v
+ return s
+}
+
+// SetConditionalOperator sets the ConditionalOperator field's value.
+func (s *QueryInput) SetConditionalOperator(v string) *QueryInput {
+ s.ConditionalOperator = &v
+ return s
+}
+
+// SetConsistentRead sets the ConsistentRead field's value.
+func (s *QueryInput) SetConsistentRead(v bool) *QueryInput {
+ s.ConsistentRead = &v
+ return s
+}
+
+// SetExclusiveStartKey sets the ExclusiveStartKey field's value.
+func (s *QueryInput) SetExclusiveStartKey(v map[string]*AttributeValue) *QueryInput {
+ s.ExclusiveStartKey = v
+ return s
+}
+
+// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
+func (s *QueryInput) SetExpressionAttributeNames(v map[string]*string) *QueryInput {
+ s.ExpressionAttributeNames = v
+ return s
+}
+
+// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
+func (s *QueryInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *QueryInput {
+ s.ExpressionAttributeValues = v
+ return s
+}
+
+// SetFilterExpression sets the FilterExpression field's value.
+func (s *QueryInput) SetFilterExpression(v string) *QueryInput {
+ s.FilterExpression = &v
+ return s
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *QueryInput) SetIndexName(v string) *QueryInput {
+ s.IndexName = &v
+ return s
+}
+
+// SetKeyConditionExpression sets the KeyConditionExpression field's value.
+func (s *QueryInput) SetKeyConditionExpression(v string) *QueryInput {
+ s.KeyConditionExpression = &v
+ return s
+}
+
+// SetKeyConditions sets the KeyConditions field's value.
+func (s *QueryInput) SetKeyConditions(v map[string]*Condition) *QueryInput {
+ s.KeyConditions = v
+ return s
+}
+
+// SetLimit sets the Limit field's value.
+func (s *QueryInput) SetLimit(v int64) *QueryInput {
+ s.Limit = &v
+ return s
+}
+
+// SetProjectionExpression sets the ProjectionExpression field's value.
+func (s *QueryInput) SetProjectionExpression(v string) *QueryInput {
+ s.ProjectionExpression = &v
+ return s
+}
+
+// SetQueryFilter sets the QueryFilter field's value.
+func (s *QueryInput) SetQueryFilter(v map[string]*Condition) *QueryInput {
+ s.QueryFilter = v
+ return s
+}
+
+// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
+func (s *QueryInput) SetReturnConsumedCapacity(v string) *QueryInput {
+ s.ReturnConsumedCapacity = &v
+ return s
+}
+
+// SetScanIndexForward sets the ScanIndexForward field's value.
+func (s *QueryInput) SetScanIndexForward(v bool) *QueryInput {
+ s.ScanIndexForward = &v
+ return s
+}
+
+// SetSelect sets the Select field's value.
+func (s *QueryInput) SetSelect(v string) *QueryInput {
+ s.Select = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *QueryInput) SetTableName(v string) *QueryInput {
+ s.TableName = &v
+ return s
+}
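+
+// Editor's illustrative sketch, not generated code: a Query combining the
+// required partition key equality test with an optional sort key condition,
+// per the KeyConditionExpression documentation above. The "Music" table and
+// its key attributes are hypothetical.
+func exampleQuery(svc *DynamoDB) (*QueryOutput, error) {
+ input := (&QueryInput{}).
+ SetTableName("Music").
+ SetKeyConditionExpression("Artist = :a AND begins_with(SongTitle, :t)").
+ SetExpressionAttributeValues(map[string]*AttributeValue{
+ ":a": {S: aws.String("No One You Know")},
+ ":t": {S: aws.String("Call")},
+ }).
+ // Walk the sort key in descending order to read newest-first.
+ SetScanIndexForward(false).
+ SetLimit(25)
+ if err := input.Validate(); err != nil {
+ return nil, err
+ }
+ return svc.Query(input)
+}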
+
+// Represents the output of a Query operation.
+type QueryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The capacity units consumed by the Query operation. The data returned includes
+ // the total provisioned throughput consumed, along with statistics for the
+ // table and any indexes involved in the operation. ConsumedCapacity is only
+ // returned if the ReturnConsumedCapacity parameter was specified. For more
+ // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConsumedCapacity *ConsumedCapacity `type:"structure"`
+
+ // The number of items in the response.
+ //
+ // If you used a QueryFilter in the request, then Count is the number of items
+ // returned after the filter was applied, and ScannedCount is the number of
+ // matching items before the filter was applied.
+ //
+ // If you did not use a filter in the request, then Count and ScannedCount are
+ // the same.
+ Count *int64 `type:"integer"`
+
+ // An array of item attributes that match the query criteria. Each element in
+ // this array consists of an attribute name and the value for that attribute.
+ Items []map[string]*AttributeValue `type:"list"`
+
+ // The primary key of the item where the operation stopped, inclusive of the
+ // previous result set. Use this value to start a new operation, excluding this
+ // value in the new request.
+ //
+ // If LastEvaluatedKey is empty, then the "last page" of results has been processed
+ // and there is no more data to be retrieved.
+ //
+ // If LastEvaluatedKey is not empty, it does not necessarily mean that there
+ // is more data in the result set. The only way to know when you have reached
+ // the end of the result set is when LastEvaluatedKey is empty.
+ LastEvaluatedKey map[string]*AttributeValue `type:"map"`
+
+ // The number of items evaluated, before any QueryFilter is applied. A high
+ // ScannedCount value with few, or no, Count results indicates an inefficient
+ // Query operation. For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // If you did not use a filter in the request, then ScannedCount is the same
+ // as Count.
+ ScannedCount *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s QueryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueryOutput) GoString() string {
+ return s.String()
+}
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.
+func (s *QueryOutput) SetConsumedCapacity(v *ConsumedCapacity) *QueryOutput {
+ s.ConsumedCapacity = v
+ return s
+}
+
+// SetCount sets the Count field's value.
+func (s *QueryOutput) SetCount(v int64) *QueryOutput {
+ s.Count = &v
+ return s
+}
+
+// SetItems sets the Items field's value.
+func (s *QueryOutput) SetItems(v []map[string]*AttributeValue) *QueryOutput {
+ s.Items = v
+ return s
+}
+
+// SetLastEvaluatedKey sets the LastEvaluatedKey field's value.
+func (s *QueryOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *QueryOutput {
+ s.LastEvaluatedKey = v
+ return s
+}
+
+// SetScannedCount sets the ScannedCount field's value.
+func (s *QueryOutput) SetScannedCount(v int64) *QueryOutput {
+ s.ScannedCount = &v
+ return s
+}
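+
+// Editor's illustrative sketch, not generated code: paging through a result
+// set by feeding LastEvaluatedKey back as ExclusiveStartKey until it comes
+// back empty, which the documentation above notes is the only reliable
+// end-of-results signal. The generated QueryPages helper wraps this same loop.
+func exampleQueryAllPages(svc *DynamoDB, input *QueryInput) ([]map[string]*AttributeValue, error) {
+ var items []map[string]*AttributeValue
+ for {
+ out, err := svc.Query(input)
+ if err != nil {
+ return nil, err
+ }
+ items = append(items, out.Items...)
+ if len(out.LastEvaluatedKey) == 0 {
+ return items, nil
+ }
+ input.ExclusiveStartKey = out.LastEvaluatedKey
+ }
+}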
+
+// Represents the properties of a replica.
+type Replica struct {
+ _ struct{} `type:"structure"`
+
+ // The region where the replica needs to be created.
+ RegionName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Replica) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Replica) GoString() string {
+ return s.String()
+}
+
+// SetRegionName sets the RegionName field's value.
+func (s *Replica) SetRegionName(v string) *Replica {
+ s.RegionName = &v
+ return s
+}
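+
+// Editor's illustrative sketch, not generated code: Replica entries form the
+// ReplicationGroup of a CreateGlobalTable call. The global table name and
+// regions are hypothetical.
+func exampleCreateGlobalTable(svc *DynamoDB) error {
+ _, err := svc.CreateGlobalTable(&CreateGlobalTableInput{
+ GlobalTableName: aws.String("Music"),
+ ReplicationGroup: []*Replica{
+ {RegionName: aws.String("us-east-1")},
+ {RegionName: aws.String("eu-west-1")},
+ },
+ })
+ return err
+}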
+
+// Contains the details of the replica.
+type ReplicaDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the region.
+ RegionName *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ReplicaDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicaDescription) GoString() string {
+ return s.String()
+}
+
+// SetRegionName sets the RegionName field's value.
+func (s *ReplicaDescription) SetRegionName(v string) *ReplicaDescription {
+ s.RegionName = &v
+ return s
+}
+
+// Represents the properties of a global secondary index.
+type ReplicaGlobalSecondaryIndexSettingsDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index. The name must be unique among all
+ // other indexes on this table.
+ //
+ // IndexName is a required field
+ IndexName *string `min:"3" type:"string" required:"true"`
+
+ // The current status of the global secondary index:
+ //
+ // * CREATING - The global secondary index is being created.
+ //
+ // * UPDATING - The global secondary index is being updated.
+ //
+ // * DELETING - The global secondary index is being deleted.
+ //
+ // * ACTIVE - The global secondary index is ready for use.
+ IndexStatus *string `type:"string" enum:"IndexStatus"`
+
+ // Auto scaling settings for a global secondary index replica's read capacity
+ // units.
+ ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
+
+ // The maximum number of strongly consistent reads consumed per second before
+ // DynamoDB returns a ThrottlingException.
+ ProvisionedReadCapacityUnits *int64 `min:"1" type:"long"`
+
+ // Auto scaling settings for a global secondary index replica's write capacity
+ // units.
+ ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
+
+ // The maximum number of writes consumed per second before DynamoDB returns
+ // a ThrottlingException.
+ ProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"`
+}
+
+// String returns the string representation
+func (s ReplicaGlobalSecondaryIndexSettingsDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicaGlobalSecondaryIndexSettingsDescription) GoString() string {
+ return s.String()
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetIndexName(v string) *ReplicaGlobalSecondaryIndexSettingsDescription {
+ s.IndexName = &v
+ return s
+}
+
+// SetIndexStatus sets the IndexStatus field's value.
+func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetIndexStatus(v string) *ReplicaGlobalSecondaryIndexSettingsDescription {
+ s.IndexStatus = &v
+ return s
+}
+
+// SetProvisionedReadCapacityAutoScalingSettings sets the ProvisionedReadCapacityAutoScalingSettings field's value.
+func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexSettingsDescription {
+ s.ProvisionedReadCapacityAutoScalingSettings = v
+ return s
+}
+
+// SetProvisionedReadCapacityUnits sets the ProvisionedReadCapacityUnits field's value.
+func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedReadCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsDescription {
+ s.ProvisionedReadCapacityUnits = &v
+ return s
+}
+
+// SetProvisionedWriteCapacityAutoScalingSettings sets the ProvisionedWriteCapacityAutoScalingSettings field's value.
+func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexSettingsDescription {
+ s.ProvisionedWriteCapacityAutoScalingSettings = v
+ return s
+}
+
+// SetProvisionedWriteCapacityUnits sets the ProvisionedWriteCapacityUnits field's value.
+func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedWriteCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsDescription {
+ s.ProvisionedWriteCapacityUnits = &v
+ return s
+}
+
+// Represents the settings of a global secondary index for a global table that
+// will be modified.
+type ReplicaGlobalSecondaryIndexSettingsUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index. The name must be unique among all
+ // other indexes on this table.
+ //
+ // IndexName is a required field
+ IndexName *string `min:"3" type:"string" required:"true"`
+
+ // Auto scaling settings for managing a global secondary index replica's read
+ // capacity units.
+ ProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"`
+
+ // The maximum number of strongly consistent reads consumed per second before
+ // DynamoDB returns a ThrottlingException.
+ ProvisionedReadCapacityUnits *int64 `min:"1" type:"long"`
+}
+
+// String returns the string representation
+func (s ReplicaGlobalSecondaryIndexSettingsUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicaGlobalSecondaryIndexSettingsUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicaGlobalSecondaryIndexSettingsUpdate"}
+ if s.IndexName == nil {
+ invalidParams.Add(request.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+ }
+ if s.ProvisionedReadCapacityUnits != nil && *s.ProvisionedReadCapacityUnits < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("ProvisionedReadCapacityUnits", 1))
+ }
+ if s.ProvisionedReadCapacityAutoScalingSettingsUpdate != nil {
+ if err := s.ProvisionedReadCapacityAutoScalingSettingsUpdate.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedReadCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetIndexName(v string) *ReplicaGlobalSecondaryIndexSettingsUpdate {
+ s.IndexName = &v
+ return s
+}
+
+// SetProvisionedReadCapacityAutoScalingSettingsUpdate sets the ProvisionedReadCapacityAutoScalingSettingsUpdate field's value.
+func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetProvisionedReadCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *ReplicaGlobalSecondaryIndexSettingsUpdate {
+ s.ProvisionedReadCapacityAutoScalingSettingsUpdate = v
+ return s
+}
+
+// SetProvisionedReadCapacityUnits sets the ProvisionedReadCapacityUnits field's value.
+func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetProvisionedReadCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsUpdate {
+ s.ProvisionedReadCapacityUnits = &v
+ return s
+}
+
+// Represents the properties of a replica.
+type ReplicaSettingsDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The region name of the replica.
+ //
+ // RegionName is a required field
+ RegionName *string `type:"string" required:"true"`
+
+ // The read/write capacity mode of the replica.
+ ReplicaBillingModeSummary *BillingModeSummary `type:"structure"`
+
+ // Replica global secondary index settings for the global table.
+ ReplicaGlobalSecondaryIndexSettings []*ReplicaGlobalSecondaryIndexSettingsDescription `type:"list"`
+
+ // Auto scaling settings for a global table replica's read capacity units.
+ ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
+
+ // The maximum number of strongly consistent reads consumed per second before
+ // DynamoDB returns a ThrottlingException. For more information, see Specifying
+ // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
+ // in the Amazon DynamoDB Developer Guide.
+ ReplicaProvisionedReadCapacityUnits *int64 `type:"long"`
+
+ // Auto scaling settings for a global table replica's write capacity units.
+ ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
+
+ // The maximum number of writes consumed per second before DynamoDB returns
+ // a ThrottlingException. For more information, see Specifying Read and Write
+ // Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
+ // in the Amazon DynamoDB Developer Guide.
+ ReplicaProvisionedWriteCapacityUnits *int64 `type:"long"`
+
+ // The current state of the region:
+ //
+ // * CREATING - The region is being created.
+ //
+ // * UPDATING - The region is being updated.
+ //
+ // * DELETING - The region is being deleted.
+ //
+ // * ACTIVE - The region is ready for use.
+ ReplicaStatus *string `type:"string" enum:"ReplicaStatus"`
+}
+
+// String returns the string representation
+func (s ReplicaSettingsDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicaSettingsDescription) GoString() string {
+ return s.String()
+}
+
+// SetRegionName sets the RegionName field's value.
+func (s *ReplicaSettingsDescription) SetRegionName(v string) *ReplicaSettingsDescription {
+ s.RegionName = &v
+ return s
+}
+
+// SetReplicaBillingModeSummary sets the ReplicaBillingModeSummary field's value.
+func (s *ReplicaSettingsDescription) SetReplicaBillingModeSummary(v *BillingModeSummary) *ReplicaSettingsDescription {
+ s.ReplicaBillingModeSummary = v
+ return s
+}
+
+// SetReplicaGlobalSecondaryIndexSettings sets the ReplicaGlobalSecondaryIndexSettings field's value.
+func (s *ReplicaSettingsDescription) SetReplicaGlobalSecondaryIndexSettings(v []*ReplicaGlobalSecondaryIndexSettingsDescription) *ReplicaSettingsDescription {
+ s.ReplicaGlobalSecondaryIndexSettings = v
+ return s
+}
+
+// SetReplicaProvisionedReadCapacityAutoScalingSettings sets the ReplicaProvisionedReadCapacityAutoScalingSettings field's value.
+func (s *ReplicaSettingsDescription) SetReplicaProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaSettingsDescription {
+ s.ReplicaProvisionedReadCapacityAutoScalingSettings = v
+ return s
+}
+
+// SetReplicaProvisionedReadCapacityUnits sets the ReplicaProvisionedReadCapacityUnits field's value.
+func (s *ReplicaSettingsDescription) SetReplicaProvisionedReadCapacityUnits(v int64) *ReplicaSettingsDescription {
+ s.ReplicaProvisionedReadCapacityUnits = &v
+ return s
+}
+
+// SetReplicaProvisionedWriteCapacityAutoScalingSettings sets the ReplicaProvisionedWriteCapacityAutoScalingSettings field's value.
+func (s *ReplicaSettingsDescription) SetReplicaProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaSettingsDescription {
+ s.ReplicaProvisionedWriteCapacityAutoScalingSettings = v
+ return s
+}
+
+// SetReplicaProvisionedWriteCapacityUnits sets the ReplicaProvisionedWriteCapacityUnits field's value.
+func (s *ReplicaSettingsDescription) SetReplicaProvisionedWriteCapacityUnits(v int64) *ReplicaSettingsDescription {
+ s.ReplicaProvisionedWriteCapacityUnits = &v
+ return s
+}
+
+// SetReplicaStatus sets the ReplicaStatus field's value.
+func (s *ReplicaSettingsDescription) SetReplicaStatus(v string) *ReplicaSettingsDescription {
+ s.ReplicaStatus = &v
+ return s
+}
+
+// Represents the settings for a global table in a region that will be modified.
+type ReplicaSettingsUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // The region of the replica to be added.
+ //
+ // RegionName is a required field
+ RegionName *string `type:"string" required:"true"`
+
+ // Represents the settings of a global secondary index for a global table that
+ // will be modified.
+ ReplicaGlobalSecondaryIndexSettingsUpdate []*ReplicaGlobalSecondaryIndexSettingsUpdate `min:"1" type:"list"`
+
+ // Auto scaling settings for managing a global table replica's read capacity
+ // units.
+ ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"`
+
+ // The maximum number of strongly consistent reads consumed per second before
+ // DynamoDB returns a ThrottlingException. For more information, see Specifying
+ // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
+ // in the Amazon DynamoDB Developer Guide.
+ ReplicaProvisionedReadCapacityUnits *int64 `min:"1" type:"long"`
+}
+
+// String returns the string representation
+func (s ReplicaSettingsUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicaSettingsUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicaSettingsUpdate) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicaSettingsUpdate"}
+ if s.RegionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RegionName"))
+ }
+ if s.ReplicaGlobalSecondaryIndexSettingsUpdate != nil && len(s.ReplicaGlobalSecondaryIndexSettingsUpdate) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ReplicaGlobalSecondaryIndexSettingsUpdate", 1))
+ }
+ if s.ReplicaProvisionedReadCapacityUnits != nil && *s.ReplicaProvisionedReadCapacityUnits < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("ReplicaProvisionedReadCapacityUnits", 1))
+ }
+ if s.ReplicaGlobalSecondaryIndexSettingsUpdate != nil {
+ for i, v := range s.ReplicaGlobalSecondaryIndexSettingsUpdate {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaGlobalSecondaryIndexSettingsUpdate", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate != nil {
+ if err := s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate.Validate(); err != nil {
+ invalidParams.AddNested("ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRegionName sets the RegionName field's value.
+func (s *ReplicaSettingsUpdate) SetRegionName(v string) *ReplicaSettingsUpdate {
+ s.RegionName = &v
+ return s
+}
+
+// SetReplicaGlobalSecondaryIndexSettingsUpdate sets the ReplicaGlobalSecondaryIndexSettingsUpdate field's value.
+func (s *ReplicaSettingsUpdate) SetReplicaGlobalSecondaryIndexSettingsUpdate(v []*ReplicaGlobalSecondaryIndexSettingsUpdate) *ReplicaSettingsUpdate {
+ s.ReplicaGlobalSecondaryIndexSettingsUpdate = v
+ return s
+}
+
+// SetReplicaProvisionedReadCapacityAutoScalingSettingsUpdate sets the ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate field's value.
+func (s *ReplicaSettingsUpdate) SetReplicaProvisionedReadCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *ReplicaSettingsUpdate {
+ s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate = v
+ return s
+}
+
+// SetReplicaProvisionedReadCapacityUnits sets the ReplicaProvisionedReadCapacityUnits field's value.
+func (s *ReplicaSettingsUpdate) SetReplicaProvisionedReadCapacityUnits(v int64) *ReplicaSettingsUpdate {
+ s.ReplicaProvisionedReadCapacityUnits = &v
+ return s
+}
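+
+// Editor's illustrative sketch, not generated code: a ReplicaSettingsUpdate is
+// passed to UpdateGlobalTableSettings to retune a single region of a global
+// table. The table name, region, and capacity value are hypothetical.
+func exampleTuneReplica(svc *DynamoDB) error {
+ settings := (&ReplicaSettingsUpdate{}).
+ SetRegionName("eu-west-1").
+ SetReplicaProvisionedReadCapacityUnits(100)
+ _, err := svc.UpdateGlobalTableSettings(&UpdateGlobalTableSettingsInput{
+ GlobalTableName: aws.String("Music"),
+ ReplicaSettingsUpdate: []*ReplicaSettingsUpdate{settings},
+ })
+ return err
+}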
+
+// Represents one of the following:
+//
+// * A new replica to be added to an existing global table.
+//
+// * New parameters for an existing replica.
+//
+// * An existing replica to be removed from an existing global table.
+type ReplicaUpdate struct {
+ _ struct{} `type:"structure"`
+
+ // The parameters required for creating a replica on an existing global table.
+ Create *CreateReplicaAction `type:"structure"`
+
+ // The name of the existing replica to be removed.
+ Delete *DeleteReplicaAction `type:"structure"`
+}
+
+// String returns the string representation
+func (s ReplicaUpdate) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicaUpdate) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicaUpdate) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicaUpdate"}
+ if s.Create != nil {
+ if err := s.Create.Validate(); err != nil {
+ invalidParams.AddNested("Create", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Delete != nil {
+ if err := s.Delete.Validate(); err != nil {
+ invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetCreate sets the Create field's value.
+func (s *ReplicaUpdate) SetCreate(v *CreateReplicaAction) *ReplicaUpdate {
+ s.Create = v
+ return s
+}
+
+// SetDelete sets the Delete field's value.
+func (s *ReplicaUpdate) SetDelete(v *DeleteReplicaAction) *ReplicaUpdate {
+ s.Delete = v
+ return s
+}
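+
+// As a hedged usage sketch, one update can add a replica while another removes
+// one; the region names below are assumptions for illustration:
+//
+//    updates := []*dynamodb.ReplicaUpdate{
+//        (&dynamodb.ReplicaUpdate{}).SetCreate(
+//            (&dynamodb.CreateReplicaAction{}).SetRegionName("eu-west-1")),
+//        (&dynamodb.ReplicaUpdate{}).SetDelete(
+//            (&dynamodb.DeleteReplicaAction{}).SetRegionName("us-east-2")),
+//    }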
+
+// Contains details for the restore.
+type RestoreSummary struct {
+ _ struct{} `type:"structure"`
+
+ // Point in time or source backup time.
+ //
+ // RestoreDateTime is a required field
+ RestoreDateTime *time.Time `type:"timestamp" required:"true"`
+
+ // Indicates whether a restore is currently in progress.
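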
+ //
+ // RestoreInProgress is a required field
+ RestoreInProgress *bool `type:"boolean" required:"true"`
+
+ // ARN of the backup from which the table was restored.
+ SourceBackupArn *string `min:"37" type:"string"`
+
+ // ARN of the source table of the backup that is being restored.
+ SourceTableArn *string `type:"string"`
+}
+
+// String returns the string representation
+func (s RestoreSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreSummary) GoString() string {
+ return s.String()
+}
+
+// SetRestoreDateTime sets the RestoreDateTime field's value.
+func (s *RestoreSummary) SetRestoreDateTime(v time.Time) *RestoreSummary {
+ s.RestoreDateTime = &v
+ return s
+}
+
+// SetRestoreInProgress sets the RestoreInProgress field's value.
+func (s *RestoreSummary) SetRestoreInProgress(v bool) *RestoreSummary {
+ s.RestoreInProgress = &v
+ return s
+}
+
+// SetSourceBackupArn sets the SourceBackupArn field's value.
+func (s *RestoreSummary) SetSourceBackupArn(v string) *RestoreSummary {
+ s.SourceBackupArn = &v
+ return s
+}
+
+// SetSourceTableArn sets the SourceTableArn field's value.
+func (s *RestoreSummary) SetSourceTableArn(v string) *RestoreSummary {
+ s.SourceTableArn = &v
+ return s
+}
+
+type RestoreTableFromBackupInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) associated with the backup.
+ //
+ // BackupArn is a required field
+ BackupArn *string `min:"37" type:"string" required:"true"`
+
+ // The name of the new table to which the backup must be restored.
+ //
+ // TargetTableName is a required field
+ TargetTableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RestoreTableFromBackupInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreTableFromBackupInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreTableFromBackupInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreTableFromBackupInput"}
+ if s.BackupArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("BackupArn"))
+ }
+ if s.BackupArn != nil && len(*s.BackupArn) < 37 {
+ invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37))
+ }
+ if s.TargetTableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetTableName"))
+ }
+ if s.TargetTableName != nil && len(*s.TargetTableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TargetTableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBackupArn sets the BackupArn field's value.
+func (s *RestoreTableFromBackupInput) SetBackupArn(v string) *RestoreTableFromBackupInput {
+ s.BackupArn = &v
+ return s
+}
+
+// SetTargetTableName sets the TargetTableName field's value.
+func (s *RestoreTableFromBackupInput) SetTargetTableName(v string) *RestoreTableFromBackupInput {
+ s.TargetTableName = &v
+ return s
+}
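+
+// A minimal sketch of restoring a table from a backup, assuming an existing
+// client svc (*dynamodb.DynamoDB) and a placeholder backup ARN and table name:
+//
+//    input := (&dynamodb.RestoreTableFromBackupInput{}).
+//        SetBackupArn("arn:aws:dynamodb:us-west-2:123456789012:table/Music/backup/01234567890123-abcdefgh").
+//        SetTargetTableName("Music-restored")
+//    out, err := svc.RestoreTableFromBackup(input)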
+
+type RestoreTableFromBackupOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The description of the table created from an existing backup.
+ TableDescription *TableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s RestoreTableFromBackupOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreTableFromBackupOutput) GoString() string {
+ return s.String()
+}
+
+// SetTableDescription sets the TableDescription field's value.
+func (s *RestoreTableFromBackupOutput) SetTableDescription(v *TableDescription) *RestoreTableFromBackupOutput {
+ s.TableDescription = v
+ return s
+}
+
+type RestoreTableToPointInTimeInput struct {
+ _ struct{} `type:"structure"`
+
+ // Time in the past to restore the table to.
+ RestoreDateTime *time.Time `type:"timestamp"`
+
+ // Name of the source table that is being restored.
+ //
+ // SourceTableName is a required field
+ SourceTableName *string `min:"3" type:"string" required:"true"`
+
+ // The name of the new table to which the source table must be restored.
+ //
+ // TargetTableName is a required field
+ TargetTableName *string `min:"3" type:"string" required:"true"`
+
+ // Restore the table to the latest possible time. LatestRestorableDateTime is
+ // typically 5 minutes before the current time.
+ UseLatestRestorableTime *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s RestoreTableToPointInTimeInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreTableToPointInTimeInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreTableToPointInTimeInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreTableToPointInTimeInput"}
+ if s.SourceTableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("SourceTableName"))
+ }
+ if s.SourceTableName != nil && len(*s.SourceTableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("SourceTableName", 3))
+ }
+ if s.TargetTableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetTableName"))
+ }
+ if s.TargetTableName != nil && len(*s.TargetTableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TargetTableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRestoreDateTime sets the RestoreDateTime field's value.
+func (s *RestoreTableToPointInTimeInput) SetRestoreDateTime(v time.Time) *RestoreTableToPointInTimeInput {
+ s.RestoreDateTime = &v
+ return s
+}
+
+// SetSourceTableName sets the SourceTableName field's value.
+func (s *RestoreTableToPointInTimeInput) SetSourceTableName(v string) *RestoreTableToPointInTimeInput {
+ s.SourceTableName = &v
+ return s
+}
+
+// SetTargetTableName sets the TargetTableName field's value.
+func (s *RestoreTableToPointInTimeInput) SetTargetTableName(v string) *RestoreTableToPointInTimeInput {
+ s.TargetTableName = &v
+ return s
+}
+
+// SetUseLatestRestorableTime sets the UseLatestRestorableTime field's value.
+func (s *RestoreTableToPointInTimeInput) SetUseLatestRestorableTime(v bool) *RestoreTableToPointInTimeInput {
+ s.UseLatestRestorableTime = &v
+ return s
+}
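+
+// A hedged sketch of a point-in-time restore to the latest restorable time;
+// the table names are assumptions and svc is an existing *dynamodb.DynamoDB:
+//
+//    input := (&dynamodb.RestoreTableToPointInTimeInput{}).
+//        SetSourceTableName("Music").
+//        SetTargetTableName("MusicMinutesAgo").
+//        SetUseLatestRestorableTime(true)
+//    _, err := svc.RestoreTableToPointInTime(input)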
+
+type RestoreTableToPointInTimeOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the properties of a table.
+ TableDescription *TableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s RestoreTableToPointInTimeOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreTableToPointInTimeOutput) GoString() string {
+ return s.String()
+}
+
+// SetTableDescription sets the TableDescription field's value.
+func (s *RestoreTableToPointInTimeOutput) SetTableDescription(v *TableDescription) *RestoreTableToPointInTimeOutput {
+ s.TableDescription = v
+ return s
+}
+
+// The description of the server-side encryption status on the specified table.
+type SSEDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The KMS customer master key (CMK) ARN used for the KMS encryption.
+ KMSMasterKeyArn *string `type:"string"`
+
+ // Server-side encryption type. The only supported value is:
+ //
+ // * KMS - Server-side encryption which uses AWS Key Management Service.
+ // Key is stored in your account and is managed by AWS KMS (KMS charges apply).
+ SSEType *string `type:"string" enum:"SSEType"`
+
+ // Represents the current state of server-side encryption. The only supported
+ // values are:
+ //
+ // * ENABLED - Server-side encryption is enabled.
+ //
+ // * UPDATING - Server-side encryption is being updated.
+ Status *string `type:"string" enum:"SSEStatus"`
+}
+
+// String returns the string representation
+func (s SSEDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SSEDescription) GoString() string {
+ return s.String()
+}
+
+// SetKMSMasterKeyArn sets the KMSMasterKeyArn field's value.
+func (s *SSEDescription) SetKMSMasterKeyArn(v string) *SSEDescription {
+ s.KMSMasterKeyArn = &v
+ return s
+}
+
+// SetSSEType sets the SSEType field's value.
+func (s *SSEDescription) SetSSEType(v string) *SSEDescription {
+ s.SSEType = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *SSEDescription) SetStatus(v string) *SSEDescription {
+ s.Status = &v
+ return s
+}
+
+// Represents the settings used to enable server-side encryption.
+type SSESpecification struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether server-side encryption is done using an AWS managed CMK
+ // or an AWS owned CMK. If enabled (true), server-side encryption type is set
+ // to KMS and an AWS managed CMK is used (AWS KMS charges apply). If disabled
+ // (false) or not specified, server-side encryption is set to AWS owned CMK.
+ Enabled *bool `type:"boolean"`
+
+ // The KMS Customer Master Key (CMK) which should be used for the KMS encryption.
+ // To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name,
+ // or alias ARN. Note that you should only provide this parameter if the key
+ // is different from the default DynamoDB Customer Master Key alias/aws/dynamodb.
+ KMSMasterKeyId *string `type:"string"`
+
+ // Server-side encryption type. The only supported value is:
+ //
+ // * KMS - Server-side encryption which uses AWS Key Management Service.
+ // Key is stored in your account and is managed by AWS KMS (KMS charges apply).
+ SSEType *string `type:"string" enum:"SSEType"`
+}
+
+// String returns the string representation
+func (s SSESpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SSESpecification) GoString() string {
+ return s.String()
+}
+
+// SetEnabled sets the Enabled field's value.
+func (s *SSESpecification) SetEnabled(v bool) *SSESpecification {
+ s.Enabled = &v
+ return s
+}
+
+// SetKMSMasterKeyId sets the KMSMasterKeyId field's value.
+func (s *SSESpecification) SetKMSMasterKeyId(v string) *SSESpecification {
+ s.KMSMasterKeyId = &v
+ return s
+}
+
+// SetSSEType sets the SSEType field's value.
+func (s *SSESpecification) SetSSEType(v string) *SSESpecification {
+ s.SSEType = &v
+ return s
+}
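+
+// As an illustrative sketch only, an SSESpecification that enables KMS
+// encryption with a hypothetical customer managed CMK alias (the "KMS" value
+// is the SSEType documented above):
+//
+//    sse := (&dynamodb.SSESpecification{}).
+//        SetEnabled(true).
+//        SetSSEType("KMS").
+//        SetKMSMasterKeyId("alias/my-dynamodb-key")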
+
+// Represents the input of a Scan operation.
+type ScanInput struct {
+ _ struct{} `type:"structure"`
+
+ // This is a legacy parameter. Use ProjectionExpression instead. For more information,
+ // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html)
+ // in the Amazon DynamoDB Developer Guide.
+ AttributesToGet []*string `min:"1" type:"list"`
+
+ // This is a legacy parameter. Use FilterExpression instead. For more information,
+ // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
+
+ // A Boolean value that determines the read consistency model during the scan:
+ //
+ // * If ConsistentRead is false, then the data returned from Scan might not
+ // contain the results from other recently completed write operations (PutItem,
+ // UpdateItem, or DeleteItem).
+ //
+ // * If ConsistentRead is true, then all of the write operations that completed
+ // before the Scan began are guaranteed to be contained in the Scan response.
+ //
+ // The default setting for ConsistentRead is false.
+ //
+ // The ConsistentRead parameter is not supported on global secondary indexes.
+ // If you scan a global secondary index with ConsistentRead set to true, you
+ // will receive a ValidationException.
+ ConsistentRead *bool `type:"boolean"`
+
+ // The primary key of the first item that this operation will evaluate. Use
+ // the value that was returned for LastEvaluatedKey in the previous operation.
+ //
+ // The data type for ExclusiveStartKey must be String, Number, or Binary. No
+ // set data types are allowed.
+ //
+ // In a parallel scan, a Scan request that includes ExclusiveStartKey must specify
+ // the same segment whose previous Scan returned the corresponding value of
+ // LastEvaluatedKey.
+ ExclusiveStartKey map[string]*AttributeValue `type:"map"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // * To access an attribute whose name conflicts with a DynamoDB reserved
+ // word.
+ //
+ // * To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // * To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // * Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be
+ // used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // * {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // * #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see Specifying Item Attributes
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute
+ // value. For example, suppose that you wanted to check whether the value of
+ // the ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+ // }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see Condition Expressions
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+ // A string that contains conditions that DynamoDB applies after the Scan operation,
+ // but before the data is returned to you. Items that do not satisfy the FilterExpression
+ // criteria are not returned.
+ //
+ // A FilterExpression is applied after the items have already been read; the
+ // process of filtering does not consume any additional read capacity units.
+ //
+ // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults)
+ // in the Amazon DynamoDB Developer Guide.
+ FilterExpression *string `type:"string"`
+
+ // The name of a secondary index to scan. This index can be any local secondary
+ // index or global secondary index. Note that if you use the IndexName parameter,
+ // you must also provide TableName.
+ IndexName *string `min:"3" type:"string"`
+
+ // The maximum number of items to evaluate (not necessarily the number of matching
+ // items). If DynamoDB processes the number of items up to the limit while processing
+ // the results, it stops the operation and returns the matching values up to
+ // that point, and a key in LastEvaluatedKey to apply in a subsequent operation,
+ // so that you can pick up where you left off. Also, if the processed dataset
+ // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation
+ // and returns the matching values up to the limit, and a key in LastEvaluatedKey
+ // to apply in a subsequent operation to continue the operation. For more information,
+ // see Working with Queries (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html)
+ // in the Amazon DynamoDB Developer Guide.
+ Limit *int64 `min:"1" type:"integer"`
+
+ // A string that identifies one or more attributes to retrieve from the specified
+ // table or index. These attributes can include scalars, sets, or elements of
+ // a JSON document. The attributes in the expression must be separated by commas.
+ //
+ // If no attribute names are specified, then all attributes will be returned.
+ // If any of the requested attributes are not found, they will not appear in
+ // the result.
+ //
+ // For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ProjectionExpression *string `type:"string"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed. Note that some operations, such as GetItem and
+ // BatchGetItem, do not access any indexes at all. In these cases, specifying
+ // INDEXES will only return ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // This is a legacy parameter. Use FilterExpression instead. For more information,
+ // see ScanFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ScanFilter.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ScanFilter map[string]*Condition `type:"map"`
+
+ // For a parallel Scan request, Segment identifies an individual segment to
+ // be scanned by an application worker.
+ //
+ // Segment IDs are zero-based, so the first segment is always 0. For example,
+ // if you want to use four application threads to scan a table or an index,
+ // then the first thread specifies a Segment value of 0, the second thread specifies
+ // 1, and so on.
+ //
+ // The value of LastEvaluatedKey returned from a parallel Scan request must
+ // be used as ExclusiveStartKey with the same segment ID in a subsequent Scan
+ // operation.
+ //
+ // The value for Segment must be greater than or equal to 0, and less than the
+ // value provided for TotalSegments.
+ //
+ // If you provide Segment, you must also provide TotalSegments.
+ Segment *int64 `type:"integer"`
+
+ // The attributes to be returned in the result. You can retrieve all item attributes,
+ // specific item attributes, the count of matching items, or in the case of
+ // an index, some or all of the attributes projected into the index.
+ //
+ // * ALL_ATTRIBUTES - Returns all of the item attributes from the specified
+ // table or index. If you query a local secondary index, then for each matching
+ // item in the index, DynamoDB fetches the entire item from the parent table.
+ // If the index is configured to project all item attributes, then all of
+ // the data can be obtained from the local secondary index, and no fetching
+ // is required.
+ //
+ // * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves
+ // all attributes that have been projected into the index. If the index is
+ // configured to project all attributes, this return value is equivalent
+ // to specifying ALL_ATTRIBUTES.
+ //
+ // * COUNT - Returns the number of matching items, rather than the matching
+ // items themselves.
+ //
+ // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet.
+ // This return value is equivalent to specifying AttributesToGet without
+ // specifying any value for Select. If you query or scan a local secondary
+ // index and request only attributes that are projected into that index,
+ // the operation reads only the index and not the table. If any of the requested
+ // attributes are not projected into the local secondary index, DynamoDB
+ // fetches each of these attributes from the parent table. This extra fetching
+ // incurs additional throughput cost and latency. If you query or scan a
+ // global secondary index, you can only request attributes that are projected
+ // into the index. Global secondary index queries cannot fetch attributes
+ // from the parent table.
+ //
+ // If neither Select nor AttributesToGet are specified, DynamoDB defaults to
+ // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when
+ // accessing an index. You cannot use both Select and AttributesToGet together
+ // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES.
+ // (This usage is equivalent to specifying AttributesToGet without any value
+ // for Select.)
+ //
+ // If you use the ProjectionExpression parameter, then the value for Select
+ // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an
+ // error.
+ Select *string `type:"string" enum:"Select"`
+
+ // The name of the table containing the requested items; or, if you provide
+ // IndexName, the name of the table to which that index belongs.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+
+ // For a parallel Scan request, TotalSegments represents the total number of
+ // segments into which the Scan operation will be divided. The value of TotalSegments
+ // corresponds to the number of application workers that will perform the parallel
+ // scan. For example, if you want to use four application threads to scan a
+ // table or an index, specify a TotalSegments value of 4.
+ //
+ // The value for TotalSegments must be greater than or equal to 1, and less
+ // than or equal to 1000000. If you specify a TotalSegments value of 1, the
+ // Scan operation will be sequential rather than parallel.
+ //
+ // If you specify TotalSegments, you must also specify Segment.
+ TotalSegments *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ScanInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScanInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ScanInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ScanInput"}
+ if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+ }
+ if s.Limit != nil && *s.Limit < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+ if s.TotalSegments != nil && *s.TotalSegments < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("TotalSegments", 1))
+ }
+ if s.ScanFilter != nil {
+ for i, v := range s.ScanFilter {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ScanFilter", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributesToGet sets the AttributesToGet field's value.
+func (s *ScanInput) SetAttributesToGet(v []*string) *ScanInput {
+ s.AttributesToGet = v
+ return s
+}
+
+// SetConditionalOperator sets the ConditionalOperator field's value.
+func (s *ScanInput) SetConditionalOperator(v string) *ScanInput {
+ s.ConditionalOperator = &v
+ return s
+}
+
+// SetConsistentRead sets the ConsistentRead field's value.
+func (s *ScanInput) SetConsistentRead(v bool) *ScanInput {
+ s.ConsistentRead = &v
+ return s
+}
+
+// SetExclusiveStartKey sets the ExclusiveStartKey field's value.
+func (s *ScanInput) SetExclusiveStartKey(v map[string]*AttributeValue) *ScanInput {
+ s.ExclusiveStartKey = v
+ return s
+}
+
+// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
+func (s *ScanInput) SetExpressionAttributeNames(v map[string]*string) *ScanInput {
+ s.ExpressionAttributeNames = v
+ return s
+}
+
+// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
+func (s *ScanInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *ScanInput {
+ s.ExpressionAttributeValues = v
+ return s
+}
+
+// SetFilterExpression sets the FilterExpression field's value.
+func (s *ScanInput) SetFilterExpression(v string) *ScanInput {
+ s.FilterExpression = &v
+ return s
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *ScanInput) SetIndexName(v string) *ScanInput {
+ s.IndexName = &v
+ return s
+}
+
+// SetLimit sets the Limit field's value.
+func (s *ScanInput) SetLimit(v int64) *ScanInput {
+ s.Limit = &v
+ return s
+}
+
+// SetProjectionExpression sets the ProjectionExpression field's value.
+func (s *ScanInput) SetProjectionExpression(v string) *ScanInput {
+ s.ProjectionExpression = &v
+ return s
+}
+
+// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
+func (s *ScanInput) SetReturnConsumedCapacity(v string) *ScanInput {
+ s.ReturnConsumedCapacity = &v
+ return s
+}
+
+// SetScanFilter sets the ScanFilter field's value.
+func (s *ScanInput) SetScanFilter(v map[string]*Condition) *ScanInput {
+ s.ScanFilter = v
+ return s
+}
+
+// SetSegment sets the Segment field's value.
+func (s *ScanInput) SetSegment(v int64) *ScanInput {
+ s.Segment = &v
+ return s
+}
+
+// SetSelect sets the Select field's value.
+func (s *ScanInput) SetSelect(v string) *ScanInput {
+ s.Select = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *ScanInput) SetTableName(v string) *ScanInput {
+ s.TableName = &v
+ return s
+}
+
+// SetTotalSegments sets the TotalSegments field's value.
+func (s *ScanInput) SetTotalSegments(v int64) *ScanInput {
+ s.TotalSegments = &v
+ return s
+}
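+
+// Pulling the documented pieces together, a hedged sketch of a filtered Scan
+// using the expression-attribute substitutions described above; the table and
+// attribute names are assumptions, svc is an existing *dynamodb.DynamoDB, and
+// aws refers to the github.com/aws/aws-sdk-go/aws package:
+//
+//    input := (&dynamodb.ScanInput{}).
+//        SetTableName("Products").
+//        SetFilterExpression("ProductStatus IN (:avail, :back, :disc)").
+//        SetExpressionAttributeValues(map[string]*dynamodb.AttributeValue{
+//            ":avail": {S: aws.String("Available")},
+//            ":back":  {S: aws.String("Backordered")},
+//            ":disc":  {S: aws.String("Discontinued")},
+//        })
+//    out, err := svc.Scan(input)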
+
+// Represents the output of a Scan operation.
+type ScanOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The capacity units consumed by the Scan operation. The data returned includes
+ // the total provisioned throughput consumed, along with statistics for the
+ // table and any indexes involved in the operation. ConsumedCapacity is only
+ // returned if the ReturnConsumedCapacity parameter was specified. For more
+ // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConsumedCapacity *ConsumedCapacity `type:"structure"`
+
+ // The number of items in the response.
+ //
+ // If you set ScanFilter in the request, then Count is the number of items returned
+ // after the filter was applied, and ScannedCount is the number of matching
+ // items before the filter was applied.
+ //
+ // If you did not use a filter in the request, then Count is the same as ScannedCount.
+ Count *int64 `type:"integer"`
+
+ // An array of item attributes that match the scan criteria. Each element in
+ // this array consists of an attribute name and the value for that attribute.
+ Items []map[string]*AttributeValue `type:"list"`
+
+ // The primary key of the item where the operation stopped, inclusive of the
+ // previous result set. Use this value to start a new operation, excluding this
+ // value in the new request.
+ //
+ // If LastEvaluatedKey is empty, then the "last page" of results has been processed
+ // and there is no more data to be retrieved.
+ //
+ // If LastEvaluatedKey is not empty, it does not necessarily mean that there
+ // is more data in the result set. The only way to know when you have reached
+ // the end of the result set is when LastEvaluatedKey is empty.
+ LastEvaluatedKey map[string]*AttributeValue `type:"map"`
+
+ // The number of items evaluated, before any ScanFilter is applied. A high ScannedCount
+ // value with few, or no, Count results indicates an inefficient Scan operation.
+ // For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // If you did not use a filter in the request, then ScannedCount is the same
+ // as Count.
+ ScannedCount *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ScanOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScanOutput) GoString() string {
+ return s.String()
+}
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.
+func (s *ScanOutput) SetConsumedCapacity(v *ConsumedCapacity) *ScanOutput {
+ s.ConsumedCapacity = v
+ return s
+}
+
+// SetCount sets the Count field's value.
+func (s *ScanOutput) SetCount(v int64) *ScanOutput {
+ s.Count = &v
+ return s
+}
+
+// SetItems sets the Items field's value.
+func (s *ScanOutput) SetItems(v []map[string]*AttributeValue) *ScanOutput {
+ s.Items = v
+ return s
+}
+
+// SetLastEvaluatedKey sets the LastEvaluatedKey field's value.
+func (s *ScanOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *ScanOutput {
+ s.LastEvaluatedKey = v
+ return s
+}
+
+// SetScannedCount sets the ScannedCount field's value.
+func (s *ScanOutput) SetScannedCount(v int64) *ScanOutput {
+ s.ScannedCount = &v
+ return s
+}
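+
+// Because an empty LastEvaluatedKey is the only reliable end-of-results
+// signal, a paginated Scan is typically written as a loop; a minimal sketch,
+// reusing the hypothetical svc and input from the sketch above:
+//
+//    for {
+//        out, err := svc.Scan(input)
+//        if err != nil {
+//            break // handle the error in real code
+//        }
+//        // process out.Items here
+//        if len(out.LastEvaluatedKey) == 0 {
+//            break // the last page has been read
+//        }
+//        input.SetExclusiveStartKey(out.LastEvaluatedKey)
+//    }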
+
+// Contains the details of the table when the backup was created.
+type SourceTableDetails struct {
+ _ struct{} `type:"structure"`
+
+ // Controls how you are charged for read and write throughput and how you manage
+ // capacity. This setting can be changed later.
+ //
+ // * PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend
+ // using PROVISIONED for predictable workloads.
+ //
+ // * PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST.
+ // We recommend using PAY_PER_REQUEST for unpredictable workloads.
+ BillingMode *string `type:"string" enum:"BillingMode"`
+
+ // Number of items in the table. Please note this is an approximate value.
+ ItemCount *int64 `type:"long"`
+
+ // Schema of the table.
+ //
+ // KeySchema is a required field
+ KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
+
+ // Read and write IOPS on the table when the backup was created.
+ //
+ // ProvisionedThroughput is a required field
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"`
+
+ // ARN of the table for which backup was created.
+ TableArn *string `type:"string"`
+
+ // Time when the source table was created.
+ //
+ // TableCreationDateTime is a required field
+ TableCreationDateTime *time.Time `type:"timestamp" required:"true"`
+
+ // Unique identifier for the table for which the backup was created.
+ //
+ // TableId is a required field
+ TableId *string `type:"string" required:"true"`
+
+ // The name of the table for which the backup was created.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+
+ // Size of the table in bytes. Please note this is an approximate value.
+ TableSizeBytes *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s SourceTableDetails) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SourceTableDetails) GoString() string {
+ return s.String()
+}
+
+// SetBillingMode sets the BillingMode field's value.
+func (s *SourceTableDetails) SetBillingMode(v string) *SourceTableDetails {
+ s.BillingMode = &v
+ return s
+}
+
+// SetItemCount sets the ItemCount field's value.
+func (s *SourceTableDetails) SetItemCount(v int64) *SourceTableDetails {
+ s.ItemCount = &v
+ return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *SourceTableDetails) SetKeySchema(v []*KeySchemaElement) *SourceTableDetails {
+ s.KeySchema = v
+ return s
+}
+
+// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
+func (s *SourceTableDetails) SetProvisionedThroughput(v *ProvisionedThroughput) *SourceTableDetails {
+ s.ProvisionedThroughput = v
+ return s
+}
+
+// SetTableArn sets the TableArn field's value.
+func (s *SourceTableDetails) SetTableArn(v string) *SourceTableDetails {
+ s.TableArn = &v
+ return s
+}
+
+// SetTableCreationDateTime sets the TableCreationDateTime field's value.
+func (s *SourceTableDetails) SetTableCreationDateTime(v time.Time) *SourceTableDetails {
+ s.TableCreationDateTime = &v
+ return s
+}
+
+// SetTableId sets the TableId field's value.
+func (s *SourceTableDetails) SetTableId(v string) *SourceTableDetails {
+ s.TableId = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *SourceTableDetails) SetTableName(v string) *SourceTableDetails {
+ s.TableName = &v
+ return s
+}
+
+// SetTableSizeBytes sets the TableSizeBytes field's value.
+func (s *SourceTableDetails) SetTableSizeBytes(v int64) *SourceTableDetails {
+ s.TableSizeBytes = &v
+ return s
+}
+
+// Contains the details of the features enabled on the table when the backup
+// was created. For example, LSIs, GSIs, streams, TTL.
+type SourceTableFeatureDetails struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the GSI properties for the table when the backup was created.
+ // It includes the IndexName, KeySchema, Projection and ProvisionedThroughput
+ // for the GSIs on the table at the time of backup.
+ GlobalSecondaryIndexes []*GlobalSecondaryIndexInfo `type:"list"`
+
+ // Represents the LSI properties for the table when the backup was created.
+ // It includes the IndexName, KeySchema and Projection for the LSIs on the table
+ // at the time of backup.
+ LocalSecondaryIndexes []*LocalSecondaryIndexInfo `type:"list"`
+
+ // The description of the server-side encryption status on the table when the
+ // backup was created.
+ SSEDescription *SSEDescription `type:"structure"`
+
+ // Stream settings on the table when the backup was created.
+ StreamDescription *StreamSpecification `type:"structure"`
+
+ // Time to Live settings on the table when the backup was created.
+ TimeToLiveDescription *TimeToLiveDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s SourceTableFeatureDetails) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SourceTableFeatureDetails) GoString() string {
+ return s.String()
+}
+
+// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
+func (s *SourceTableFeatureDetails) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndexInfo) *SourceTableFeatureDetails {
+ s.GlobalSecondaryIndexes = v
+ return s
+}
+
+// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value.
+func (s *SourceTableFeatureDetails) SetLocalSecondaryIndexes(v []*LocalSecondaryIndexInfo) *SourceTableFeatureDetails {
+ s.LocalSecondaryIndexes = v
+ return s
+}
+
+// SetSSEDescription sets the SSEDescription field's value.
+func (s *SourceTableFeatureDetails) SetSSEDescription(v *SSEDescription) *SourceTableFeatureDetails {
+ s.SSEDescription = v
+ return s
+}
+
+// SetStreamDescription sets the StreamDescription field's value.
+func (s *SourceTableFeatureDetails) SetStreamDescription(v *StreamSpecification) *SourceTableFeatureDetails {
+ s.StreamDescription = v
+ return s
+}
+
+// SetTimeToLiveDescription sets the TimeToLiveDescription field's value.
+func (s *SourceTableFeatureDetails) SetTimeToLiveDescription(v *TimeToLiveDescription) *SourceTableFeatureDetails {
+ s.TimeToLiveDescription = v
+ return s
+}
+
+// Represents the DynamoDB Streams configuration for a table in DynamoDB.
+type StreamSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether DynamoDB Streams is enabled (true) or disabled (false)
+ // on the table.
+ StreamEnabled *bool `type:"boolean"`
+
+ // When an item in the table is modified, StreamViewType determines what information
+ // is written to the stream for this table. Valid values for StreamViewType
+ // are:
+ //
+ // * KEYS_ONLY - Only the key attributes of the modified item are written
+ // to the stream.
+ //
+ // * NEW_IMAGE - The entire item, as it appears after it was modified, is
+ // written to the stream.
+ //
+ // * OLD_IMAGE - The entire item, as it appeared before it was modified,
+ // is written to the stream.
+ //
+ // * NEW_AND_OLD_IMAGES - Both the new and the old item images of the item
+ // are written to the stream.
+ StreamViewType *string `type:"string" enum:"StreamViewType"`
+}
+
+// String returns the string representation
+func (s StreamSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StreamSpecification) GoString() string {
+ return s.String()
+}
+
+// SetStreamEnabled sets the StreamEnabled field's value.
+func (s *StreamSpecification) SetStreamEnabled(v bool) *StreamSpecification {
+ s.StreamEnabled = &v
+ return s
+}
+
+// SetStreamViewType sets the StreamViewType field's value.
+func (s *StreamSpecification) SetStreamViewType(v string) *StreamSpecification {
+ s.StreamViewType = &v
+ return s
+}
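+
+// A hedged sketch of a stream specification that captures both item images,
+// using the NEW_AND_OLD_IMAGES view type documented above:
+//
+//    spec := (&dynamodb.StreamSpecification{}).
+//        SetStreamEnabled(true).
+//        SetStreamViewType("NEW_AND_OLD_IMAGES")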
+
+// Represents the properties of a table.
+type TableDescription struct {
+ _ struct{} `type:"structure"`
+
+ // An array of AttributeDefinition objects. Each of these objects describes
+ // one attribute in the table and index key schema.
+ //
+ // Each AttributeDefinition object in this array is composed of:
+ //
+ // * AttributeName - The name of the attribute.
+ //
+ // * AttributeType - The data type for the attribute.
+ AttributeDefinitions []*AttributeDefinition `type:"list"`
+
+ // Contains the details for the read/write capacity mode.
+ BillingModeSummary *BillingModeSummary `type:"structure"`
+
+ // The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/)
+ // format.
+ CreationDateTime *time.Time `type:"timestamp"`
+
+ // The global secondary indexes, if any, on the table. Each index is scoped
+ // to a given partition key value. Each element is composed of:
+ //
+ // * Backfilling - If true, then the index is currently in the backfilling
+ // phase. Backfilling occurs only when a new global secondary index is added
+ // to the table; it is the process by which DynamoDB populates the new index
+ // with data from the table. (This attribute does not appear for indexes
+ // that were created during a CreateTable operation.)
+ //
+ // * IndexName - The name of the global secondary index.
+ //
+ // * IndexSizeBytes - The total size of the global secondary index, in bytes.
+ // DynamoDB updates this value approximately every six hours. Recent changes
+ // might not be reflected in this value.
+ //
+ // * IndexStatus - The current status of the global secondary index: CREATING
+ // - The index is being created. UPDATING - The index is being updated. DELETING
+ // - The index is being deleted. ACTIVE - The index is ready for use.
+ //
+ // * ItemCount - The number of items in the global secondary index. DynamoDB
+ // updates this value approximately every six hours. Recent changes might
+ // not be reflected in this value.
+ //
+ // * KeySchema - Specifies the complete index key schema. The attribute names
+ // in the key schema must be between 1 and 255 characters (inclusive). The
+ // key schema must begin with the same partition key as the table.
+ //
+ // * Projection - Specifies attributes that are copied (projected) from the
+ // table into the index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected. Each attribute
+ // specification is composed of: ProjectionType - One of the following: KEYS_ONLY
+ // - Only the index and primary keys are projected into the index. INCLUDE
+ // - Only the specified table attributes are projected into the index. The
+ // list of projected attributes are in NonKeyAttributes. ALL - All of the
+ // table attributes are projected into the index. NonKeyAttributes - A list
+ // of one or more non-key attribute names that are projected into the secondary
+ // index. The total count of attributes provided in NonKeyAttributes, summed
+ // across all of the secondary indexes, must not exceed 20. If you project
+ // the same attribute into two different indexes, this counts as two distinct
+ // attributes when determining the total.
+ //
+ // * ProvisionedThroughput - The provisioned throughput settings for the
+ // global secondary index, consisting of read and write capacity units, along
+ // with data about increases and decreases.
+ //
+ // If the table is in the DELETING state, no information about indexes will
+ // be returned.
+ GlobalSecondaryIndexes []*GlobalSecondaryIndexDescription `type:"list"`
+
+ // The number of items in the specified table. DynamoDB updates this value approximately
+ // every six hours. Recent changes might not be reflected in this value.
+ ItemCount *int64 `type:"long"`
+
+ // The primary key structure for the table. Each KeySchemaElement consists of:
+ //
+ // * AttributeName - The name of the attribute.
+ //
+ // * KeyType - The role of the attribute: HASH - partition key RANGE - sort
+ // key The partition key of an item is also known as its hash attribute.
+ // The term "hash attribute" derives from DynamoDB's usage of an internal
+ // hash function to evenly distribute data items across partitions, based
+ // on their partition key values. The sort key of an item is also known as
+ // its range attribute. The term "range attribute" derives from the way DynamoDB
+ // stores items with the same partition key physically close together, in
+ // sorted order by the sort key value.
+ //
+ // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey)
+ // in the Amazon DynamoDB Developer Guide.
+ KeySchema []*KeySchemaElement `min:"1" type:"list"`
+
+ // The Amazon Resource Name (ARN) that uniquely identifies the latest stream
+ // for this table.
+ LatestStreamArn *string `min:"37" type:"string"`
+
+ // A timestamp, in ISO 8601 format, for this stream.
+ //
+ // Note that LatestStreamLabel is not a unique identifier for the stream, because
+ // it is possible that a stream from another table might have the same timestamp.
+ // However, the combination of the following three elements is guaranteed to
+ // be unique:
+ //
+ // * the AWS customer ID.
+ //
+ // * the table name.
+ //
+ // * the StreamLabel.
+ LatestStreamLabel *string `type:"string"`
+
+ // Represents one or more local secondary indexes on the table. Each index is
+ // scoped to a given partition key value. Tables with one or more local secondary
+ // indexes are subject to an item collection size limit, where the amount of
+ // data within a given item collection cannot exceed 10 GB. Each element is
+ // composed of:
+ //
+ // * IndexName - The name of the local secondary index.
+ //
+ // * KeySchema - Specifies the complete index key schema. The attribute names
+ // in the key schema must be between 1 and 255 characters (inclusive). The
+ // key schema must begin with the same partition key as the table.
+ //
+ // * Projection - Specifies attributes that are copied (projected) from the
+ // table into the index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected. Each attribute
+ // specification is composed of: ProjectionType - One of the following: KEYS_ONLY
+ // - Only the index and primary keys are projected into the index. INCLUDE
+ // - Only the specified table attributes are projected into the index. The
+ // list of projected attributes are in NonKeyAttributes. ALL - All of the
+ // table attributes are projected into the index. NonKeyAttributes - A list
+ // of one or more non-key attribute names that are projected into the secondary
+ // index. The total count of attributes provided in NonKeyAttributes, summed
+ // across all of the secondary indexes, must not exceed 20. If you project
+ // the same attribute into two different indexes, this counts as two distinct
+ // attributes when determining the total.
+ //
+ // * IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB
+ // updates this value approximately every six hours. Recent changes might
+ // not be reflected in this value.
+ //
+ // * ItemCount - Represents the number of items in the index. DynamoDB updates
+ // this value approximately every six hours. Recent changes might not be
+ // reflected in this value.
+ //
+ // If the table is in the DELETING state, no information about indexes will
+ // be returned.
+ LocalSecondaryIndexes []*LocalSecondaryIndexDescription `type:"list"`
+
+ // The provisioned throughput settings for the table, consisting of read and
+ // write capacity units, along with data about increases and decreases.
+ ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"`
+
+ // Contains details for the restore.
+ RestoreSummary *RestoreSummary `type:"structure"`
+
+ // The description of the server-side encryption status on the specified table.
+ SSEDescription *SSEDescription `type:"structure"`
+
+ // The current DynamoDB Streams configuration for the table.
+ StreamSpecification *StreamSpecification `type:"structure"`
+
+ // The Amazon Resource Name (ARN) that uniquely identifies the table.
+ TableArn *string `type:"string"`
+
+ // Unique identifier for the table for which the backup was created.
+ TableId *string `type:"string"`
+
+ // The name of the table.
+ TableName *string `min:"3" type:"string"`
+
+ // The total size of the specified table, in bytes. DynamoDB updates this value
+ // approximately every six hours. Recent changes might not be reflected in this
+ // value.
+ TableSizeBytes *int64 `type:"long"`
+
+ // The current state of the table:
+ //
+ // * CREATING - The table is being created.
+ //
+ // * UPDATING - The table is being updated.
+ //
+ // * DELETING - The table is being deleted.
+ //
+ // * ACTIVE - The table is ready for use.
+ TableStatus *string `type:"string" enum:"TableStatus"`
+}
+
+// String returns the string representation
+func (s TableDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TableDescription) GoString() string {
+ return s.String()
+}
+
+// SetAttributeDefinitions sets the AttributeDefinitions field's value.
+func (s *TableDescription) SetAttributeDefinitions(v []*AttributeDefinition) *TableDescription {
+ s.AttributeDefinitions = v
+ return s
+}
+
+// SetBillingModeSummary sets the BillingModeSummary field's value.
+func (s *TableDescription) SetBillingModeSummary(v *BillingModeSummary) *TableDescription {
+ s.BillingModeSummary = v
+ return s
+}
+
+// SetCreationDateTime sets the CreationDateTime field's value.
+func (s *TableDescription) SetCreationDateTime(v time.Time) *TableDescription {
+ s.CreationDateTime = &v
+ return s
+}
+
+// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
+func (s *TableDescription) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndexDescription) *TableDescription {
+ s.GlobalSecondaryIndexes = v
+ return s
+}
+
+// SetItemCount sets the ItemCount field's value.
+func (s *TableDescription) SetItemCount(v int64) *TableDescription {
+ s.ItemCount = &v
+ return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *TableDescription) SetKeySchema(v []*KeySchemaElement) *TableDescription {
+ s.KeySchema = v
+ return s
+}
+
+// SetLatestStreamArn sets the LatestStreamArn field's value.
+func (s *TableDescription) SetLatestStreamArn(v string) *TableDescription {
+ s.LatestStreamArn = &v
+ return s
+}
+
+// SetLatestStreamLabel sets the LatestStreamLabel field's value.
+func (s *TableDescription) SetLatestStreamLabel(v string) *TableDescription {
+ s.LatestStreamLabel = &v
+ return s
+}
+
+// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value.
+func (s *TableDescription) SetLocalSecondaryIndexes(v []*LocalSecondaryIndexDescription) *TableDescription {
+ s.LocalSecondaryIndexes = v
+ return s
+}
+
+// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
+func (s *TableDescription) SetProvisionedThroughput(v *ProvisionedThroughputDescription) *TableDescription {
+ s.ProvisionedThroughput = v
+ return s
+}
+
+// SetRestoreSummary sets the RestoreSummary field's value.
+func (s *TableDescription) SetRestoreSummary(v *RestoreSummary) *TableDescription {
+ s.RestoreSummary = v
+ return s
+}
+
+// SetSSEDescription sets the SSEDescription field's value.
+func (s *TableDescription) SetSSEDescription(v *SSEDescription) *TableDescription {
+ s.SSEDescription = v
+ return s
+}
+
+// SetStreamSpecification sets the StreamSpecification field's value.
+func (s *TableDescription) SetStreamSpecification(v *StreamSpecification) *TableDescription {
+ s.StreamSpecification = v
+ return s
+}
+
+// SetTableArn sets the TableArn field's value.
+func (s *TableDescription) SetTableArn(v string) *TableDescription {
+ s.TableArn = &v
+ return s
+}
+
+// SetTableId sets the TableId field's value.
+func (s *TableDescription) SetTableId(v string) *TableDescription {
+ s.TableId = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *TableDescription) SetTableName(v string) *TableDescription {
+ s.TableName = &v
+ return s
+}
+
+// SetTableSizeBytes sets the TableSizeBytes field's value.
+func (s *TableDescription) SetTableSizeBytes(v int64) *TableDescription {
+ s.TableSizeBytes = &v
+ return s
+}
+
+// SetTableStatus sets the TableStatus field's value.
+func (s *TableDescription) SetTableStatus(v string) *TableDescription {
+ s.TableStatus = &v
+ return s
+}
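+
+// As an illustrative sketch, a TableDescription is usually inspected rather
+// than built; assuming it comes from a DescribeTable call on an existing
+// client svc, with aws being the github.com/aws/aws-sdk-go/aws package:
+//
+//    out, err := svc.DescribeTable(&dynamodb.DescribeTableInput{
+//        TableName: aws.String("Music"),
+//    })
+//    if err == nil {
+//        desc := out.Table
+//        fmt.Println(aws.StringValue(desc.TableStatus), aws.Int64Value(desc.ItemCount))
+//    }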
+
+// Describes a tag. A tag is a key-value pair. You can add up to 50 tags to
+// a single DynamoDB table.
+//
+// AWS-assigned tag names and values automatically carry the aws: prefix, which
+// the user cannot assign. AWS-assigned tag names do not count towards the tag
+// limit of 50. User-assigned tag names have the prefix user: in the Cost
+// Allocation Report. You cannot backdate the application of a tag.
+//
+// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
+// in the Amazon DynamoDB Developer Guide.
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+ // The key of the tag. Tag keys are case sensitive. Each DynamoDB table can
+ // have only one tag with a given key. If you add a tag whose key already
+ // exists, the existing tag's value is updated to the new value.
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // The value of the tag. Tag values are case-sensitive and can be null.
+ //
+ // Value is a required field
+ Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tag) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tag"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *Tag) SetKey(v string) *Tag {
+ s.Key = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *Tag) SetValue(v string) *Tag {
+ s.Value = &v
+ return s
+}
+
+type TagResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // Identifies the Amazon DynamoDB resource to which tags should be added. This
+ // value is an Amazon Resource Name (ARN).
+ //
+ // ResourceArn is a required field
+ ResourceArn *string `min:"1" type:"string" required:"true"`
+
+ // The tags to be assigned to the Amazon DynamoDB resource.
+ //
+ // Tags is a required field
+ Tags []*Tag `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s TagResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TagResourceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TagResourceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"}
+ if s.ResourceArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+ }
+ if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
+ }
+ if s.Tags == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tags"))
+ }
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput {
+ s.ResourceArn = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput {
+ s.Tags = v
+ return s
+}
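+
+// A minimal sketch of tagging a table by ARN; the ARN, key, and value are
+// assumptions for illustration, and svc is an existing *dynamodb.DynamoDB:
+//
+//    input := (&dynamodb.TagResourceInput{}).
+//        SetResourceArn("arn:aws:dynamodb:us-west-2:123456789012:table/Music").
+//        SetTags([]*dynamodb.Tag{
+//            (&dynamodb.Tag{}).SetKey("Environment").SetValue("Production"),
+//        })
+//    _, err := svc.TagResource(input)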
+
+type TagResourceOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s TagResourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TagResourceOutput) GoString() string {
+ return s.String()
+}
+
+// The description of the Time to Live (TTL) status on the specified table.
+type TimeToLiveDescription struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the TTL attribute for items in the table.
+ AttributeName *string `min:"1" type:"string"`
+
+ // The TTL status for the table.
+ TimeToLiveStatus *string `type:"string" enum:"TimeToLiveStatus"`
+}
+
+// String returns the string representation
+func (s TimeToLiveDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TimeToLiveDescription) GoString() string {
+ return s.String()
+}
+
+// SetAttributeName sets the AttributeName field's value.
+func (s *TimeToLiveDescription) SetAttributeName(v string) *TimeToLiveDescription {
+ s.AttributeName = &v
+ return s
+}
+
+// SetTimeToLiveStatus sets the TimeToLiveStatus field's value.
+func (s *TimeToLiveDescription) SetTimeToLiveStatus(v string) *TimeToLiveDescription {
+ s.TimeToLiveStatus = &v
+ return s
+}
+
+// Represents the settings used to enable or disable Time to Live (TTL) for
+// the specified table.
+type TimeToLiveSpecification struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the TTL attribute used to store the expiration time for items
+ // in the table.
+ //
+ // AttributeName is a required field
+ AttributeName *string `min:"1" type:"string" required:"true"`
+
+ // Indicates whether TTL is to be enabled (true) or disabled (false) on the
+ // table.
+ //
+ // Enabled is a required field
+ Enabled *bool `type:"boolean" required:"true"`
+}
+
+// String returns the string representation
+func (s TimeToLiveSpecification) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TimeToLiveSpecification) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TimeToLiveSpecification) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TimeToLiveSpecification"}
+ if s.AttributeName == nil {
+ invalidParams.Add(request.NewErrParamRequired("AttributeName"))
+ }
+ if s.AttributeName != nil && len(*s.AttributeName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1))
+ }
+ if s.Enabled == nil {
+ invalidParams.Add(request.NewErrParamRequired("Enabled"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributeName sets the AttributeName field's value.
+func (s *TimeToLiveSpecification) SetAttributeName(v string) *TimeToLiveSpecification {
+ s.AttributeName = &v
+ return s
+}
+
+// SetEnabled sets the Enabled field's value.
+func (s *TimeToLiveSpecification) SetEnabled(v bool) *TimeToLiveSpecification {
+ s.Enabled = &v
+ return s
+}
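+
+// exampleTimeToLiveSpecification is a hand-written illustrative sketch, not
+// generated code: TTL is enabled by naming the attribute that stores the
+// expiration time and setting Enabled to true. The attribute name is a
+// placeholder.
+func exampleTimeToLiveSpecification() *TimeToLiveSpecification {
+ spec := (&TimeToLiveSpecification{}).
+ SetAttributeName("ExpiresAt").
+ SetEnabled(true)
+ if err := spec.Validate(); err != nil {
+ return nil
+ }
+ return spec
+}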
+
+// Specifies an item to be retrieved as part of the transaction.
+type TransactGetItem struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the primary key that identifies the item to get, together with the
+ // name of the table that contains the item, and optionally the specific attributes
+ // of the item to retrieve.
+ //
+ // Get is a required field
+ Get *Get `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s TransactGetItem) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TransactGetItem) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TransactGetItem) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TransactGetItem"}
+ if s.Get == nil {
+ invalidParams.Add(request.NewErrParamRequired("Get"))
+ }
+ if s.Get != nil {
+ if err := s.Get.Validate(); err != nil {
+ invalidParams.AddNested("Get", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGet sets the Get field's value.
+func (s *TransactGetItem) SetGet(v *Get) *TransactGetItem {
+ s.Get = v
+ return s
+}
+
+type TransactGetItemsInput struct {
+ _ struct{} `type:"structure"`
+
+ // A value of TOTAL causes consumed capacity information to be returned, and
+ // a value of NONE prevents that information from being returned. No other value
+ // is valid.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // An ordered array of up to 25 TransactGetItem objects, each of which contains
+ // a Get structure.
+ //
+ // TransactItems is a required field
+ TransactItems []*TransactGetItem `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s TransactGetItemsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TransactGetItemsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TransactGetItemsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TransactGetItemsInput"}
+ if s.TransactItems == nil {
+ invalidParams.Add(request.NewErrParamRequired("TransactItems"))
+ }
+ if s.TransactItems != nil && len(s.TransactItems) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("TransactItems", 1))
+ }
+ if s.TransactItems != nil {
+ for i, v := range s.TransactItems {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactItems", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
+func (s *TransactGetItemsInput) SetReturnConsumedCapacity(v string) *TransactGetItemsInput {
+ s.ReturnConsumedCapacity = &v
+ return s
+}
+
+// SetTransactItems sets the TransactItems field's value.
+func (s *TransactGetItemsInput) SetTransactItems(v []*TransactGetItem) *TransactGetItemsInput {
+ s.TransactItems = v
+ return s
+}
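+
+// exampleTransactGetItemsInput is a hand-written illustrative sketch, not
+// generated code: each of the (up to 25) TransactItems entries wraps a Get
+// naming a table and primary key. Table, key, and value are placeholders.
+func exampleTransactGetItemsInput() *TransactGetItemsInput {
+ get := (&Get{}).
+ SetTableName("Music").
+ SetKey(map[string]*AttributeValue{
+ "Artist": (&AttributeValue{}).SetS("No One You Know"),
+ })
+ return (&TransactGetItemsInput{}).
+ SetReturnConsumedCapacity(ReturnConsumedCapacityTotal).
+ SetTransactItems([]*TransactGetItem{
+ (&TransactGetItem{}).SetGet(get),
+ })
+}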
+
+type TransactGetItemsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If the ReturnConsumedCapacity value was TOTAL, this is an array of ConsumedCapacity
+ // objects, one for each table addressed by TransactGetItem objects in the TransactItems
+ // parameter. These ConsumedCapacity objects report the read-capacity units
+ // consumed by the TransactGetItems call in that table.
+ ConsumedCapacity []*ConsumedCapacity `type:"list"`
+
+ // An ordered array of up to 25 ItemResponse objects, each of which corresponds
+ // to the TransactGetItem object in the same position in the TransactItems array.
+ // Each ItemResponse object contains a Map of the name-value pairs that are
+ // the projected attributes of the requested item.
+ //
+ // If a requested item could not be retrieved, the corresponding ItemResponse
+ // object is Null; if the requested item has no projected attributes, the
+ // corresponding ItemResponse object is an empty Map.
+ Responses []*ItemResponse `min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s TransactGetItemsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TransactGetItemsOutput) GoString() string {
+ return s.String()
+}
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.
+func (s *TransactGetItemsOutput) SetConsumedCapacity(v []*ConsumedCapacity) *TransactGetItemsOutput {
+ s.ConsumedCapacity = v
+ return s
+}
+
+// SetResponses sets the Responses field's value.
+func (s *TransactGetItemsOutput) SetResponses(v []*ItemResponse) *TransactGetItemsOutput {
+ s.Responses = v
+ return s
+}
+
+// A single request within a TransactWriteItems operation, which can perform
+// update, put, delete, and condition-check operations on multiple items in one
+// or more tables atomically. Exactly one of the fields below must be set.
+type TransactWriteItem struct {
+ _ struct{} `type:"structure"`
+
+ // A request to perform a check item operation.
+ ConditionCheck *ConditionCheck `type:"structure"`
+
+ // A request to perform a DeleteItem operation.
+ Delete *Delete `type:"structure"`
+
+ // A request to perform a PutItem operation.
+ Put *Put `type:"structure"`
+
+ // A request to perform an UpdateItem operation.
+ Update *Update `type:"structure"`
+}
+
+// String returns the string representation
+func (s TransactWriteItem) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TransactWriteItem) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TransactWriteItem) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TransactWriteItem"}
+ if s.ConditionCheck != nil {
+ if err := s.ConditionCheck.Validate(); err != nil {
+ invalidParams.AddNested("ConditionCheck", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Delete != nil {
+ if err := s.Delete.Validate(); err != nil {
+ invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Put != nil {
+ if err := s.Put.Validate(); err != nil {
+ invalidParams.AddNested("Put", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Update != nil {
+ if err := s.Update.Validate(); err != nil {
+ invalidParams.AddNested("Update", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetConditionCheck sets the ConditionCheck field's value.
+func (s *TransactWriteItem) SetConditionCheck(v *ConditionCheck) *TransactWriteItem {
+ s.ConditionCheck = v
+ return s
+}
+
+// SetDelete sets the Delete field's value.
+func (s *TransactWriteItem) SetDelete(v *Delete) *TransactWriteItem {
+ s.Delete = v
+ return s
+}
+
+// SetPut sets the Put field's value.
+func (s *TransactWriteItem) SetPut(v *Put) *TransactWriteItem {
+ s.Put = v
+ return s
+}
+
+// SetUpdate sets the Update field's value.
+func (s *TransactWriteItem) SetUpdate(v *Update) *TransactWriteItem {
+ s.Update = v
+ return s
+}
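+
+// exampleTransactWriteItem is a hand-written illustrative sketch, not
+// generated code: this entry wraps a Put; a real transaction would mix
+// ConditionCheck, Put, Update, and Delete entries as needed. Table and item
+// values are placeholders.
+func exampleTransactWriteItem() *TransactWriteItem {
+ put := (&Put{}).
+ SetTableName("Music").
+ SetItem(map[string]*AttributeValue{
+ "Artist": (&AttributeValue{}).SetS("No One You Know"),
+ "SongTitle": (&AttributeValue{}).SetS("Call Me Today"),
+ })
+ return (&TransactWriteItem{}).SetPut(put)
+}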
+
+type TransactWriteItemsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Providing a ClientRequestToken makes the call to TransactWriteItems idempotent,
+ // meaning that multiple identical calls have the same effect as one single
+ // call.
+ //
+ // Although multiple identical calls using the same client request token produce
+ // the same result on the server (no side effects), the responses to the calls
+ // might not be the same. If the ReturnConsumedCapacity parameter is set, then
+ // the initial TransactWriteItems call returns the amount of write capacity
+ // units consumed in making the changes. Subsequent TransactWriteItems calls
+ // with the same client token return the number of read capacity units consumed
+ // in reading the item.
+ //
+ // A client request token is valid for 10 minutes after the first request that
+ // uses it is completed. After 10 minutes, any request with the same client
+ // token is treated as a new request. Do not resubmit the same request with
+ // the same client token for more than 10 minutes, or the result might not be
+ // idempotent.
+ //
+ // If you submit a request with the same client token but a change in other
+ // parameters within the 10-minute idempotency window, DynamoDB returns an IdempotentParameterMismatch
+ // exception.
+ ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed. Note that some operations, such as GetItem and
+ // BatchGetItem, do not access any indexes at all. In these cases, specifying
+ // INDEXES will only return ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // Determines whether item collection metrics are returned. If set to SIZE,
+ // the response includes statistics about any item collections that were modified
+ // during the operation. If set to NONE (the default), no statistics are returned.
+ ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+
+ // An ordered array of up to 25 TransactWriteItem objects, each of which contains
+ // a ConditionCheck, Put, Update, or Delete object. These can operate on items
+ // in different tables, but the tables must reside in the same AWS account and
+ // Region, and no two of them can operate on the same item.
+ //
+ // TransactItems is a required field
+ TransactItems []*TransactWriteItem `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s TransactWriteItemsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TransactWriteItemsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TransactWriteItemsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TransactWriteItemsInput"}
+ if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
+ }
+ if s.TransactItems == nil {
+ invalidParams.Add(request.NewErrParamRequired("TransactItems"))
+ }
+ if s.TransactItems != nil && len(s.TransactItems) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("TransactItems", 1))
+ }
+ if s.TransactItems != nil {
+ for i, v := range s.TransactItems {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactItems", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetClientRequestToken sets the ClientRequestToken field's value.
+func (s *TransactWriteItemsInput) SetClientRequestToken(v string) *TransactWriteItemsInput {
+ s.ClientRequestToken = &v
+ return s
+}
+
+// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
+func (s *TransactWriteItemsInput) SetReturnConsumedCapacity(v string) *TransactWriteItemsInput {
+ s.ReturnConsumedCapacity = &v
+ return s
+}
+
+// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value.
+func (s *TransactWriteItemsInput) SetReturnItemCollectionMetrics(v string) *TransactWriteItemsInput {
+ s.ReturnItemCollectionMetrics = &v
+ return s
+}
+
+// SetTransactItems sets the TransactItems field's value.
+func (s *TransactWriteItemsInput) SetTransactItems(v []*TransactWriteItem) *TransactWriteItemsInput {
+ s.TransactItems = v
+ return s
+}
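+
+// exampleTransactWriteItemsInput is a hand-written illustrative sketch, not
+// generated code: supplying a ClientRequestToken makes retries within the
+// 10-minute window idempotent, as described above. The token here is a
+// placeholder; because the field is tagged idempotencyToken, the SDK can also
+// fill it in automatically when it is left empty.
+func exampleTransactWriteItemsInput(item *TransactWriteItem) *TransactWriteItemsInput {
+ return (&TransactWriteItemsInput{}).
+ SetClientRequestToken("order-7d1e2a4b-retry-safe").
+ SetReturnConsumedCapacity(ReturnConsumedCapacityTotal).
+ SetTransactItems([]*TransactWriteItem{item})
+}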
+
+type TransactWriteItemsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The capacity units consumed by the entire TransactWriteItems operation. The
+ // values of the list are ordered according to the ordering of the TransactItems
+ // request parameter.
+ ConsumedCapacity []*ConsumedCapacity `type:"list"`
+
+ // A list of tables that were processed by TransactWriteItems and, for each
+ // table, information about any item collections that were affected by individual
+ // UpdateItem, PutItem, or DeleteItem operations.
+ ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"`
+}
+
+// String returns the string representation
+func (s TransactWriteItemsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TransactWriteItemsOutput) GoString() string {
+ return s.String()
+}
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.
+func (s *TransactWriteItemsOutput) SetConsumedCapacity(v []*ConsumedCapacity) *TransactWriteItemsOutput {
+ s.ConsumedCapacity = v
+ return s
+}
+
+// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value.
+func (s *TransactWriteItemsOutput) SetItemCollectionMetrics(v map[string][]*ItemCollectionMetrics) *TransactWriteItemsOutput {
+ s.ItemCollectionMetrics = v
+ return s
+}
+
+type UntagResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The DynamoDB resource that the tags will be removed from. This value is an
+ // Amazon Resource Name (ARN).
+ //
+ // ResourceArn is a required field
+ ResourceArn *string `min:"1" type:"string" required:"true"`
+
+ // A list of tag keys. Existing tags of the resource whose keys are members
+ // of this list will be removed from the DynamoDB resource.
+ //
+ // TagKeys is a required field
+ TagKeys []*string `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s UntagResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UntagResourceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UntagResourceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"}
+ if s.ResourceArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+ }
+ if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
+ }
+ if s.TagKeys == nil {
+ invalidParams.Add(request.NewErrParamRequired("TagKeys"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput {
+ s.ResourceArn = &v
+ return s
+}
+
+// SetTagKeys sets the TagKeys field's value.
+func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput {
+ s.TagKeys = v
+ return s
+}
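+
+// exampleUntagResource is a hand-written illustrative sketch, not generated
+// code: tags are removed by key. The ARN and key are placeholders.
+func exampleUntagResource() *UntagResourceInput {
+ return (&UntagResourceInput{}).
+ SetResourceArn("arn:aws:dynamodb:us-east-1:123456789012:table/Music").
+ SetTagKeys([]*string{aws.String("Environment")})
+}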
+
+type UntagResourceOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UntagResourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UntagResourceOutput) GoString() string {
+ return s.String()
+}
+
+// Represents a request to perform an UpdateItem operation.
+type Update struct {
+ _ struct{} `type:"structure"`
+
+ // A condition that must be satisfied in order for a conditional update to succeed.
+ ConditionExpression *string `type:"string"`
+
+ // One or more substitution tokens for attribute names in an expression.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+ // The primary key of the item to be updated. Each element consists of an attribute
+ // name and a value for that attribute.
+ //
+ // Key is a required field
+ Key map[string]*AttributeValue `type:"map" required:"true"`
+
+ // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the
+ // Update condition fails. For ReturnValuesOnConditionCheckFailure, the valid
+ // values are: NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW.
+ ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
+
+ // Name of the table for the UpdateItem request.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+
+ // An expression that defines one or more attributes to be updated, the action
+ // to be performed on them, and new value(s) for them.
+ //
+ // UpdateExpression is a required field
+ UpdateExpression *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Update) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Update) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Update) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Update"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+ if s.UpdateExpression == nil {
+ invalidParams.Add(request.NewErrParamRequired("UpdateExpression"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetConditionExpression sets the ConditionExpression field's value.
+func (s *Update) SetConditionExpression(v string) *Update {
+ s.ConditionExpression = &v
+ return s
+}
+
+// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
+func (s *Update) SetExpressionAttributeNames(v map[string]*string) *Update {
+ s.ExpressionAttributeNames = v
+ return s
+}
+
+// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
+func (s *Update) SetExpressionAttributeValues(v map[string]*AttributeValue) *Update {
+ s.ExpressionAttributeValues = v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Update) SetKey(v map[string]*AttributeValue) *Update {
+ s.Key = v
+ return s
+}
+
+// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value.
+func (s *Update) SetReturnValuesOnConditionCheckFailure(v string) *Update {
+ s.ReturnValuesOnConditionCheckFailure = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *Update) SetTableName(v string) *Update {
+ s.TableName = &v
+ return s
+}
+
+// SetUpdateExpression sets the UpdateExpression field's value.
+func (s *Update) SetUpdateExpression(v string) *Update {
+ s.UpdateExpression = &v
+ return s
+}
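+
+// exampleUpdate is a hand-written illustrative sketch, not generated code: a
+// transactional Update names its table and key and carries a required
+// UpdateExpression, here incrementing a counter through the :val placeholder.
+// All names and values are placeholders.
+func exampleUpdate() *Update {
+ return (&Update{}).
+ SetTableName("Music").
+ SetKey(map[string]*AttributeValue{
+ "Artist": (&AttributeValue{}).SetS("No One You Know"),
+ }).
+ SetUpdateExpression("SET Plays = Plays + :val").
+ SetExpressionAttributeValues(map[string]*AttributeValue{
+ ":val": (&AttributeValue{}).SetN("1"),
+ })
+}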
+
+type UpdateContinuousBackupsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the settings used to enable point in time recovery.
+ //
+ // PointInTimeRecoverySpecification is a required field
+ PointInTimeRecoverySpecification *PointInTimeRecoverySpecification `type:"structure" required:"true"`
+
+ // The name of the table.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateContinuousBackupsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateContinuousBackupsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateContinuousBackupsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateContinuousBackupsInput"}
+ if s.PointInTimeRecoverySpecification == nil {
+ invalidParams.Add(request.NewErrParamRequired("PointInTimeRecoverySpecification"))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+ if s.PointInTimeRecoverySpecification != nil {
+ if err := s.PointInTimeRecoverySpecification.Validate(); err != nil {
+ invalidParams.AddNested("PointInTimeRecoverySpecification", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPointInTimeRecoverySpecification sets the PointInTimeRecoverySpecification field's value.
+func (s *UpdateContinuousBackupsInput) SetPointInTimeRecoverySpecification(v *PointInTimeRecoverySpecification) *UpdateContinuousBackupsInput {
+ s.PointInTimeRecoverySpecification = v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *UpdateContinuousBackupsInput) SetTableName(v string) *UpdateContinuousBackupsInput {
+ s.TableName = &v
+ return s
+}
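+
+// exampleUpdateContinuousBackupsInput is a hand-written illustrative sketch,
+// not generated code: point-in-time recovery is switched on for a table. The
+// table name is a placeholder.
+func exampleUpdateContinuousBackupsInput() *UpdateContinuousBackupsInput {
+ return (&UpdateContinuousBackupsInput{}).
+ SetTableName("Music").
+ SetPointInTimeRecoverySpecification(
+ (&PointInTimeRecoverySpecification{}).SetPointInTimeRecoveryEnabled(true),
+ )
+}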
+
+type UpdateContinuousBackupsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the continuous backups and point in time recovery settings on
+ // the table.
+ ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateContinuousBackupsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateContinuousBackupsOutput) GoString() string {
+ return s.String()
+}
+
+// SetContinuousBackupsDescription sets the ContinuousBackupsDescription field's value.
+func (s *UpdateContinuousBackupsOutput) SetContinuousBackupsDescription(v *ContinuousBackupsDescription) *UpdateContinuousBackupsOutput {
+ s.ContinuousBackupsDescription = v
+ return s
+}
+
+// Represents the new provisioned throughput settings to be applied to a global
+// secondary index.
+type UpdateGlobalSecondaryIndexAction struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index to be updated.
+ //
+ // IndexName is a required field
+ IndexName *string `min:"3" type:"string" required:"true"`
+
+ // Represents the provisioned throughput settings for the specified global secondary
+ // index.
+ //
+ // For current minimum and maximum provisioned throughput values, see Limits
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // ProvisionedThroughput is a required field
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateGlobalSecondaryIndexAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateGlobalSecondaryIndexAction) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateGlobalSecondaryIndexAction) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalSecondaryIndexAction"}
+ if s.IndexName == nil {
+ invalidParams.Add(request.NewErrParamRequired("IndexName"))
+ }
+ if s.IndexName != nil && len(*s.IndexName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+ }
+ if s.ProvisionedThroughput == nil {
+ invalidParams.Add(request.NewErrParamRequired("ProvisionedThroughput"))
+ }
+ if s.ProvisionedThroughput != nil {
+ if err := s.ProvisionedThroughput.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *UpdateGlobalSecondaryIndexAction) SetIndexName(v string) *UpdateGlobalSecondaryIndexAction {
+ s.IndexName = &v
+ return s
+}
+
+// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
+func (s *UpdateGlobalSecondaryIndexAction) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateGlobalSecondaryIndexAction {
+ s.ProvisionedThroughput = v
+ return s
+}
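+
+// exampleUpdateGlobalSecondaryIndexAction is a hand-written illustrative
+// sketch, not generated code: it pairs an index name with new provisioned
+// throughput. The index name and capacity figures are placeholders.
+func exampleUpdateGlobalSecondaryIndexAction() *UpdateGlobalSecondaryIndexAction {
+ throughput := (&ProvisionedThroughput{}).
+ SetReadCapacityUnits(10).
+ SetWriteCapacityUnits(5)
+ return (&UpdateGlobalSecondaryIndexAction{}).
+ SetIndexName("AlbumTitle-index").
+ SetProvisionedThroughput(throughput)
+}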
+
+type UpdateGlobalTableInput struct {
+ _ struct{} `type:"structure"`
+
+ // The global table name.
+ //
+ // GlobalTableName is a required field
+ GlobalTableName *string `min:"3" type:"string" required:"true"`
+
+ // A list of Regions that should be added or removed from the global table.
+ //
+ // ReplicaUpdates is a required field
+ ReplicaUpdates []*ReplicaUpdate `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateGlobalTableInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateGlobalTableInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateGlobalTableInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalTableInput"}
+ if s.GlobalTableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("GlobalTableName"))
+ }
+ if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3))
+ }
+ if s.ReplicaUpdates == nil {
+ invalidParams.Add(request.NewErrParamRequired("ReplicaUpdates"))
+ }
+ if s.ReplicaUpdates != nil {
+ for i, v := range s.ReplicaUpdates {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaUpdates", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGlobalTableName sets the GlobalTableName field's value.
+func (s *UpdateGlobalTableInput) SetGlobalTableName(v string) *UpdateGlobalTableInput {
+ s.GlobalTableName = &v
+ return s
+}
+
+// SetReplicaUpdates sets the ReplicaUpdates field's value.
+func (s *UpdateGlobalTableInput) SetReplicaUpdates(v []*ReplicaUpdate) *UpdateGlobalTableInput {
+ s.ReplicaUpdates = v
+ return s
+}
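+
+// exampleUpdateGlobalTableInput is a hand-written illustrative sketch, not
+// generated code: a Region is added to a global table through a ReplicaUpdate
+// wrapping a CreateReplicaAction. The table and Region names are placeholders.
+func exampleUpdateGlobalTableInput() *UpdateGlobalTableInput {
+ return (&UpdateGlobalTableInput{}).
+ SetGlobalTableName("Music").
+ SetReplicaUpdates([]*ReplicaUpdate{
+ (&ReplicaUpdate{}).SetCreate(
+ (&CreateReplicaAction{}).SetRegionName("us-west-2"),
+ ),
+ })
+}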
+
+type UpdateGlobalTableOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the details of the global table.
+ GlobalTableDescription *GlobalTableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateGlobalTableOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateGlobalTableOutput) GoString() string {
+ return s.String()
+}
+
+// SetGlobalTableDescription sets the GlobalTableDescription field's value.
+func (s *UpdateGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *UpdateGlobalTableOutput {
+ s.GlobalTableDescription = v
+ return s
+}
+
+type UpdateGlobalTableSettingsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The billing mode of the global table. If GlobalTableBillingMode is not specified,
+ // the global table defaults to PROVISIONED capacity billing mode.
+ GlobalTableBillingMode *string `type:"string" enum:"BillingMode"`
+
+ // Represents the settings of a global secondary index for a global table that
+ // will be modified.
+ GlobalTableGlobalSecondaryIndexSettingsUpdate []*GlobalTableGlobalSecondaryIndexSettingsUpdate `min:"1" type:"list"`
+
+ // The name of the global table.
+ //
+ // GlobalTableName is a required field
+ GlobalTableName *string `min:"3" type:"string" required:"true"`
+
+ // Auto scaling settings for managing provisioned write capacity for the global
+ // table.
+ GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"`
+
+ // The maximum number of writes consumed per second before DynamoDB returns
+ // a ThrottlingException.
+ GlobalTableProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"`
+
+ // Represents the settings for a global table in a Region that will be modified.
+ ReplicaSettingsUpdate []*ReplicaSettingsUpdate `min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s UpdateGlobalTableSettingsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateGlobalTableSettingsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateGlobalTableSettingsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalTableSettingsInput"}
+ if s.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil && len(s.GlobalTableGlobalSecondaryIndexSettingsUpdate) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("GlobalTableGlobalSecondaryIndexSettingsUpdate", 1))
+ }
+ if s.GlobalTableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("GlobalTableName"))
+ }
+ if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3))
+ }
+ if s.GlobalTableProvisionedWriteCapacityUnits != nil && *s.GlobalTableProvisionedWriteCapacityUnits < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("GlobalTableProvisionedWriteCapacityUnits", 1))
+ }
+ if s.ReplicaSettingsUpdate != nil && len(s.ReplicaSettingsUpdate) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ReplicaSettingsUpdate", 1))
+ }
+ if s.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil {
+ for i, v := range s.GlobalTableGlobalSecondaryIndexSettingsUpdate {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalTableGlobalSecondaryIndexSettingsUpdate", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate != nil {
+ if err := s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate.Validate(); err != nil {
+ invalidParams.AddNested("GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.ReplicaSettingsUpdate != nil {
+ for i, v := range s.ReplicaSettingsUpdate {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaSettingsUpdate", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGlobalTableBillingMode sets the GlobalTableBillingMode field's value.
+func (s *UpdateGlobalTableSettingsInput) SetGlobalTableBillingMode(v string) *UpdateGlobalTableSettingsInput {
+ s.GlobalTableBillingMode = &v
+ return s
+}
+
+// SetGlobalTableGlobalSecondaryIndexSettingsUpdate sets the GlobalTableGlobalSecondaryIndexSettingsUpdate field's value.
+func (s *UpdateGlobalTableSettingsInput) SetGlobalTableGlobalSecondaryIndexSettingsUpdate(v []*GlobalTableGlobalSecondaryIndexSettingsUpdate) *UpdateGlobalTableSettingsInput {
+ s.GlobalTableGlobalSecondaryIndexSettingsUpdate = v
+ return s
+}
+
+// SetGlobalTableName sets the GlobalTableName field's value.
+func (s *UpdateGlobalTableSettingsInput) SetGlobalTableName(v string) *UpdateGlobalTableSettingsInput {
+ s.GlobalTableName = &v
+ return s
+}
+
+// SetGlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate sets the GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate field's value.
+func (s *UpdateGlobalTableSettingsInput) SetGlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *UpdateGlobalTableSettingsInput {
+ s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate = v
+ return s
+}
+
+// SetGlobalTableProvisionedWriteCapacityUnits sets the GlobalTableProvisionedWriteCapacityUnits field's value.
+func (s *UpdateGlobalTableSettingsInput) SetGlobalTableProvisionedWriteCapacityUnits(v int64) *UpdateGlobalTableSettingsInput {
+ s.GlobalTableProvisionedWriteCapacityUnits = &v
+ return s
+}
+
+// SetReplicaSettingsUpdate sets the ReplicaSettingsUpdate field's value.
+func (s *UpdateGlobalTableSettingsInput) SetReplicaSettingsUpdate(v []*ReplicaSettingsUpdate) *UpdateGlobalTableSettingsInput {
+ s.ReplicaSettingsUpdate = v
+ return s
+}
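+
+// exampleUpdateGlobalTableSettingsInput is a hand-written illustrative
+// sketch, not generated code: it applies a provisioned-capacity settings
+// update to a global table. The name and capacity figure are placeholders.
+func exampleUpdateGlobalTableSettingsInput() *UpdateGlobalTableSettingsInput {
+ return (&UpdateGlobalTableSettingsInput{}).
+ SetGlobalTableName("Music").
+ SetGlobalTableBillingMode(BillingModeProvisioned).
+ SetGlobalTableProvisionedWriteCapacityUnits(10)
+}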
+
+type UpdateGlobalTableSettingsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global table.
+ GlobalTableName *string `min:"3" type:"string"`
+
+ // The Region-specific settings for the global table.
+ ReplicaSettings []*ReplicaSettingsDescription `type:"list"`
+}
+
+// String returns the string representation
+func (s UpdateGlobalTableSettingsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateGlobalTableSettingsOutput) GoString() string {
+ return s.String()
+}
+
+// SetGlobalTableName sets the GlobalTableName field's value.
+func (s *UpdateGlobalTableSettingsOutput) SetGlobalTableName(v string) *UpdateGlobalTableSettingsOutput {
+ s.GlobalTableName = &v
+ return s
+}
+
+// SetReplicaSettings sets the ReplicaSettings field's value.
+func (s *UpdateGlobalTableSettingsOutput) SetReplicaSettings(v []*ReplicaSettingsDescription) *UpdateGlobalTableSettingsOutput {
+ s.ReplicaSettings = v
+ return s
+}
+
+// Represents the input of an UpdateItem operation.
+type UpdateItemInput struct {
+ _ struct{} `type:"structure"`
+
+ // This is a legacy parameter. Use UpdateExpression instead. For more information,
+ // see AttributeUpdates (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributeUpdates.html)
+ // in the Amazon DynamoDB Developer Guide.
+ AttributeUpdates map[string]*AttributeValueUpdate `type:"map"`
+
+ // A condition that must be satisfied in order for a conditional update to succeed.
+ //
+ // An expression can contain any of the following:
+ //
+ // * Functions: attribute_exists | attribute_not_exists | attribute_type
+ // | contains | begins_with | size These function names are case-sensitive.
+ //
+ // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
+ //
+ // * Logical operators: AND | OR | NOT
+ //
+ // For more information about condition expressions, see Specifying Conditions
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConditionExpression *string `type:"string"`
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more information,
+ // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more information,
+ // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html)
+ // in the Amazon DynamoDB Developer Guide.
+ Expected map[string]*ExpectedAttributeValue `type:"map"`
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames:
+ //
+ // * To access an attribute whose name conflicts with a DynamoDB reserved
+ // word.
+ //
+ // * To create a placeholder for repeating occurrences of an attribute name
+ // in an expression.
+ //
+ // * To prevent special characters in an attribute name from being misinterpreted
+ // in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // * Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be
+ // used directly in an expression. (For the complete list of reserved words,
+ // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+ // in the Amazon DynamoDB Developer Guide.) To work around this, you could specify
+ // the following for ExpressionAttributeNames:
+ //
+ // * {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // * #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information about expression attribute names, see Specifying Item
+ // Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute
+ // value. For example, suppose that you wanted to check whether the value of
+ // the ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+ // }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see Condition Expressions
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+ // The primary key of the item to be updated. Each element consists of an attribute
+ // name and a value for that attribute.
+ //
+ // For the primary key, you must provide all of the attributes. For example,
+ // with a simple primary key, you only need to provide a value for the partition
+ // key. For a composite primary key, you must provide values for both the partition
+ // key and the sort key.
+ //
+ // Key is a required field
+ Key map[string]*AttributeValue `type:"map" required:"true"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed. Note that some operations, such as GetItem and
+ // BatchGetItem, do not access any indexes at all. In these cases, specifying
+ // INDEXES will only return ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // Determines whether item collection metrics are returned. If set to SIZE,
+ // the response includes statistics about any item collections that were modified
+ // during the operation. If set to NONE (the default), no statistics are returned.
+ ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+
+ // Use ReturnValues if you want to get the item attributes as they appear before
+ // or after they are updated. For UpdateItem, the valid values are:
+ //
+ // * NONE - If ReturnValues is not specified, or if its value is NONE, then
+ // nothing is returned. (This setting is the default for ReturnValues.)
+ //
+ // * ALL_OLD - Returns all of the attributes of the item, as they appeared
+ // before the UpdateItem operation.
+ //
+ // * UPDATED_OLD - Returns only the updated attributes, as they appeared
+ // before the UpdateItem operation.
+ //
+ // * ALL_NEW - Returns all of the attributes of the item, as they appear
+ // after the UpdateItem operation.
+ //
+ // * UPDATED_NEW - Returns only the updated attributes, as they appear after
+ // the UpdateItem operation.
+ //
+ // There is no additional cost associated with requesting a return value aside
+ // from the small network and processing overhead of receiving a larger response.
+ // No read capacity units are consumed.
+ //
+ // The values returned are strongly consistent.
+ ReturnValues *string `type:"string" enum:"ReturnValue"`
+
+ // The name of the table containing the item to update.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+
+ // An expression that defines one or more attributes to be updated, the action
+ // to be performed on them, and new values for them.
+ //
+ // The following action values are available for UpdateExpression.
+ //
+ // * SET - Adds one or more attributes and values to an item. If any of these
+ // attributes already exist, they are replaced by the new values. You can
+ // also use SET to add or subtract from an attribute that is of type Number.
+ // For example: SET myNum = myNum + :val
+ //
+ // SET supports the following functions (function names are case-sensitive):
+ //
+ // if_not_exists (path, operand) - if the item does not contain an attribute
+ // at the specified path, then if_not_exists evaluates to operand; otherwise,
+ // it evaluates to path. You can use this function to avoid overwriting an
+ // attribute that may already be present in the item.
+ //
+ // list_append (operand, operand) - evaluates to a list with a new element
+ // added to it. You can append the new element to the start or the end of the
+ // list by reversing the order of the operands.
+ //
+ // * REMOVE - Removes one or more attributes from an item.
+ //
+ // * ADD - Adds the specified value to the item, if the attribute does not
+ // already exist. If the attribute does exist, then the behavior of ADD depends
+ // on the data type of the attribute:
+ //
+ // If the existing attribute is a number, and if Value is also a number, then
+ // Value is mathematically added to the existing attribute. If Value is a
+ // negative number, then it is subtracted from the existing attribute. If you
+ // use ADD to increment or decrement a number value for an item that doesn't
+ // exist before the update, DynamoDB uses 0 as the initial value. Similarly,
+ // if you use ADD for an existing item to increment or decrement an attribute
+ // value that doesn't exist before the update, DynamoDB uses 0 as the initial
+ // value. For example, suppose that the item you want to update doesn't have
+ // an attribute named itemcount, but you decide to ADD the number 3 to this
+ // attribute anyway. DynamoDB will create the itemcount attribute, set its
+ // initial value to 0, and finally add 3 to it. The result will be a new
+ // itemcount attribute in the item, with a value of 3.
+ //
+ // If the existing data type is a set and if Value is also a set, then Value
+ // is added to the existing set. For example, if the attribute value is the
+ // set [1,2], and the ADD action specified [3], then the final attribute value
+ // is [1,2,3]. An error occurs if an ADD action is specified for a set
+ // attribute and the attribute type specified does not match the existing set
+ // type. Both sets must have the same primitive data type. For example, if the
+ // existing data type is a set of strings, the Value must also be a set of
+ // strings.
+ //
+ // The ADD action only supports Number and set data types. In addition, ADD
+ // can only be used on top-level attributes, not nested attributes.
+ //
+ // * DELETE - Deletes an element from a set. If a set of values is specified,
+ // then those values are subtracted from the old set. For example, if the
+ // attribute value was the set [a,b,c] and the DELETE action specifies [a,c],
+ // then the final attribute value is [b]. Specifying an empty set is an error.
+ // The DELETE action only supports set data types. In addition, DELETE can
+ // only be used on top-level attributes, not nested attributes.
+ //
+ // You can have many actions in a single expression, such as the following:
+ // SET a=:value1, b=:value2 DELETE :value3, :value4, :value5
+ //
+ // For more information on update expressions, see Modifying Items and Attributes
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html)
+ // in the Amazon DynamoDB Developer Guide.
+ UpdateExpression *string `type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateItemInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateItemInput"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributeUpdates sets the AttributeUpdates field's value.
+func (s *UpdateItemInput) SetAttributeUpdates(v map[string]*AttributeValueUpdate) *UpdateItemInput {
+ s.AttributeUpdates = v
+ return s
+}
+
+// SetConditionExpression sets the ConditionExpression field's value.
+func (s *UpdateItemInput) SetConditionExpression(v string) *UpdateItemInput {
+ s.ConditionExpression = &v
+ return s
+}
+
+// SetConditionalOperator sets the ConditionalOperator field's value.
+func (s *UpdateItemInput) SetConditionalOperator(v string) *UpdateItemInput {
+ s.ConditionalOperator = &v
+ return s
+}
+
+// SetExpected sets the Expected field's value.
+func (s *UpdateItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *UpdateItemInput {
+ s.Expected = v
+ return s
+}
+
+// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
+func (s *UpdateItemInput) SetExpressionAttributeNames(v map[string]*string) *UpdateItemInput {
+ s.ExpressionAttributeNames = v
+ return s
+}
+
+// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
+func (s *UpdateItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *UpdateItemInput {
+ s.ExpressionAttributeValues = v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *UpdateItemInput) SetKey(v map[string]*AttributeValue) *UpdateItemInput {
+ s.Key = v
+ return s
+}
+
+// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
+func (s *UpdateItemInput) SetReturnConsumedCapacity(v string) *UpdateItemInput {
+ s.ReturnConsumedCapacity = &v
+ return s
+}
+
+// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value.
+func (s *UpdateItemInput) SetReturnItemCollectionMetrics(v string) *UpdateItemInput {
+ s.ReturnItemCollectionMetrics = &v
+ return s
+}
+
+// SetReturnValues sets the ReturnValues field's value.
+func (s *UpdateItemInput) SetReturnValues(v string) *UpdateItemInput {
+ s.ReturnValues = &v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *UpdateItemInput) SetTableName(v string) *UpdateItemInput {
+ s.TableName = &v
+ return s
+}
+
+// SetUpdateExpression sets the UpdateExpression field's value.
+func (s *UpdateItemInput) SetUpdateExpression(v string) *UpdateItemInput {
+ s.UpdateExpression = &v
+ return s
+}
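+
+// exampleUpdateItemInput is a hand-written illustrative sketch, not generated
+// code: it exercises the pieces documented above, substituting #P for the
+// reserved word Percentile and :val for the new value in a SET action. Table,
+// key, and values are placeholders.
+func exampleUpdateItemInput() *UpdateItemInput {
+ return (&UpdateItemInput{}).
+ SetTableName("Music").
+ SetKey(map[string]*AttributeValue{
+ "Artist": (&AttributeValue{}).SetS("No One You Know"),
+ }).
+ SetUpdateExpression("SET #P = :val").
+ SetExpressionAttributeNames(map[string]*string{
+ "#P": aws.String("Percentile"),
+ }).
+ SetExpressionAttributeValues(map[string]*AttributeValue{
+ ":val": (&AttributeValue{}).SetN("90"),
+ }).
+ SetReturnValues(ReturnValueUpdatedNew)
+}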
+
+// Represents the output of an UpdateItem operation.
+type UpdateItemOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A map of attribute values as they appear before or after the UpdateItem operation,
+ // as determined by the ReturnValues parameter.
+ //
+ // The Attributes map is only present if ReturnValues was specified as something
+ // other than NONE in the request. Each element represents one attribute.
+ Attributes map[string]*AttributeValue `type:"map"`
+
+ // The capacity units consumed by the UpdateItem operation. The data returned
+ // includes the total provisioned throughput consumed, along with statistics
+ // for the table and any indexes involved in the operation. ConsumedCapacity
+ // is only returned if the ReturnConsumedCapacity parameter was specified. For
+ // more information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ConsumedCapacity *ConsumedCapacity `type:"structure"`
+
+ // Information about item collections, if any, that were affected by the UpdateItem
+ // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics
+ // parameter was specified. If the table does not have any local secondary indexes,
+ // this information is not returned in the response.
+ //
+ // Each ItemCollectionMetrics element consists of:
+ //
+ // * ItemCollectionKey - The partition key value of the item collection.
+ // This is the same as the partition key value of the item itself.
+ //
+ // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
+ // This value is a two-element array containing a lower bound and an upper
+ // bound for the estimate. The estimate includes the size of all the items
+ // in the table, plus the size of all attributes projected into all of the
+ // local secondary indexes on that table. Use this estimate to measure whether
+ // a local secondary index is approaching its size limit. The estimate is
+ // subject to change over time; therefore, do not rely on the precision or
+ // accuracy of the estimate.
+ ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateItemOutput) GoString() string {
+ return s.String()
+}
+
+// SetAttributes sets the Attributes field's value.
+func (s *UpdateItemOutput) SetAttributes(v map[string]*AttributeValue) *UpdateItemOutput {
+ s.Attributes = v
+ return s
+}
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.
+func (s *UpdateItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *UpdateItemOutput {
+ s.ConsumedCapacity = v
+ return s
+}
+
+// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value.
+func (s *UpdateItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *UpdateItemOutput {
+ s.ItemCollectionMetrics = v
+ return s
+}
+
+// Represents the input of an UpdateTable operation.
+type UpdateTableInput struct {
+ _ struct{} `type:"structure"`
+
+ // An array of attributes that describe the key schema for the table and indexes.
+ // If you are adding a new global secondary index to the table, AttributeDefinitions
+ // must include the key element(s) of the new index.
+ AttributeDefinitions []*AttributeDefinition `type:"list"`
+
+ // Controls how you are charged for read and write throughput and how you manage
+ // capacity. When switching from pay-per-request to provisioned capacity, initial
+ // provisioned capacity values must be set. The initial provisioned capacity
+ // values are estimated based on the consumed read and write capacity of your
+ // table and global secondary indexes over the past 30 minutes.
+ //
+ // * PROVISIONED - Sets the billing mode to PROVISIONED. We recommend using
+ // PROVISIONED for predictable workloads.
+ //
+ // * PAY_PER_REQUEST - Sets the billing mode to PAY_PER_REQUEST. We recommend
+ // using PAY_PER_REQUEST for unpredictable workloads.
+ BillingMode *string `type:"string" enum:"BillingMode"`
+
+ // An array of one or more global secondary indexes for the table. For each
+ // index in the array, you can request one action:
+ //
+ // * Create - add a new global secondary index to the table.
+ //
+ // * Update - modify the provisioned throughput settings of an existing global
+ // secondary index.
+ //
+ // * Delete - remove a global secondary index from the table.
+ //
+ // For more information, see Managing Global Secondary Indexes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html)
+ // in the Amazon DynamoDB Developer Guide.
+ GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexUpdate `type:"list"`
+
+ // The new provisioned throughput settings for the specified table or index.
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
+
+ // The new server-side encryption settings for the specified table.
+ SSESpecification *SSESpecification `type:"structure"`
+
+ // Represents the DynamoDB Streams configuration for the table.
+ //
+ // You receive a ResourceInUseException if you try to enable a stream on a table
+ // that already has a stream, or if you try to disable a stream on a table that
+ // doesn't have a stream.
+ StreamSpecification *StreamSpecification `type:"structure"`
+
+ // The name of the table to be updated.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateTableInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateTableInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateTableInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateTableInput"}
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+ if s.AttributeDefinitions != nil {
+ for i, v := range s.AttributeDefinitions {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.GlobalSecondaryIndexUpdates != nil {
+ for i, v := range s.GlobalSecondaryIndexUpdates {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexUpdates", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.ProvisionedThroughput != nil {
+ if err := s.ProvisionedThroughput.Validate(); err != nil {
+ invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributeDefinitions sets the AttributeDefinitions field's value.
+func (s *UpdateTableInput) SetAttributeDefinitions(v []*AttributeDefinition) *UpdateTableInput {
+ s.AttributeDefinitions = v
+ return s
+}
+
+// SetBillingMode sets the BillingMode field's value.
+func (s *UpdateTableInput) SetBillingMode(v string) *UpdateTableInput {
+ s.BillingMode = &v
+ return s
+}
+
+// SetGlobalSecondaryIndexUpdates sets the GlobalSecondaryIndexUpdates field's value.
+func (s *UpdateTableInput) SetGlobalSecondaryIndexUpdates(v []*GlobalSecondaryIndexUpdate) *UpdateTableInput {
+ s.GlobalSecondaryIndexUpdates = v
+ return s
+}
+
+// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
+func (s *UpdateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateTableInput {
+ s.ProvisionedThroughput = v
+ return s
+}
+
+// SetSSESpecification sets the SSESpecification field's value.
+func (s *UpdateTableInput) SetSSESpecification(v *SSESpecification) *UpdateTableInput {
+ s.SSESpecification = v
+ return s
+}
+
+// SetStreamSpecification sets the StreamSpecification field's value.
+func (s *UpdateTableInput) SetStreamSpecification(v *StreamSpecification) *UpdateTableInput {
+ s.StreamSpecification = v
+ return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *UpdateTableInput) SetTableName(v string) *UpdateTableInput {
+ s.TableName = &v
+ return s
+}
+
+// Represents the output of an UpdateTable operation.
+type UpdateTableOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the properties of the table.
+ TableDescription *TableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateTableOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateTableOutput) GoString() string {
+ return s.String()
+}
+
+// SetTableDescription sets the TableDescription field's value.
+func (s *UpdateTableOutput) SetTableDescription(v *TableDescription) *UpdateTableOutput {
+ s.TableDescription = v
+ return s
+}
+
+// Represents the input of an UpdateTimeToLive operation.
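+//
+// For example, enabling TTL on a table might be sketched as follows (the
+// table and attribute names are assumptions):
+//
+//    input := (&UpdateTimeToLiveInput{}).
+//        SetTableName("Music").
+//        SetTimeToLiveSpecification((&TimeToLiveSpecification{}).
+//            SetAttributeName("ExpiresAt").
+//            SetEnabled(true))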
+type UpdateTimeToLiveInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the table to be configured.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+
+ // Represents the settings used to enable or disable Time to Live for the specified
+ // table.
+ //
+ // TimeToLiveSpecification is a required field
+ TimeToLiveSpecification *TimeToLiveSpecification `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s UpdateTimeToLiveInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateTimeToLiveInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateTimeToLiveInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UpdateTimeToLiveInput"}
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+ if s.TimeToLiveSpecification == nil {
+ invalidParams.Add(request.NewErrParamRequired("TimeToLiveSpecification"))
+ }
+ if s.TimeToLiveSpecification != nil {
+ if err := s.TimeToLiveSpecification.Validate(); err != nil {
+ invalidParams.AddNested("TimeToLiveSpecification", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTableName sets the TableName field's value.
+func (s *UpdateTimeToLiveInput) SetTableName(v string) *UpdateTimeToLiveInput {
+ s.TableName = &v
+ return s
+}
+
+// SetTimeToLiveSpecification sets the TimeToLiveSpecification field's value.
+func (s *UpdateTimeToLiveInput) SetTimeToLiveSpecification(v *TimeToLiveSpecification) *UpdateTimeToLiveInput {
+ s.TimeToLiveSpecification = v
+ return s
+}
+
+type UpdateTimeToLiveOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Represents the output of an UpdateTimeToLive operation.
+ TimeToLiveSpecification *TimeToLiveSpecification `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateTimeToLiveOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateTimeToLiveOutput) GoString() string {
+ return s.String()
+}
+
+// SetTimeToLiveSpecification sets the TimeToLiveSpecification field's value.
+func (s *UpdateTimeToLiveOutput) SetTimeToLiveSpecification(v *TimeToLiveSpecification) *UpdateTimeToLiveOutput {
+ s.TimeToLiveSpecification = v
+ return s
+}
+
+// Represents an operation to perform - either DeleteItem or PutItem. You can
+// only request one of these operations, not both, in a single WriteRequest.
+// If you do need to perform both of these operations, you will need to provide
+// two separate WriteRequest objects.
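+//
+// For example, deleting one item and putting another takes two requests
+// (a sketch; oldItemKey and newItem are assumed to be prepared
+// map[string]*AttributeValue values):
+//
+//    requests := []*WriteRequest{
+//        {DeleteRequest: &DeleteRequest{Key: oldItemKey}},
+//        {PutRequest: &PutRequest{Item: newItem}},
+//    }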
+type WriteRequest struct {
+ _ struct{} `type:"structure"`
+
+ // A request to perform a DeleteItem operation.
+ DeleteRequest *DeleteRequest `type:"structure"`
+
+ // A request to perform a PutItem operation.
+ PutRequest *PutRequest `type:"structure"`
+}
+
+// String returns the string representation
+func (s WriteRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WriteRequest) GoString() string {
+ return s.String()
+}
+
+// SetDeleteRequest sets the DeleteRequest field's value.
+func (s *WriteRequest) SetDeleteRequest(v *DeleteRequest) *WriteRequest {
+ s.DeleteRequest = v
+ return s
+}
+
+// SetPutRequest sets the PutRequest field's value.
+func (s *WriteRequest) SetPutRequest(v *PutRequest) *WriteRequest {
+ s.PutRequest = v
+ return s
+}
+
+const (
+ // AttributeActionAdd is a AttributeAction enum value
+ AttributeActionAdd = "ADD"
+
+ // AttributeActionPut is a AttributeAction enum value
+ AttributeActionPut = "PUT"
+
+ // AttributeActionDelete is a AttributeAction enum value
+ AttributeActionDelete = "DELETE"
+)
+
+const (
+ // BackupStatusCreating is a BackupStatus enum value
+ BackupStatusCreating = "CREATING"
+
+ // BackupStatusDeleted is a BackupStatus enum value
+ BackupStatusDeleted = "DELETED"
+
+ // BackupStatusAvailable is a BackupStatus enum value
+ BackupStatusAvailable = "AVAILABLE"
+)
+
+const (
+ // BackupTypeUser is a BackupType enum value
+ BackupTypeUser = "USER"
+
+ // BackupTypeSystem is a BackupType enum value
+ BackupTypeSystem = "SYSTEM"
+
+ // BackupTypeAwsBackup is a BackupType enum value
+ BackupTypeAwsBackup = "AWS_BACKUP"
+)
+
+const (
+ // BackupTypeFilterUser is a BackupTypeFilter enum value
+ BackupTypeFilterUser = "USER"
+
+ // BackupTypeFilterSystem is a BackupTypeFilter enum value
+ BackupTypeFilterSystem = "SYSTEM"
+
+ // BackupTypeFilterAwsBackup is a BackupTypeFilter enum value
+ BackupTypeFilterAwsBackup = "AWS_BACKUP"
+
+ // BackupTypeFilterAll is a BackupTypeFilter enum value
+ BackupTypeFilterAll = "ALL"
+)
+
+const (
+ // BillingModeProvisioned is a BillingMode enum value
+ BillingModeProvisioned = "PROVISIONED"
+
+ // BillingModePayPerRequest is a BillingMode enum value
+ BillingModePayPerRequest = "PAY_PER_REQUEST"
+)
+
+const (
+ // ComparisonOperatorEq is a ComparisonOperator enum value
+ ComparisonOperatorEq = "EQ"
+
+ // ComparisonOperatorNe is a ComparisonOperator enum value
+ ComparisonOperatorNe = "NE"
+
+ // ComparisonOperatorIn is a ComparisonOperator enum value
+ ComparisonOperatorIn = "IN"
+
+ // ComparisonOperatorLe is a ComparisonOperator enum value
+ ComparisonOperatorLe = "LE"
+
+ // ComparisonOperatorLt is a ComparisonOperator enum value
+ ComparisonOperatorLt = "LT"
+
+ // ComparisonOperatorGe is a ComparisonOperator enum value
+ ComparisonOperatorGe = "GE"
+
+ // ComparisonOperatorGt is a ComparisonOperator enum value
+ ComparisonOperatorGt = "GT"
+
+ // ComparisonOperatorBetween is a ComparisonOperator enum value
+ ComparisonOperatorBetween = "BETWEEN"
+
+ // ComparisonOperatorNotNull is a ComparisonOperator enum value
+ ComparisonOperatorNotNull = "NOT_NULL"
+
+ // ComparisonOperatorNull is a ComparisonOperator enum value
+ ComparisonOperatorNull = "NULL"
+
+ // ComparisonOperatorContains is a ComparisonOperator enum value
+ ComparisonOperatorContains = "CONTAINS"
+
+ // ComparisonOperatorNotContains is a ComparisonOperator enum value
+ ComparisonOperatorNotContains = "NOT_CONTAINS"
+
+ // ComparisonOperatorBeginsWith is a ComparisonOperator enum value
+ ComparisonOperatorBeginsWith = "BEGINS_WITH"
+)
+
+const (
+ // ConditionalOperatorAnd is a ConditionalOperator enum value
+ ConditionalOperatorAnd = "AND"
+
+ // ConditionalOperatorOr is a ConditionalOperator enum value
+ ConditionalOperatorOr = "OR"
+)
+
+const (
+ // ContinuousBackupsStatusEnabled is a ContinuousBackupsStatus enum value
+ ContinuousBackupsStatusEnabled = "ENABLED"
+
+ // ContinuousBackupsStatusDisabled is a ContinuousBackupsStatus enum value
+ ContinuousBackupsStatusDisabled = "DISABLED"
+)
+
+const (
+ // GlobalTableStatusCreating is a GlobalTableStatus enum value
+ GlobalTableStatusCreating = "CREATING"
+
+ // GlobalTableStatusActive is a GlobalTableStatus enum value
+ GlobalTableStatusActive = "ACTIVE"
+
+ // GlobalTableStatusDeleting is a GlobalTableStatus enum value
+ GlobalTableStatusDeleting = "DELETING"
+
+ // GlobalTableStatusUpdating is a GlobalTableStatus enum value
+ GlobalTableStatusUpdating = "UPDATING"
+)
+
+const (
+ // IndexStatusCreating is a IndexStatus enum value
+ IndexStatusCreating = "CREATING"
+
+ // IndexStatusUpdating is a IndexStatus enum value
+ IndexStatusUpdating = "UPDATING"
+
+ // IndexStatusDeleting is a IndexStatus enum value
+ IndexStatusDeleting = "DELETING"
+
+ // IndexStatusActive is a IndexStatus enum value
+ IndexStatusActive = "ACTIVE"
+)
+
+const (
+ // KeyTypeHash is a KeyType enum value
+ KeyTypeHash = "HASH"
+
+ // KeyTypeRange is a KeyType enum value
+ KeyTypeRange = "RANGE"
+)
+
+const (
+ // PointInTimeRecoveryStatusEnabled is a PointInTimeRecoveryStatus enum value
+ PointInTimeRecoveryStatusEnabled = "ENABLED"
+
+ // PointInTimeRecoveryStatusDisabled is a PointInTimeRecoveryStatus enum value
+ PointInTimeRecoveryStatusDisabled = "DISABLED"
+)
+
+const (
+ // ProjectionTypeAll is a ProjectionType enum value
+ ProjectionTypeAll = "ALL"
+
+ // ProjectionTypeKeysOnly is a ProjectionType enum value
+ ProjectionTypeKeysOnly = "KEYS_ONLY"
+
+ // ProjectionTypeInclude is a ProjectionType enum value
+ ProjectionTypeInclude = "INCLUDE"
+)
+
+const (
+ // ReplicaStatusCreating is a ReplicaStatus enum value
+ ReplicaStatusCreating = "CREATING"
+
+ // ReplicaStatusUpdating is a ReplicaStatus enum value
+ ReplicaStatusUpdating = "UPDATING"
+
+ // ReplicaStatusDeleting is a ReplicaStatus enum value
+ ReplicaStatusDeleting = "DELETING"
+
+ // ReplicaStatusActive is a ReplicaStatus enum value
+ ReplicaStatusActive = "ACTIVE"
+)
+
+// Determines the level of detail about provisioned throughput consumption that
+// is returned in the response:
+//
+// * INDEXES - The response includes the aggregate ConsumedCapacity for the
+// operation, together with ConsumedCapacity for each table and secondary
+// index that was accessed. Note that some operations, such as GetItem and
+// BatchGetItem, do not access any indexes at all. In these cases, specifying
+// INDEXES will only return ConsumedCapacity information for table(s).
+//
+// * TOTAL - The response includes only the aggregate ConsumedCapacity for
+// the operation.
+//
+// * NONE - No ConsumedCapacity details are included in the response.
+const (
+ // ReturnConsumedCapacityIndexes is a ReturnConsumedCapacity enum value
+ ReturnConsumedCapacityIndexes = "INDEXES"
+
+ // ReturnConsumedCapacityTotal is a ReturnConsumedCapacity enum value
+ ReturnConsumedCapacityTotal = "TOTAL"
+
+ // ReturnConsumedCapacityNone is a ReturnConsumedCapacity enum value
+ ReturnConsumedCapacityNone = "NONE"
+)
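+
+// As a usage sketch, these enum values are typically passed through an
+// input's setter; for example (the table name is an assumption):
+//
+//    input := (&QueryInput{}).
+//        SetTableName("Music").
+//        SetReturnConsumedCapacity(ReturnConsumedCapacityTotal)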
+
+const (
+ // ReturnItemCollectionMetricsSize is a ReturnItemCollectionMetrics enum value
+ ReturnItemCollectionMetricsSize = "SIZE"
+
+ // ReturnItemCollectionMetricsNone is a ReturnItemCollectionMetrics enum value
+ ReturnItemCollectionMetricsNone = "NONE"
+)
+
+const (
+ // ReturnValueNone is a ReturnValue enum value
+ ReturnValueNone = "NONE"
+
+ // ReturnValueAllOld is a ReturnValue enum value
+ ReturnValueAllOld = "ALL_OLD"
+
+ // ReturnValueUpdatedOld is a ReturnValue enum value
+ ReturnValueUpdatedOld = "UPDATED_OLD"
+
+ // ReturnValueAllNew is a ReturnValue enum value
+ ReturnValueAllNew = "ALL_NEW"
+
+ // ReturnValueUpdatedNew is a ReturnValue enum value
+ ReturnValueUpdatedNew = "UPDATED_NEW"
+)
+
+const (
+ // ReturnValuesOnConditionCheckFailureAllOld is a ReturnValuesOnConditionCheckFailure enum value
+ ReturnValuesOnConditionCheckFailureAllOld = "ALL_OLD"
+
+ // ReturnValuesOnConditionCheckFailureNone is a ReturnValuesOnConditionCheckFailure enum value
+ ReturnValuesOnConditionCheckFailureNone = "NONE"
+)
+
+const (
+ // SSEStatusEnabling is a SSEStatus enum value
+ SSEStatusEnabling = "ENABLING"
+
+ // SSEStatusEnabled is a SSEStatus enum value
+ SSEStatusEnabled = "ENABLED"
+
+ // SSEStatusDisabling is a SSEStatus enum value
+ SSEStatusDisabling = "DISABLING"
+
+ // SSEStatusDisabled is a SSEStatus enum value
+ SSEStatusDisabled = "DISABLED"
+
+ // SSEStatusUpdating is a SSEStatus enum value
+ SSEStatusUpdating = "UPDATING"
+)
+
+const (
+ // SSETypeAes256 is a SSEType enum value
+ SSETypeAes256 = "AES256"
+
+ // SSETypeKms is a SSEType enum value
+ SSETypeKms = "KMS"
+)
+
+const (
+ // ScalarAttributeTypeS is a ScalarAttributeType enum value
+ ScalarAttributeTypeS = "S"
+
+ // ScalarAttributeTypeN is a ScalarAttributeType enum value
+ ScalarAttributeTypeN = "N"
+
+ // ScalarAttributeTypeB is a ScalarAttributeType enum value
+ ScalarAttributeTypeB = "B"
+)
+
+const (
+ // SelectAllAttributes is a Select enum value
+ SelectAllAttributes = "ALL_ATTRIBUTES"
+
+ // SelectAllProjectedAttributes is a Select enum value
+ SelectAllProjectedAttributes = "ALL_PROJECTED_ATTRIBUTES"
+
+ // SelectSpecificAttributes is a Select enum value
+ SelectSpecificAttributes = "SPECIFIC_ATTRIBUTES"
+
+ // SelectCount is a Select enum value
+ SelectCount = "COUNT"
+)
+
+const (
+ // StreamViewTypeNewImage is a StreamViewType enum value
+ StreamViewTypeNewImage = "NEW_IMAGE"
+
+ // StreamViewTypeOldImage is a StreamViewType enum value
+ StreamViewTypeOldImage = "OLD_IMAGE"
+
+ // StreamViewTypeNewAndOldImages is a StreamViewType enum value
+ StreamViewTypeNewAndOldImages = "NEW_AND_OLD_IMAGES"
+
+ // StreamViewTypeKeysOnly is a StreamViewType enum value
+ StreamViewTypeKeysOnly = "KEYS_ONLY"
+)
+
+const (
+ // TableStatusCreating is a TableStatus enum value
+ TableStatusCreating = "CREATING"
+
+ // TableStatusUpdating is a TableStatus enum value
+ TableStatusUpdating = "UPDATING"
+
+ // TableStatusDeleting is a TableStatus enum value
+ TableStatusDeleting = "DELETING"
+
+ // TableStatusActive is a TableStatus enum value
+ TableStatusActive = "ACTIVE"
+)
+
+const (
+ // TimeToLiveStatusEnabling is a TimeToLiveStatus enum value
+ TimeToLiveStatusEnabling = "ENABLING"
+
+ // TimeToLiveStatusDisabling is a TimeToLiveStatus enum value
+ TimeToLiveStatusDisabling = "DISABLING"
+
+ // TimeToLiveStatusEnabled is a TimeToLiveStatus enum value
+ TimeToLiveStatusEnabled = "ENABLED"
+
+ // TimeToLiveStatusDisabled is a TimeToLiveStatus enum value
+ TimeToLiveStatusDisabled = "DISABLED"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go
new file mode 100644
index 000000000..c019e63df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go
@@ -0,0 +1,98 @@
+package dynamodb
+
+import (
+ "bytes"
+ "hash/crc32"
+ "io"
+ "io/ioutil"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+func init() {
+ initClient = func(c *client.Client) {
+ if c.Config.Retryer == nil {
+ // Only override the retryer with a custom one if the config
+ // does not already contain a retryer
+ setCustomRetryer(c)
+ }
+
+ c.Handlers.Build.PushBack(disableCompression)
+ c.Handlers.Unmarshal.PushFront(validateCRC32)
+ }
+}
+
+func setCustomRetryer(c *client.Client) {
+ maxRetries := aws.IntValue(c.Config.MaxRetries)
+ if c.Config.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+ maxRetries = 10
+ }
+
+ c.Retryer = client.DefaultRetryer{
+ NumMaxRetries: maxRetries,
+ MinRetryDelay: 50 * time.Millisecond,
+ }
+}
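+
+// Callers that need different retry behavior can supply their own Retryer in
+// the aws.Config used to construct the client, which suppresses this default.
+// A minimal sketch (sess is an assumed *session.Session):
+//
+//    svc := dynamodb.New(sess, &aws.Config{
+//        Retryer: client.DefaultRetryer{NumMaxRetries: 3},
+//    })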
+
+func drainBody(b io.ReadCloser, length int64) (out *bytes.Buffer, err error) {
+ if length < 0 {
+ length = 0
+ }
+ buf := bytes.NewBuffer(make([]byte, 0, length))
+
+ if _, err = buf.ReadFrom(b); err != nil {
+ return nil, err
+ }
+ if err = b.Close(); err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
+
+func disableCompression(r *request.Request) {
+ r.HTTPRequest.Header.Set("Accept-Encoding", "identity")
+}
+
+func validateCRC32(r *request.Request) {
+ if r.Error != nil {
+ return // already have an error, no need to verify CRC
+ }
+
+ // Checksum validation is off, skip
+ if aws.BoolValue(r.Config.DisableComputeChecksums) {
+ return
+ }
+
+ // Try to get CRC from response
+ header := r.HTTPResponse.Header.Get("X-Amz-Crc32")
+ if header == "" {
+ return // No header, skip
+ }
+
+ expected, err := strconv.ParseUint(header, 10, 32)
+ if err != nil {
+ return // Could not determine CRC value, skip
+ }
+
+ buf, err := drainBody(r.HTTPResponse.Body, r.HTTPResponse.ContentLength)
+ if err != nil { // failed to read the response body, skip
+ return
+ }
+
+ // Reset body for subsequent reads
+ r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes()))
+
+ // Compute the CRC checksum
+ crc := crc32.ChecksumIEEE(buf.Bytes())
+
+ if crc != uint32(expected) {
+ // CRC does not match, set a retryable error
+ r.Retryable = aws.Bool(true)
+ r.Error = awserr.New("CRC32CheckFailed", "CRC32 integrity check failed", nil)
+ }
+}
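+
+// Checksum validation can also be skipped entirely by configuring the client
+// with checksums disabled, e.g. (a sketch; sess is an assumed *session.Session):
+//
+//    cfg := aws.NewConfig().WithDisableComputeChecksums(true)
+//    svc := dynamodb.New(sess, cfg)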
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go
new file mode 100644
index 000000000..f244a7330
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go
@@ -0,0 +1,45 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package dynamodb provides the client and types for making API
+// requests to Amazon DynamoDB.
+//
+// Amazon DynamoDB is a fully managed NoSQL database service that provides fast
+// and predictable performance with seamless scalability. DynamoDB lets you
+// offload the administrative burdens of operating and scaling a distributed
+// database, so that you don't have to worry about hardware provisioning, setup
+// and configuration, replication, software patching, or cluster scaling.
+//
+// With DynamoDB, you can create database tables that can store and retrieve
+// any amount of data, and serve any level of request traffic. You can scale
+// up or scale down your tables' throughput capacity without downtime or performance
+// degradation, and use the AWS Management Console to monitor resource utilization
+// and performance metrics.
+//
+// DynamoDB automatically spreads the data and traffic for your tables over
+// a sufficient number of servers to handle your throughput and storage requirements,
+// while maintaining consistent and fast performance. All of your data is stored
+// on solid state disks (SSDs) and automatically replicated across multiple
+// Availability Zones in an AWS region, providing built-in high availability
+// and data durability.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10 for more information on this service.
+//
+// See dynamodb package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/
+//
+// Using the Client
+//
+// To contact Amazon DynamoDB with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
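+//
+// For example (a minimal sketch, assuming credentials and region are
+// provided by the environment):
+//
+//    sess := session.Must(session.NewSession())
+//    svc := dynamodb.New(sess)
+//    out, err := svc.ListTables(&dynamodb.ListTablesInput{})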
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon DynamoDB client DynamoDB for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/#New
+package dynamodb
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go
new file mode 100644
index 000000000..013e9b1d2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go
@@ -0,0 +1,27 @@
+/*
+AttributeValue Marshaling and Unmarshaling Helpers
+
+Utility helpers to marshal and unmarshal AttributeValue to and
+from Go types can be found in the dynamodbattribute sub package. This package
+provides specialized functions for the common ways of working with
+AttributeValues, such as map[string]*AttributeValue, []*AttributeValue, and
+*AttributeValue directly. This is helpful for marshaling Go types for API
+operations such as PutItem, and unmarshaling Query and Scan APIs' responses.
+
+See the dynamodbattribute package documentation for more information.
+https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/dynamodbattribute/
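+
+For example, marshaling a single Go value (a minimal sketch):
+
+    av, err := dynamodbattribute.Marshal("some string")
+    // av.S now holds the marshaled string value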
+
+Expression Builders
+
+The expression package provides utility types and functions to build DynamoDB
+expressions for type-safe construction of API ExpressionAttributeNames and
+ExpressionAttributeValues.
+
+The package represents the various DynamoDB Expressions as structs named
+accordingly. For example, ConditionBuilder represents a DynamoDB Condition
+Expression, an UpdateBuilder represents a DynamoDB Update Expression, and so on.
+
+See the expression package documentation for more information.
+https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/expression/
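+
+For example, building a filter expression might look like this (a sketch; the
+attribute name is an assumption):
+
+    expr, err := expression.NewBuilder().
+        WithFilter(expression.Name("Count").GreaterThan(expression.Value(5))).
+        Build()
+    // expr.Names(), expr.Values(), and expr.Filter() then feed the
+    // Scan or Query input.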
+*/
+package dynamodb
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go
new file mode 100644
index 000000000..e38e41daf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/converter.go
@@ -0,0 +1,443 @@
+package dynamodbattribute
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "runtime"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/service/dynamodb"
+)
+
+// ConvertToMap accepts a map[string]interface{} or struct and converts it to a
+// map[string]*dynamodb.AttributeValue.
+//
+// If in contains any structs, it is first JSON encoded/decoded to convert it
+// to a map[string]interface{}, so `json` struct tags are respected.
+//
+// Deprecated: Use MarshalMap instead
+func ConvertToMap(in interface{}) (item map[string]*dynamodb.AttributeValue, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if e, ok := r.(runtime.Error); ok {
+ err = e
+ } else if s, ok := r.(string); ok {
+ err = fmt.Errorf(s)
+ } else {
+ err = r.(error)
+ }
+ item = nil
+ }
+ }()
+
+ if in == nil {
+ return nil, awserr.New("SerializationError",
+ "in must be a map[string]interface{} or struct, got ", nil)
+ }
+
+ v := reflect.ValueOf(in)
+ if v.Kind() != reflect.Struct && !(v.Kind() == reflect.Map && v.Type().Key().Kind() == reflect.String) {
+ return nil, awserr.New("SerializationError",
+ fmt.Sprintf("in must be a map[string]interface{} or struct, got %s",
+ v.Type().String()),
+ nil)
+ }
+
+ if isTyped(reflect.TypeOf(in)) {
+ var out map[string]interface{}
+ in = convertToUntyped(in, out)
+ }
+
+ item = make(map[string]*dynamodb.AttributeValue)
+ for k, v := range in.(map[string]interface{}) {
+ item[k] = convertTo(v)
+ }
+
+ return item, nil
+}
+
+// ConvertFromMap accepts a map[string]*dynamodb.AttributeValue and converts it to a
+// map[string]interface{} or struct.
+//
+// If v points to a struct, the result is first converted to a
+// map[string]interface{}, then JSON encoded/decoded to convert it to the
+// struct, so `json` struct tags are respected.
+//
+// Deprecated: Use UnmarshalMap instead
+func ConvertFromMap(item map[string]*dynamodb.AttributeValue, v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if e, ok := r.(runtime.Error); ok {
+ err = e
+ } else if s, ok := r.(string); ok {
+ err = fmt.Errorf(s)
+ } else {
+ err = r.(error)
+ }
+ item = nil
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return awserr.New("SerializationError",
+ fmt.Sprintf("v must be a non-nil pointer to a map[string]interface{} or struct, got %s",
+ rv.Type()),
+ nil)
+ }
+ if rv.Elem().Kind() != reflect.Struct && !(rv.Elem().Kind() == reflect.Map && rv.Elem().Type().Key().Kind() == reflect.String) {
+ return awserr.New("SerializationError",
+ fmt.Sprintf("v must be a non-nil pointer to a map[string]interface{} or struct, got %s",
+ rv.Type()),
+ nil)
+ }
+
+ m := make(map[string]interface{})
+ for k, v := range item {
+ m[k] = convertFrom(v)
+ }
+
+ if isTyped(reflect.TypeOf(v)) {
+ err = convertToTyped(m, v)
+ } else {
+ rv.Elem().Set(reflect.ValueOf(m))
+ }
+
+ return err
+}
+
+// ConvertToList accepts an array or slice and converts it to a
+// []*dynamodb.AttributeValue.
+//
+// Converting []byte fields to dynamodb.AttributeValue are only currently supported
+// if the input is a map[string]interface{} type. []byte within typed structs are not
+// converted correctly and are converted into base64 strings. This is a known bug,
+// and will be fixed in a later release.
+//
+// If in contains any structs, it is first JSON encoded/decoded to convert it
+// to a []interface{}, so `json` struct tags are respected.
+//
+// Deprecated: Use MarshalList instead
+func ConvertToList(in interface{}) (item []*dynamodb.AttributeValue, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if e, ok := r.(runtime.Error); ok {
+ err = e
+ } else if s, ok := r.(string); ok {
+ err = fmt.Errorf(s)
+ } else {
+ err = r.(error)
+ }
+ item = nil
+ }
+ }()
+
+ if in == nil {
+ return nil, awserr.New("SerializationError",
+ "in must be an array or slice, got ",
+ nil)
+ }
+
+ v := reflect.ValueOf(in)
+ if v.Kind() != reflect.Array && v.Kind() != reflect.Slice {
+ return nil, awserr.New("SerializationError",
+ fmt.Sprintf("in must be an array or slice, got %s",
+ v.Type().String()),
+ nil)
+ }
+
+ if isTyped(reflect.TypeOf(in)) {
+ var out []interface{}
+ in = convertToUntyped(in, out)
+ }
+
+ item = make([]*dynamodb.AttributeValue, 0, len(in.([]interface{})))
+ for _, v := range in.([]interface{}) {
+ item = append(item, convertTo(v))
+ }
+
+ return item, nil
+}
+
+// ConvertFromList accepts a []*dynamodb.AttributeValue and converts it to an array or
+// slice.
+//
+// If v contains any structs, the result is first converted to a
+// []interface{}, then JSON encoded/decoded to convert it to the typed array
+// or slice, so `json` struct tags are respected.
+//
+// Deprecated: Use UnmarshalList instead
+func ConvertFromList(item []*dynamodb.AttributeValue, v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if e, ok := r.(runtime.Error); ok {
+ err = e
+ } else if s, ok := r.(string); ok {
+ err = fmt.Errorf(s)
+ } else {
+ err = r.(error)
+ }
+ item = nil
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return awserr.New("SerializationError",
+ fmt.Sprintf("v must be a non-nil pointer to an array or slice, got %s",
+ rv.Type()),
+ nil)
+ }
+ if rv.Elem().Kind() != reflect.Array && rv.Elem().Kind() != reflect.Slice {
+ return awserr.New("SerializationError",
+ fmt.Sprintf("v must be a non-nil pointer to an array or slice, got %s",
+ rv.Type()),
+ nil)
+ }
+
+ l := make([]interface{}, 0, len(item))
+ for _, v := range item {
+ l = append(l, convertFrom(v))
+ }
+
+ if isTyped(reflect.TypeOf(v)) {
+ err = convertToTyped(l, v)
+ } else {
+ rv.Elem().Set(reflect.ValueOf(l))
+ }
+
+ return err
+}
+
+// ConvertTo accepts any interface{} and converts it to a *dynamodb.AttributeValue.
+//
+// If in contains any structs, it is first JSON encoded/decoded to convert it
+// to an interface{}, so `json` struct tags are respected.
+//
+// Deprecated: Use Marshal instead
+func ConvertTo(in interface{}) (item *dynamodb.AttributeValue, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if e, ok := r.(runtime.Error); ok {
+ err = e
+ } else if s, ok := r.(string); ok {
+ err = fmt.Errorf(s)
+ } else {
+ err = r.(error)
+ }
+ item = nil
+ }
+ }()
+
+ if in != nil && isTyped(reflect.TypeOf(in)) {
+ var out interface{}
+ in = convertToUntyped(in, out)
+ }
+
+ item = convertTo(in)
+ return item, nil
+}
+
+// ConvertFrom accepts a *dynamodb.AttributeValue and converts it to any interface{}.
+//
+// If v contains any structs, the result is first converted to an interface{},
+// then JSON encoded/decoded to convert it to the struct, so `json` struct tags
+// are respected.
+//
+// Deprecated: Use Unmarshal instead
+func ConvertFrom(item *dynamodb.AttributeValue, v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if e, ok := r.(runtime.Error); ok {
+ err = e
+ } else if s, ok := r.(string); ok {
+ err = fmt.Errorf(s)
+ } else {
+ err = r.(error)
+ }
+ item = nil
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return awserr.New("SerializationError",
+ fmt.Sprintf("v must be a non-nil pointer to an interface{} or struct, got %s",
+ rv.Type()),
+ nil)
+ }
+ if rv.Elem().Kind() != reflect.Interface && rv.Elem().Kind() != reflect.Struct {
+ return awserr.New("SerializationError",
+ fmt.Sprintf("v must be a non-nil pointer to an interface{} or struct, got %s",
+ rv.Type()),
+ nil)
+ }
+
+ res := convertFrom(item)
+
+ if isTyped(reflect.TypeOf(v)) {
+ err = convertToTyped(res, v)
+ } else if res != nil {
+ rv.Elem().Set(reflect.ValueOf(res))
+ }
+
+ return err
+}
+
+func isTyped(v reflect.Type) bool {
+ switch v.Kind() {
+ case reflect.Struct:
+ return true
+ case reflect.Array, reflect.Slice:
+ if isTyped(v.Elem()) {
+ return true
+ }
+ case reflect.Map:
+ if isTyped(v.Key()) {
+ return true
+ }
+ if isTyped(v.Elem()) {
+ return true
+ }
+ case reflect.Ptr:
+ return isTyped(v.Elem())
+ }
+ return false
+}
+
+func convertToUntyped(in, out interface{}) interface{} {
+ b, err := json.Marshal(in)
+ if err != nil {
+ panic(err)
+ }
+
+ decoder := json.NewDecoder(bytes.NewReader(b))
+ decoder.UseNumber()
+ err = decoder.Decode(&out)
+ if err != nil {
+ panic(err)
+ }
+
+ return out
+}
+
+func convertToTyped(in, out interface{}) error {
+ b, err := json.Marshal(in)
+ if err != nil {
+ return err
+ }
+
+ decoder := json.NewDecoder(bytes.NewReader(b))
+ return decoder.Decode(&out)
+}
+
+func convertTo(in interface{}) *dynamodb.AttributeValue {
+ a := &dynamodb.AttributeValue{}
+
+ if in == nil {
+ a.NULL = new(bool)
+ *a.NULL = true
+ return a
+ }
+
+ if m, ok := in.(map[string]interface{}); ok {
+ a.M = make(map[string]*dynamodb.AttributeValue)
+ for k, v := range m {
+ a.M[k] = convertTo(v)
+ }
+ return a
+ }
+
+ v := reflect.ValueOf(in)
+ switch v.Kind() {
+ case reflect.Bool:
+ a.BOOL = new(bool)
+ *a.BOOL = v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ a.N = new(string)
+ *a.N = strconv.FormatInt(v.Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ a.N = new(string)
+ *a.N = strconv.FormatUint(v.Uint(), 10)
+ case reflect.Float32, reflect.Float64:
+ a.N = new(string)
+ *a.N = strconv.FormatFloat(v.Float(), 'f', -1, 64)
+ case reflect.String:
+ if n, ok := in.(json.Number); ok {
+ a.N = new(string)
+ *a.N = n.String()
+ } else {
+ a.S = new(string)
+ *a.S = v.String()
+ }
+ case reflect.Slice:
+ switch v.Type() {
+ case reflect.TypeOf(([]byte)(nil)):
+ a.B = v.Bytes()
+ default:
+ a.L = make([]*dynamodb.AttributeValue, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ a.L[i] = convertTo(v.Index(i).Interface())
+ }
+ }
+ default:
+ panic(fmt.Sprintf("the type %s is not supported", v.Type().String()))
+ }
+
+ return a
+}
+
+func convertFrom(a *dynamodb.AttributeValue) interface{} {
+ if a.S != nil {
+ return *a.S
+ }
+
+ if a.N != nil {
+ // Number is tricky because we don't know which numeric type to use. Here we
+ // simply try the different types from most to least restrictive.
+ if n, err := strconv.ParseInt(*a.N, 10, 64); err == nil {
+ return int(n)
+ }
+ if n, err := strconv.ParseUint(*a.N, 10, 64); err == nil {
+ return uint(n)
+ }
+ n, err := strconv.ParseFloat(*a.N, 64)
+ if err != nil {
+ panic(err)
+ }
+ return n
+ }
+
+ if a.BOOL != nil {
+ return *a.BOOL
+ }
+
+ if a.NULL != nil {
+ return nil
+ }
+
+ if a.M != nil {
+ m := make(map[string]interface{})
+ for k, v := range a.M {
+ m[k] = convertFrom(v)
+ }
+ return m
+ }
+
+ if a.L != nil {
+ l := make([]interface{}, len(a.L))
+ for index, v := range a.L {
+ l[index] = convertFrom(v)
+ }
+ return l
+ }
+
+ if a.B != nil {
+ return a.B
+ }
+
+ panic(fmt.Sprintf("%#v is not a supported dynamodb.AttributeValue", a))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go
new file mode 100644
index 000000000..9e816cdad
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go
@@ -0,0 +1,775 @@
+package dynamodbattribute
+
+import (
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/dynamodb"
+)
+
+// An Unmarshaler is an interface to provide custom unmarshaling of
+// AttributeValues. Use this to provide custom logic determining
+// how AttributeValues should be unmarshaled. For example:
+//
+// type ExampleUnmarshaler struct {
+// Value int
+// }
+//
+// func (u *ExampleUnmarshaler) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
+// if av.N == nil {
+// return nil
+// }
+//
+// n, err := strconv.ParseInt(*av.N, 10, 0)
+// if err != nil {
+// return err
+// }
+//
+// u.Value = int(n)
+// return nil
+// }
+type Unmarshaler interface {
+ UnmarshalDynamoDBAttributeValue(*dynamodb.AttributeValue) error
+}
+
+// Unmarshal will unmarshal DynamoDB AttributeValues to Go value types.
+// Both generic interface{} and concrete types are valid unmarshal
+// destination types.
+//
+// Unmarshal will allocate maps, slices, and pointers as needed to
+// unmarshal the AttributeValue into the provided type value.
+//
+// When unmarshaling AttributeValues into structs Unmarshal matches
+// the field names of the struct to the AttributeValue Map keys.
+// Initially it will look for an exact field name match, but will
+// fall back to a case-insensitive match if no exact match is found.
+//
+// With the exception of omitempty, omitemptyelem, binaryset, numberset
+// and stringset all struct tags used by Marshal are also used by
+// Unmarshal.
+//
+// When decoding AttributeValues to interfaces, Unmarshal will use the
+// following types.
+//
+// []byte, AV Binary (B)
+// [][]byte, AV Binary Set (BS)
+// bool, AV Boolean (BOOL)
+// []interface{}, AV List (L)
+// map[string]interface{}, AV Map (M)
+// float64, AV Number (N)
+// Number, AV Number (N) with UseNumber set
+// []float64, AV Number Set (NS)
+// []Number, AV Number Set (NS) with UseNumber set
+// string, AV String (S)
+// []string, AV String Set (SS)
+//
+// If the Decoder option UseNumber is set, numbers will be unmarshaled
+// as Number values instead of float64. Use this to maintain the original
+// string formatting of the number as it was represented in the AttributeValue.
+// It also provides the opportunity to parse the number string based on
+// individual use cases.
+//
+// When unmarshaling any error that occurs will halt the unmarshal
+// and return the error.
+//
+// The output value provided must be a non-nil pointer
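+//
+// A minimal usage sketch (the Record type is an assumption):
+//
+//    type Record struct {
+//        ID string
+//    }
+//
+//    var r Record
+//    err := Unmarshal(av, &r)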
+func Unmarshal(av *dynamodb.AttributeValue, out interface{}) error {
+ return NewDecoder().Decode(av, out)
+}
+
+// UnmarshalMap is an alias for Unmarshal which unmarshals from
+// a map of AttributeValues.
+//
+// The output value provided must be a non-nil pointer
+func UnmarshalMap(m map[string]*dynamodb.AttributeValue, out interface{}) error {
+ return NewDecoder().Decode(&dynamodb.AttributeValue{M: m}, out)
+}
+
+// UnmarshalList is an alias for Unmarshal func which unmarshals
+// a slice of AttributeValues.
+//
+// The output value provided must be a non-nil pointer
+func UnmarshalList(l []*dynamodb.AttributeValue, out interface{}) error {
+ return NewDecoder().Decode(&dynamodb.AttributeValue{L: l}, out)
+}
+
+// UnmarshalListOfMaps is an alias for Unmarshal func which unmarshals a
+// slice of maps of attribute values.
+//
+// This is useful when you need to unmarshal the Items from a DynamoDB
+// Query API call.
+//
+// The output value provided must be a non-nil pointer
+func UnmarshalListOfMaps(l []map[string]*dynamodb.AttributeValue, out interface{}) error {
+ items := make([]*dynamodb.AttributeValue, len(l))
+ for i, m := range l {
+ items[i] = &dynamodb.AttributeValue{M: m}
+ }
+
+ return UnmarshalList(items, out)
+}
+
+// A Decoder provides unmarshaling of AttributeValues to Go value types.
+type Decoder struct {
+ MarshalOptions
+
+ // Instructs the decoder to decode AttributeValue Numbers as
+ // Number type instead of float64 when the destination type
+ // is interface{}. Similar to encoding/json.Number
+ UseNumber bool
+}
+
+// NewDecoder creates a new Decoder with default configuration. Use
+// the `opts` functional options to override the default configuration.
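+//
+// For example, to decode numbers into the Number type when the destination
+// is an interface{} (a sketch):
+//
+//    d := NewDecoder(func(d *Decoder) { d.UseNumber = true })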
+func NewDecoder(opts ...func(*Decoder)) *Decoder {
+ d := &Decoder{
+ MarshalOptions: MarshalOptions{
+ SupportJSONTags: true,
+ },
+ }
+ for _, o := range opts {
+ o(d)
+ }
+
+ return d
+}
+
+// Decode will unmarshal an AttributeValue into a Go value type. An error
+// will be returned if the decoder is unable to unmarshal the AttributeValue
+// into the provided Go value type.
+//
+// The output value provided must be a non-nil pointer
+func (d *Decoder) Decode(av *dynamodb.AttributeValue, out interface{}, opts ...func(*Decoder)) error {
+ v := reflect.ValueOf(out)
+ if v.Kind() != reflect.Ptr || v.IsNil() || !v.IsValid() {
+ return &InvalidUnmarshalError{Type: reflect.TypeOf(out)}
+ }
+
+ return d.decode(av, v, tag{})
+}
+
+var stringInterfaceMapType = reflect.TypeOf(map[string]interface{}(nil))
+var byteSliceType = reflect.TypeOf([]byte(nil))
+var byteSliceSlicetype = reflect.TypeOf([][]byte(nil))
+var numberType = reflect.TypeOf(Number(""))
+var timeType = reflect.TypeOf(time.Time{})
+var ptrStringType = reflect.TypeOf(aws.String(""))
+
+func (d *Decoder) decode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+ var u Unmarshaler
+ if av == nil || av.NULL != nil {
+ u, v = indirect(v, true)
+ if u != nil {
+ return u.UnmarshalDynamoDBAttributeValue(av)
+ }
+ return d.decodeNull(v)
+ }
+
+ u, v = indirect(v, false)
+ if u != nil {
+ return u.UnmarshalDynamoDBAttributeValue(av)
+ }
+
+ switch {
+ case len(av.B) != 0 || (av.B != nil && d.EnableEmptyCollections):
+ return d.decodeBinary(av.B, v)
+ case av.BOOL != nil:
+ return d.decodeBool(av.BOOL, v)
+ case len(av.BS) != 0 || (av.BS != nil && d.EnableEmptyCollections):
+ return d.decodeBinarySet(av.BS, v)
+ case len(av.L) != 0 || (av.L != nil && d.EnableEmptyCollections):
+ return d.decodeList(av.L, v)
+ case len(av.M) != 0 || (av.M != nil && d.EnableEmptyCollections):
+ return d.decodeMap(av.M, v)
+ case av.N != nil:
+ return d.decodeNumber(av.N, v, fieldTag)
+ case len(av.NS) != 0 || (av.NS != nil && d.EnableEmptyCollections):
+ return d.decodeNumberSet(av.NS, v)
+ case av.S != nil: // DynamoDB does not allow for empty strings, so we do not consider the length or EnableEmptyCollections flag here
+ return d.decodeString(av.S, v, fieldTag)
+ case len(av.SS) != 0 || (av.SS != nil && d.EnableEmptyCollections):
+ return d.decodeStringSet(av.SS, v)
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeBinary(b []byte, v reflect.Value) error {
+ if v.Kind() == reflect.Interface {
+ buf := make([]byte, len(b))
+ copy(buf, b)
+ v.Set(reflect.ValueOf(buf))
+ return nil
+ }
+
+ if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+ return &UnmarshalTypeError{Value: "binary", Type: v.Type()}
+ }
+
+ if v.Type() == byteSliceType {
+ // Optimization for []byte types
+ if v.IsNil() || v.Cap() < len(b) {
+ v.Set(reflect.MakeSlice(byteSliceType, len(b), len(b)))
+ } else if v.Len() != len(b) {
+ v.SetLen(len(b))
+ }
+ copy(v.Interface().([]byte), b)
+ return nil
+ }
+
+ switch v.Type().Elem().Kind() {
+ case reflect.Uint8:
+ // Fall back to reflection copy for types aliased to []byte.
+ if v.Kind() != reflect.Array && (v.IsNil() || v.Cap() < len(b)) {
+ v.Set(reflect.MakeSlice(v.Type(), len(b), len(b)))
+ } else if v.Len() != len(b) {
+ v.SetLen(len(b))
+ }
+ for i := 0; i < len(b); i++ {
+ v.Index(i).SetUint(uint64(b[i]))
+ }
+ default:
+ if v.Kind() == reflect.Array {
+ switch v.Type().Elem().Kind() {
+ case reflect.Uint8:
+ reflect.Copy(v, reflect.ValueOf(b))
+ default:
+ return &UnmarshalTypeError{Value: "binary", Type: v.Type()}
+ }
+
+ break
+ }
+
+ return &UnmarshalTypeError{Value: "binary", Type: v.Type()}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeBool(b *bool, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Bool, reflect.Interface:
+ v.Set(reflect.ValueOf(*b).Convert(v.Type()))
+ default:
+ return &UnmarshalTypeError{Value: "bool", Type: v.Type()}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeBinarySet(bs [][]byte, v reflect.Value) error {
+ isArray := false
+
+ switch v.Kind() {
+ case reflect.Slice:
+ // Make room for the slice elements if needed
+ if v.IsNil() || v.Cap() < len(bs) {
+ // What about if ignoring nil/empty values?
+ v.Set(reflect.MakeSlice(v.Type(), 0, len(bs)))
+ }
+ case reflect.Array:
+ // Limited to capacity of existing array.
+ isArray = true
+ case reflect.Interface:
+ set := make([][]byte, len(bs))
+ for i, b := range bs {
+ if err := d.decodeBinary(b, reflect.ValueOf(&set[i]).Elem()); err != nil {
+ return err
+ }
+ }
+ v.Set(reflect.ValueOf(set))
+ return nil
+ default:
+ return &UnmarshalTypeError{Value: "binary set", Type: v.Type()}
+ }
+
+ for i := 0; i < v.Cap() && i < len(bs); i++ {
+ if !isArray {
+ v.SetLen(i + 1)
+ }
+ u, elem := indirect(v.Index(i), false)
+ if u != nil {
+ return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{BS: bs})
+ }
+ if err := d.decodeBinary(bs[i], elem); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeNumber(n *string, v reflect.Value, fieldTag tag) error {
+ switch v.Kind() {
+ case reflect.Interface:
+ i, err := d.decodeNumberToInterface(n)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(i))
+ return nil
+ case reflect.String:
+ if v.Type() == numberType { // Support Number value type
+ v.Set(reflect.ValueOf(Number(*n)))
+ return nil
+ }
+ v.Set(reflect.ValueOf(*n))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ i, err := strconv.ParseInt(*n, 10, 64)
+ if err != nil {
+ return err
+ }
+ if v.OverflowInt(i) {
+ return &UnmarshalTypeError{
+ Value: fmt.Sprintf("number overflow, %s", *n),
+ Type: v.Type(),
+ }
+ }
+ v.SetInt(i)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ i, err := strconv.ParseUint(*n, 10, 64)
+ if err != nil {
+ return err
+ }
+ if v.OverflowUint(i) {
+ return &UnmarshalTypeError{
+ Value: fmt.Sprintf("number overflow, %s", *n),
+ Type: v.Type(),
+ }
+ }
+ v.SetUint(i)
+ case reflect.Float32, reflect.Float64:
+ i, err := strconv.ParseFloat(*n, 64)
+ if err != nil {
+ return err
+ }
+ if v.OverflowFloat(i) {
+ return &UnmarshalTypeError{
+ Value: fmt.Sprintf("number overflow, %s", *n),
+ Type: v.Type(),
+ }
+ }
+ v.SetFloat(i)
+ default:
+ if v.Type().ConvertibleTo(timeType) && fieldTag.AsUnixTime {
+ t, err := decodeUnixTime(*n)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(t).Convert(v.Type()))
+ return nil
+ }
+ return &UnmarshalTypeError{Value: "number", Type: v.Type()}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeNumberToInterface(n *string) (interface{}, error) {
+ if d.UseNumber {
+ return Number(*n), nil
+ }
+
+ // Default to float64 for all numbers
+ return strconv.ParseFloat(*n, 64)
+}
+
+func (d *Decoder) decodeNumberSet(ns []*string, v reflect.Value) error {
+ isArray := false
+
+ switch v.Kind() {
+ case reflect.Slice:
+ // Make room for the slice elements if needed
+ if v.IsNil() || v.Cap() < len(ns) {
+ // What about if ignoring nil/empty values?
+ v.Set(reflect.MakeSlice(v.Type(), 0, len(ns)))
+ }
+ case reflect.Array:
+ // Limited to capacity of existing array.
+ isArray = true
+ case reflect.Interface:
+ if d.UseNumber {
+ set := make([]Number, len(ns))
+ for i, n := range ns {
+ if err := d.decodeNumber(n, reflect.ValueOf(&set[i]).Elem(), tag{}); err != nil {
+ return err
+ }
+ }
+ v.Set(reflect.ValueOf(set))
+ } else {
+ set := make([]float64, len(ns))
+ for i, n := range ns {
+ if err := d.decodeNumber(n, reflect.ValueOf(&set[i]).Elem(), tag{}); err != nil {
+ return err
+ }
+ }
+ v.Set(reflect.ValueOf(set))
+ }
+ return nil
+ default:
+ return &UnmarshalTypeError{Value: "number set", Type: v.Type()}
+ }
+
+ for i := 0; i < v.Cap() && i < len(ns); i++ {
+ if !isArray {
+ v.SetLen(i + 1)
+ }
+ u, elem := indirect(v.Index(i), false)
+ if u != nil {
+ return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{NS: ns})
+ }
+ if err := d.decodeNumber(ns[i], elem, tag{}); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeList(avList []*dynamodb.AttributeValue, v reflect.Value) error {
+ isArray := false
+
+ switch v.Kind() {
+ case reflect.Slice:
+ // Make room for the slice elements if needed
+ if v.IsNil() || v.Cap() < len(avList) {
+ // What about if ignoring nil/empty values?
+ v.Set(reflect.MakeSlice(v.Type(), 0, len(avList)))
+ }
+ case reflect.Array:
+ // Limited to capacity of existing array.
+ isArray = true
+ case reflect.Interface:
+ s := make([]interface{}, len(avList))
+ for i, av := range avList {
+ if err := d.decode(av, reflect.ValueOf(&s[i]).Elem(), tag{}); err != nil {
+ return err
+ }
+ }
+ v.Set(reflect.ValueOf(s))
+ return nil
+ default:
+ return &UnmarshalTypeError{Value: "list", Type: v.Type()}
+ }
+
+ // v is a slice or array; decode elements up to its capacity.
+ for i := 0; i < v.Cap() && i < len(avList); i++ {
+ if !isArray {
+ v.SetLen(i + 1)
+ }
+
+ if err := d.decode(avList[i], v.Index(i), tag{}); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeMap(avMap map[string]*dynamodb.AttributeValue, v reflect.Value) error {
+ switch v.Kind() {
+ case reflect.Map:
+ t := v.Type()
+ if t.Key().Kind() != reflect.String {
+ return &UnmarshalTypeError{Value: "map string key", Type: t.Key()}
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+ case reflect.Interface:
+ v.Set(reflect.MakeMap(stringInterfaceMapType))
+ v = v.Elem()
+ default:
+ return &UnmarshalTypeError{Value: "map", Type: v.Type()}
+ }
+
+ if v.Kind() == reflect.Map {
+ for k, av := range avMap {
+ key := reflect.New(v.Type().Key()).Elem()
+ key.SetString(k)
+ elem := reflect.New(v.Type().Elem()).Elem()
+ if err := d.decode(av, elem, tag{}); err != nil {
+ return err
+ }
+ v.SetMapIndex(key, elem)
+ }
+ } else if v.Kind() == reflect.Struct {
+ fields := unionStructFields(v.Type(), d.MarshalOptions)
+ for k, av := range avMap {
+ if f, ok := fieldByName(fields, k); ok {
+ fv := fieldByIndex(v, f.Index, func(v *reflect.Value) bool {
+ v.Set(reflect.New(v.Type().Elem()))
+ return true // to continue the loop.
+ })
+ if err := d.decode(av, fv, f.tag); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeNull(v reflect.Value) error {
+ if v.IsValid() && v.CanSet() {
+ v.Set(reflect.Zero(v.Type()))
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeString(s *string, v reflect.Value, fieldTag tag) error {
+ if fieldTag.AsString {
+ return d.decodeNumber(s, v, fieldTag)
+ }
+
+ // To maintain backwards compatibility with the ConvertFrom family of methods,
+ // which converted strings to time.Time structs.
+ if v.Type().ConvertibleTo(timeType) {
+ t, err := time.Parse(time.RFC3339, *s)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(t).Convert(v.Type()))
+ return nil
+ }
+
+ switch v.Kind() {
+ case reflect.String:
+ v.SetString(*s)
+ case reflect.Slice:
+ // To maintain backwards compatibility with the ConvertFrom family of methods
+ // which converted []byte into base64-encoded strings if the input was typed
+ if v.Type() == byteSliceType {
+ decoded, err := base64.StdEncoding.DecodeString(*s)
+ if err != nil {
+ return &UnmarshalError{Err: err, Value: "string", Type: v.Type()}
+ }
+ v.SetBytes(decoded)
+ }
+ case reflect.Interface:
+ // Ensure type aliasing is handled properly
+ v.Set(reflect.ValueOf(*s).Convert(v.Type()))
+ default:
+ return &UnmarshalTypeError{Value: "string", Type: v.Type()}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeStringSet(ss []*string, v reflect.Value) error {
+ isArray := false
+
+ switch v.Kind() {
+ case reflect.Slice:
+ // Make room for the slice elements if needed
+ if v.IsNil() || v.Cap() < len(ss) {
+ v.Set(reflect.MakeSlice(v.Type(), 0, len(ss)))
+ }
+ case reflect.Array:
+ // Limited to capacity of existing array.
+ isArray = true
+ case reflect.Interface:
+ set := make([]string, len(ss))
+ for i, s := range ss {
+ if err := d.decodeString(s, reflect.ValueOf(&set[i]).Elem(), tag{}); err != nil {
+ return err
+ }
+ }
+ v.Set(reflect.ValueOf(set))
+ return nil
+ default:
+ return &UnmarshalTypeError{Value: "string set", Type: v.Type()}
+ }
+
+ for i := 0; i < v.Cap() && i < len(ss); i++ {
+ if !isArray {
+ v.SetLen(i + 1)
+ }
+ u, elem := indirect(v.Index(i), false)
+ if u != nil {
+ return u.UnmarshalDynamoDBAttributeValue(&dynamodb.AttributeValue{SS: ss})
+ }
+ if err := d.decodeString(ss[i], elem, tag{}); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func decodeUnixTime(n string) (time.Time, error) {
+ v, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ return time.Time{}, &UnmarshalError{
+ Err: err, Value: n, Type: timeType,
+ }
+ }
+
+ return time.Unix(v, 0), nil
+}
+
+// indirect will walk a value's interface or pointer value types, returning
+// the final value or the value an Unmarshaler is defined on.
+//
+// Based on the encoding/json reflect value type indirection in the Go stdlib:
+// https://golang.org/src/encoding/json/decode.go, indirect func.
+func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+
+ return nil, v
+}
+
+// A Number represents an AttributeValue number literal.
+type Number string
+
+// Float64 attempts to cast the number to a float64, returning
+// the result of the cast or an error if the cast failed.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 attempts to cast the number to an int64, returning
+// the result of the cast or an error if the cast failed.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+// Uint64 attempts to cast the number to a uint64, returning
+// the result of the cast or an error if the cast failed.
+func (n Number) Uint64() (uint64, error) {
+ return strconv.ParseUint(string(n), 10, 64)
+}
+
+// String returns the raw number represented as a string
+func (n Number) String() string {
+ return string(n)
+}
+
+type emptyOrigError struct{}
+
+func (e emptyOrigError) OrigErr() error {
+ return nil
+}
+
+// An UnmarshalTypeError is an error type representing an error encountered
+// while unmarshaling an AttributeValue element into a Go value type. It
+// includes details about the AttributeValue type and the Go value type.
+type UnmarshalTypeError struct {
+ emptyOrigError
+ Value string
+ Type reflect.Type
+}
+
+// Error returns the string representation of the error,
+// satisfying the error interface.
+func (e *UnmarshalTypeError) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code(), e.Message())
+}
+
+// Code returns the code of the error, satisfying the awserr.Error
+// interface.
+func (e *UnmarshalTypeError) Code() string {
+ return "UnmarshalTypeError"
+}
+
+// Message returns the detailed message of the error, satisfying
+// the awserr.Error interface.
+func (e *UnmarshalTypeError) Message() string {
+ return "cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError is an error type representing an invalid type
+// encountered while unmarshaling an AttributeValue into a Go value type.
+type InvalidUnmarshalError struct {
+ emptyOrigError
+ Type reflect.Type
+}
+
+// Error returns the string representation of the error,
+// satisfying the error interface.
+func (e *InvalidUnmarshalError) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code(), e.Message())
+}
+
+// Code returns the code of the error, satisfying the awserr.Error
+// interface.
+func (e *InvalidUnmarshalError) Code() string {
+ return "InvalidUnmarshalError"
+}
+
+// Message returns the detailed message of the error, satisfying
+// the awserr.Error interface.
+func (e *InvalidUnmarshalError) Message() string {
+ if e.Type == nil {
+ return "cannot unmarshal to nil value"
+ }
+ if e.Type.Kind() != reflect.Ptr {
+ return "cannot unmarshal to non-pointer value, got " + e.Type.String()
+ }
+ return "cannot unmarshal to nil value, " + e.Type.String()
+}
+
+// An UnmarshalError wraps an error that occurred while unmarshaling a DynamoDB
+// AttributeValue element into a Go type. This is different from UnmarshalTypeError
+// in that it wraps the underlying error that occurred.
+type UnmarshalError struct {
+ Err error
+ Value string
+ Type reflect.Type
+}
+
+// Error returns the string representation of the error,
+// satisfying the error interface.
+func (e *UnmarshalError) Error() string {
+ return fmt.Sprintf("%s: %s\ncaused by: %v", e.Code(), e.Message(), e.Err)
+}
+
+// OrigErr returns the original error that caused this issue.
+func (e UnmarshalError) OrigErr() error {
+ return e.Err
+}
+
+// Code returns the code of the error, satisfying the awserr.Error
+// interface.
+func (e *UnmarshalError) Code() string {
+ return "UnmarshalError"
+}
+
+// Message returns the detailed message of the error, satisfying
+// the awserr.Error interface.
+func (e *UnmarshalError) Message() string {
+ return fmt.Sprintf("cannot unmarshal %q into %s.",
+ e.Value, e.Type.String())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/doc.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/doc.go
new file mode 100644
index 000000000..b83a29c95
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/doc.go
@@ -0,0 +1,101 @@
+// Package dynamodbattribute provides marshaling and unmarshaling utilities to
+// convert between Go types and dynamodb.AttributeValues.
+//
+// These utilities allow you to marshal slices, maps, structs, and scalar values
+// to and from dynamodb.AttributeValue. These are useful when marshaling
+// Go value types to dynamodb.AttributeValue for DynamoDB requests, or
+// unmarshaling the dynamodb.AttributeValue back into a Go value type.
+//
+// AttributeValue Marshaling
+//
+// To marshal a Go type to a dynamodb.AttributeValue you can use the Marshal
+// functions in the dynamodbattribute package. There are specialized versions
+// of these functions for collections of AttributeValue, such as maps and lists.
+//
+// The following example uses MarshalMap to convert the Record Go type to a
+// dynamodb.AttributeValue type and uses the value to make a PutItem API request.
+//
+// type Record struct {
+// ID string
+// URLs []string
+// }
+//
+// //...
+//
+// r := Record{
+// ID: "ABC123",
+// URLs: []string{
+// "https://example.com/first/link",
+// "https://example.com/second/url",
+// },
+// }
+// av, err := dynamodbattribute.MarshalMap(r)
+// if err != nil {
+// panic(fmt.Sprintf("failed to DynamoDB marshal Record, %v", err))
+// }
+//
+// _, err = svc.PutItem(&dynamodb.PutItemInput{
+// TableName: aws.String(myTableName),
+// Item: av,
+// })
+// if err != nil {
+// panic(fmt.Sprintf("failed to put Record to DynamoDB, %v", err))
+// }
+//
+// AttributeValue Unmarshaling
+//
+// To unmarshal a dynamodb.AttributeValue to a Go type you can use the Unmarshal
+// functions in the dynamodbattribute package. There are specialized versions
+// of these functions for collections of AttributeValue, such as maps and lists.
+//
+// The following example will unmarshal the result of DynamoDB's Scan API
+// operation. The Items returned by the operation will be unmarshaled into
+// a slice of the Record Go type.
+//
+// type Record struct {
+// ID string
+// URLs []string
+// }
+//
+// //...
+//
+// var records []Record
+//
+// // Use the ScanPages method to perform the scan with pagination. Use
+// // just the Scan method to make the API call without pagination.
+// err := svc.ScanPages(&dynamodb.ScanInput{
+// TableName: aws.String(myTableName),
+// }, func(page *dynamodb.ScanOutput, last bool) bool {
+// recs := []Record{}
+//
+// err := dynamodbattribute.UnmarshalListOfMaps(page.Items, &recs)
+// if err != nil {
+// panic(fmt.Sprintf("failed to unmarshal Dynamodb Scan Items, %v", err))
+// }
+//
+// records = append(records, recs...)
+//
+// return true // keep paging
+// })
+//
+// The ConvertTo, ConvertToList, ConvertToMap, ConvertFrom, ConvertFromMap,
+// and ConvertFromList methods have been deprecated. The Marshal and Unmarshal
+// functions should be used instead. The ConvertTo|From marshalers do not
+// support BinarySet, NumberSet, or StringSet, and will incorrectly marshal
+// binary data fields in structs as base64 strings.
+//
+// The Marshal and Unmarshal functions correct this behavior, and remove
+// the reliance on encoding/json. `json` struct tags are still supported. In
+// addition, support for a new struct tag `dynamodbav` was added. Support for
+// the json.Marshaler and json.Unmarshaler interfaces has been removed and
+// replaced with the dynamodbattribute.Marshaler and
+// dynamodbattribute.Unmarshaler interfaces.
+//
+// The Unmarshal functions are backwards compatible with data marshaled by
+// ConvertTo*, but the reverse is not true: objects marshaled using Marshal
+// are not necessarily usable by ConvertFrom*. This backward compatibility is
+// intended to assist with incremental upgrading of data following a switch
+// away from the Convert* family of functions.
+//
+// `time.Time` is marshaled in RFC3339 format.
+package dynamodbattribute
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go
new file mode 100644
index 000000000..c03e01d59
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go
@@ -0,0 +1,665 @@
+package dynamodbattribute
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/dynamodb"
+)
+
+// A UnixTime provides aliasing of time.Time into a type that is marshaled
+// and unmarshaled with DynamoDB AttributeValues as a number of seconds
+// since January 1, 1970 UTC instead of a string.
+//
+// This type is useful as an alternative to the struct tag `unixtime` when you
+// want to have your time value marshaled as Unix time in seconds instead of
+// the default time.RFC3339.
+//
+// It is important to note that the zero value time as unixtime is not 0
+// seconds from January 1, 1970 UTC, but -62135596800, the number of seconds
+// between January 1, 0001 UTC, and January 1, 1970 UTC.
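+//
+// A minimal usage sketch (the Record type and field name are illustrative):
+//
+//    type Record struct {
+//        CreatedAt UnixTime
+//    }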
+type UnixTime time.Time
+
+// MarshalDynamoDBAttributeValue implements the Marshaler interface so that
+// the UnixTime can be marshaled to a DynamoDB AttributeValue number
+// value encoded as the number of seconds since January 1, 1970 UTC.
+func (e UnixTime) MarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
+ t := time.Time(e)
+ s := strconv.FormatInt(t.Unix(), 10)
+ av.N = &s
+
+ return nil
+}
+
+// UnmarshalDynamoDBAttributeValue implements the Unmarshaler interface so that
+// the UnixTime can be unmarshaled from a DynamoDB AttributeValue number representing
+// the number of seconds since January 1, 1970 UTC.
+//
+// If an error occurs parsing the AttributeValue number, an UnmarshalError
+// will be returned.
+func (e *UnixTime) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
+ t, err := decodeUnixTime(aws.StringValue(av.N))
+ if err != nil {
+ return err
+ }
+
+ *e = UnixTime(t)
+ return nil
+}
+
+// A Marshaler is an interface to provide custom marshaling of Go value types
+// to AttributeValues. Use this to provide custom logic determining how a
+// Go value type should be marshaled.
+//
+// type ExampleMarshaler struct {
+// Value int
+// }
+// func (m *ExampleMarshaler) MarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
+// n := fmt.Sprintf("%v", m.Value)
+// av.N = &n
+// return nil
+// }
+//
+type Marshaler interface {
+ MarshalDynamoDBAttributeValue(*dynamodb.AttributeValue) error
+}
+
+// Marshal will serialize the passed in Go value type into a DynamoDB AttributeValue
+// type. This value can be used in DynamoDB API operations to simplify marshaling
+// your Go value types into AttributeValues.
+//
+// Marshal will recursively traverse the passed in value marshaling its
+// contents into an AttributeValue. Marshal supports basic scalars
+// (int,uint,float,bool,string), maps, slices, and structs. Anonymous
+// nested types are flattened based on Go anonymous type visibility.
+//
+// Marshaling slices to AttributeValue will default to a List for all
+// types except for []byte and [][]byte. []byte will be marshaled as
+// Binary data (B), and [][]byte will be marshaled as binary data set
+// (BS).
+//
+// The `dynamodbav` struct tag can be used to control how the value will be
+// marshaled into an AttributeValue.
+//
+// // Field is ignored
+// Field int `dynamodbav:"-"`
+//
+// // Field AttributeValue map key "myName"
+// Field int `dynamodbav:"myName"`
+//
+// // Field AttributeValue map key "myName", and
+// // Field is omitted if it is empty
+// Field int `dynamodbav:"myName,omitempty"`
+//
+// // Field AttributeValue map key "Field", and
+// // Field is omitted if it is empty
+// Field int `dynamodbav:",omitempty"`
+//
+// // Field's elems will be omitted if empty
+// // only valid for slices, and maps.
+// Field []string `dynamodbav:",omitemptyelem"`
+//
+// // Field will be marshaled as an AttributeValue string,
+// // only valid for number types (int,uint,float)
+// Field int `dynamodbav:",string"`
+//
+// // Field will be marshaled as a binary set
+// Field [][]byte `dynamodbav:",binaryset"`
+//
+// // Field will be marshaled as a number set
+// Field []int `dynamodbav:",numberset"`
+//
+// // Field will be marshaled as a string set
+// Field []string `dynamodbav:",stringset"`
+//
+// // Field will be marshaled as Unix time number in seconds.
+// // This tag is only valid with time.Time typed struct fields.
+// // Note that the zero value time as unixtime is not 0 seconds
+// // from January 1, 1970 UTC, but -62135596800, the number of seconds
+// // between January 1, 0001 UTC, and January 1, 1970 UTC.
+// Field time.Time `dynamodbav:",unixtime"`
+//
+// The omitempty tag is only used during Marshaling and is ignored for
+// Unmarshal. With omitempty, any zero value, or value that marshals to an
+// AttributeValue NULL, will be omitted from AttributeValue Maps during
+// struct marshal. The omitemptyelem tag works the same as omitempty except
+// it applies to the elements of maps and slices instead of struct fields,
+// omitting them from the marshaled AttributeValue Map, List, or Set.
+//
+// For convenience and backwards compatibility with the ConvertTo functions,
+// json struct tags are supported by Marshal and Unmarshal. If
+// both json and dynamodbav struct tags are provided, the json tag will
+// be ignored in favor of dynamodbav.
+//
+// All struct fields, including anonymous fields, are marshaled unless
+// any of the following conditions are met.
+//
+// - the field is not exported
+// - json or dynamodbav field tag is "-"
+// - json or dynamodbav field tag specifies "omitempty", and is empty.
+//
+// Pointer and interface values encode as the value pointed to or contained
+// in the interface. A nil value encodes as the AttributeValue NULL value.
+//
+// Channel, complex, and function values are not encoded and will be skipped
+// when walking the value to be marshaled.
+//
+// When marshaling, any error that occurs will halt the marshal and return
+// the error.
+//
+// Marshal cannot represent cyclic data structures and will not handle them.
+// Passing cyclic structures to Marshal will result in an infinite recursion.
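+//
+// A short sketch of marshaling a tagged struct (the Item type and its
+// fields are illustrative only):
+//
+//    type Item struct {
+//        Name  string   `dynamodbav:"name"`
+//        Tags  []string `dynamodbav:"tags,stringset"`
+//        Count int      `dynamodbav:",omitempty"`
+//    }
+//
+//    av, err := dynamodbattribute.Marshal(Item{Name: "a", Tags: []string{"x", "y"}})
+//    if err != nil {
+//        // handle the marshal error
+//    }
+//    // av.M now holds the "name" and "tags" attributes; Count was omitted.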
+func Marshal(in interface{}) (*dynamodb.AttributeValue, error) {
+ return NewEncoder().Encode(in)
+}
+
+// MarshalMap is an alias for the Marshal function which marshals a Go value
+// type to a map of AttributeValues.
+//
+// This is useful for DynamoDB APIs such as PutItem.
+func MarshalMap(in interface{}) (map[string]*dynamodb.AttributeValue, error) {
+ av, err := NewEncoder().Encode(in)
+ if err != nil || av == nil || av.M == nil {
+ return map[string]*dynamodb.AttributeValue{}, err
+ }
+
+ return av.M, nil
+}
+
+// MarshalList is an alias for the Marshal function which marshals a Go value
+// type to a slice of AttributeValues.
+func MarshalList(in interface{}) ([]*dynamodb.AttributeValue, error) {
+ av, err := NewEncoder().Encode(in)
+ if err != nil || av == nil || av.L == nil {
+ return []*dynamodb.AttributeValue{}, err
+ }
+
+ return av.L, nil
+}
+
+// A MarshalOptions is a collection of options shared between marshaling
+// and unmarshaling.
+type MarshalOptions struct {
+ // States that the encoding/json struct tags should be supported.
+ // If a `dynamodbav` struct tag is also provided, the encoding/json
+ // tag will be ignored.
+ //
+ // Enabled by default.
+ SupportJSONTags bool
+
+ // Support other custom struct tag keys, such as `yaml` or `toml`.
+ // Note that values provided with a custom TagKey must also be supported
+ // by the (un)marshalers in this package.
+ TagKey string
+
+ // EnableEmptyCollections modifies how structures, maps, and slices are (un)marshaled.
+ // When set to true, empty collection values will be preserved as their
+ // respective empty DynamoDB AttributeValue type.
+ //
+ // Disabled by default.
+ EnableEmptyCollections bool
+}
+
+// An Encoder provides marshaling Go value types to AttributeValues.
+type Encoder struct {
+ MarshalOptions
+
+ // Empty strings, "", will be marked as NULL AttributeValue types.
+ // Empty strings are not valid values for DynamoDB. This does not apply
+ // to lists, sets, or maps. Use the struct tag `omitemptyelem`
+ // to skip empty (zero) values in lists, sets, and maps.
+ //
+ // Enabled by default.
+ NullEmptyString bool
+}
+
+// NewEncoder creates a new Encoder with default configuration. Use
+// the `opts` functional options to override the default configuration.
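+//
+// For example, to disable the NullEmptyString behavior (a sketch):
+//
+//    e := NewEncoder(func(e *Encoder) {
+//        e.NullEmptyString = false
+//    })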
+func NewEncoder(opts ...func(*Encoder)) *Encoder {
+ e := &Encoder{
+ MarshalOptions: MarshalOptions{
+ SupportJSONTags: true,
+ },
+ NullEmptyString: true,
+ }
+ for _, o := range opts {
+ o(e)
+ }
+
+ return e
+}
+
+// Encode will marshal a Go value type to an AttributeValue, returning
+// the constructed AttributeValue or an error.
+func (e *Encoder) Encode(in interface{}) (*dynamodb.AttributeValue, error) {
+ av := &dynamodb.AttributeValue{}
+ if err := e.encode(av, reflect.ValueOf(in), tag{}); err != nil {
+ return nil, err
+ }
+
+ return av, nil
+}
+
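+// fieldByIndex returns the nested field of v identified by the index
+// sequence, descending through embedded structs. When a nil embedded
+// struct pointer is encountered OnEmbeddedNilStruct is invoked with it;
+// if the callback returns false, traversal stops and the value resolved
+// so far is returned.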
+func fieldByIndex(v reflect.Value, index []int,
+ OnEmbeddedNilStruct func(*reflect.Value) bool) reflect.Value {
+ fv := v
+ for i, x := range index {
+ if i > 0 {
+ if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct {
+ if fv.IsNil() && !OnEmbeddedNilStruct(&fv) {
+ break
+ }
+ fv = fv.Elem()
+ }
+ }
+ fv = fv.Field(x)
+ }
+ return fv
+}
+
+func (e *Encoder) encode(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+ // We should check for omitted values first before dereferencing.
+ if fieldTag.OmitEmpty && emptyValue(v, e.EnableEmptyCollections) {
+ encodeNull(av)
+ return nil
+ }
+
+ // Handle both pointers and interface conversion into types
+ v = valueElem(v)
+
+ if v.Kind() != reflect.Invalid {
+ if used, err := tryMarshaler(av, v); used {
+ return err
+ }
+ }
+
+ switch v.Kind() {
+ case reflect.Invalid:
+ encodeNull(av)
+ case reflect.Struct:
+ return e.encodeStruct(av, v, fieldTag)
+ case reflect.Map:
+ return e.encodeMap(av, v, fieldTag)
+ case reflect.Slice, reflect.Array:
+ return e.encodeSlice(av, v, fieldTag)
+ case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+ // do nothing for unsupported types
+ default:
+ return e.encodeScalar(av, v, fieldTag)
+ }
+
+ return nil
+}
+
+func (e *Encoder) encodeStruct(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+ // To maintain backwards compatibility with ConvertTo family of methods which
+ // converted time.Time structs to strings
+ if v.Type().ConvertibleTo(timeType) {
+ t := v.Convert(timeType).Interface().(time.Time)
+ if fieldTag.AsUnixTime {
+ return UnixTime(t).MarshalDynamoDBAttributeValue(av)
+ }
+ s := t.Format(time.RFC3339Nano)
+ av.S = &s
+ return nil
+ }
+
+ av.M = map[string]*dynamodb.AttributeValue{}
+ fields := unionStructFields(v.Type(), e.MarshalOptions)
+ for _, f := range fields {
+ if f.Name == "" {
+ return &InvalidMarshalError{msg: "map key cannot be empty"}
+ }
+
+ found := true
+ fv := fieldByIndex(v, f.Index, func(v *reflect.Value) bool {
+ found = false
+ return false // to break the loop.
+ })
+ if !found {
+ continue
+ }
+ elem := &dynamodb.AttributeValue{}
+ err := e.encode(elem, fv, f.tag)
+ if err != nil {
+ return err
+ }
+ skip, err := keepOrOmitEmpty(f.OmitEmpty, elem, err)
+ if err != nil {
+ return err
+ } else if skip {
+ continue
+ }
+
+ av.M[f.Name] = elem
+ }
+ if len(av.M) == 0 && !e.EnableEmptyCollections {
+ encodeNull(av)
+ }
+
+ return nil
+}
+
+func (e *Encoder) encodeMap(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+ av.M = map[string]*dynamodb.AttributeValue{}
+ for _, key := range v.MapKeys() {
+ keyName := fmt.Sprint(key.Interface())
+ if keyName == "" {
+ return &InvalidMarshalError{msg: "map key cannot be empty"}
+ }
+
+ elemVal := v.MapIndex(key)
+ elem := &dynamodb.AttributeValue{}
+ err := e.encode(elem, elemVal, tag{})
+ skip, err := keepOrOmitEmpty(fieldTag.OmitEmptyElem, elem, err)
+ if err != nil {
+ return err
+ } else if skip {
+ continue
+ }
+
+ av.M[keyName] = elem
+ }
+
+ if v.IsNil() || (len(av.M) == 0 && !e.EnableEmptyCollections) {
+ encodeNull(av)
+ }
+
+ return nil
+}
+
+func (e *Encoder) encodeSlice(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+ if v.Kind() == reflect.Array && v.Len() == 0 && e.EnableEmptyCollections && fieldTag.OmitEmpty {
+ encodeNull(av)
+ return nil
+ }
+
+ switch v.Type().Elem().Kind() {
+ case reflect.Uint8:
+ slice := reflect.MakeSlice(byteSliceType, v.Len(), v.Len())
+ reflect.Copy(slice, v)
+
+ b := slice.Bytes()
+ if (v.Kind() == reflect.Slice && v.IsNil()) || (len(b) == 0 && !e.EnableEmptyCollections) {
+ encodeNull(av)
+ return nil
+ }
+ av.B = append([]byte{}, b...)
+ default:
+ var elemFn func(dynamodb.AttributeValue) error
+
+ if fieldTag.AsBinSet || v.Type() == byteSliceSlicetype { // Binary Set
+ av.BS = make([][]byte, 0, v.Len())
+ elemFn = func(elem dynamodb.AttributeValue) error {
+ if elem.B == nil {
+ return &InvalidMarshalError{msg: "binary set must only contain non-nil byte slices"}
+ }
+ av.BS = append(av.BS, elem.B)
+ return nil
+ }
+ } else if fieldTag.AsNumSet { // Number Set
+ av.NS = make([]*string, 0, v.Len())
+ elemFn = func(elem dynamodb.AttributeValue) error {
+ if elem.N == nil {
+ return &InvalidMarshalError{msg: "number set must only contain non-nil string numbers"}
+ }
+ av.NS = append(av.NS, elem.N)
+ return nil
+ }
+ } else if fieldTag.AsStrSet { // String Set
+ av.SS = make([]*string, 0, v.Len())
+ elemFn = func(elem dynamodb.AttributeValue) error {
+ if elem.S == nil {
+ return &InvalidMarshalError{msg: "string set must only contain non-nil strings"}
+ }
+ av.SS = append(av.SS, elem.S)
+ return nil
+ }
+ } else { // List
+ av.L = make([]*dynamodb.AttributeValue, 0, v.Len())
+ elemFn = func(elem dynamodb.AttributeValue) error {
+ av.L = append(av.L, &elem)
+ return nil
+ }
+ }
+
+ if n, err := e.encodeList(v, fieldTag, elemFn); err != nil {
+ return err
+ } else if (v.Kind() == reflect.Slice && v.IsNil()) || (n == 0 && !e.EnableEmptyCollections) {
+ encodeNull(av)
+ }
+ }
+
+ return nil
+}
+
+func (e *Encoder) encodeList(v reflect.Value, fieldTag tag, elemFn func(dynamodb.AttributeValue) error) (int, error) {
+ count := 0
+ for i := 0; i < v.Len(); i++ {
+ elem := dynamodb.AttributeValue{}
+ err := e.encode(&elem, v.Index(i), tag{OmitEmpty: fieldTag.OmitEmptyElem})
+ skip, err := keepOrOmitEmpty(fieldTag.OmitEmptyElem, &elem, err)
+ if err != nil {
+ return 0, err
+ } else if skip {
+ continue
+ }
+
+ if err := elemFn(elem); err != nil {
+ return 0, err
+ }
+ count++
+ }
+
+ return count, nil
+}
+
+func (e *Encoder) encodeScalar(av *dynamodb.AttributeValue, v reflect.Value, fieldTag tag) error {
+ if v.Type() == numberType {
+ s := v.String()
+ if fieldTag.AsString {
+ av.S = &s
+ } else {
+ av.N = &s
+ }
+ return nil
+ }
+
+ switch v.Kind() {
+ case reflect.Bool:
+ av.BOOL = new(bool)
+ *av.BOOL = v.Bool()
+ case reflect.String:
+ if err := e.encodeString(av, v); err != nil {
+ return err
+ }
+ default:
+ // Fallback to encoding numbers, will return invalid type if not supported
+ if err := e.encodeNumber(av, v); err != nil {
+ return err
+ }
+ if fieldTag.AsString && av.NULL == nil && av.N != nil {
+ av.S = av.N
+ av.N = nil
+ }
+ }
+
+ return nil
+}
+
+func (e *Encoder) encodeNumber(av *dynamodb.AttributeValue, v reflect.Value) error {
+ if used, err := tryMarshaler(av, v); used {
+ return err
+ }
+
+ var out string
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ out = encodeInt(v.Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ out = encodeUint(v.Uint())
+ case reflect.Float32:
+ out = encodeFloat(v.Float(), 32)
+ case reflect.Float64:
+ out = encodeFloat(v.Float(), 64)
+ default:
+ return &unsupportedMarshalTypeError{Type: v.Type()}
+ }
+
+ av.N = &out
+
+ return nil
+}
+
+func (e *Encoder) encodeString(av *dynamodb.AttributeValue, v reflect.Value) error {
+ if used, err := tryMarshaler(av, v); used {
+ return err
+ }
+
+ switch v.Kind() {
+ case reflect.String:
+ s := v.String()
+ if len(s) == 0 && e.NullEmptyString {
+ encodeNull(av)
+ } else {
+ av.S = &s
+ }
+ default:
+ return &unsupportedMarshalTypeError{Type: v.Type()}
+ }
+
+ return nil
+}
+
+func encodeInt(i int64) string {
+ return strconv.FormatInt(i, 10)
+}
+func encodeUint(u uint64) string {
+ return strconv.FormatUint(u, 10)
+}
+func encodeFloat(f float64, bitSize int) string {
+ return strconv.FormatFloat(f, 'f', -1, bitSize)
+}
+func encodeNull(av *dynamodb.AttributeValue) {
+ t := true
+ *av = dynamodb.AttributeValue{NULL: &t}
+}
+
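+// valueElem unwraps interface and pointer values until a concrete value is
+// reached. A nil pointer or interface yields an invalid reflect.Value, which
+// callers treat as a NULL AttributeValue.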
+func valueElem(v reflect.Value) reflect.Value {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr:
+ for v.Kind() == reflect.Interface || v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ }
+
+ return v
+}
+
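+// emptyValue reports whether v should be treated as empty for the purposes
+// of the omitempty tag. When emptyCollections is true, empty but non-nil
+// arrays, maps, and slices are not considered empty so they can be encoded
+// as empty collections.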
+func emptyValue(v reflect.Value, emptyCollections bool) bool {
+ switch v.Kind() {
+ case reflect.Array:
+ return v.Len() == 0 && !emptyCollections
+ case reflect.Map, reflect.Slice:
+ return v.IsNil() || (v.Len() == 0 && !emptyCollections)
+ case reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
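+// tryMarshaler invokes the value's custom Marshaler implementation if it has
+// one, taking the address of addressable named non-pointer values first so
+// pointer-receiver implementations are found. It reports whether a Marshaler
+// was used, along with any error it returned.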
+func tryMarshaler(av *dynamodb.AttributeValue, v reflect.Value) (bool, error) {
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+
+ if v.Type().NumMethod() == 0 {
+ return false, nil
+ }
+
+ if m, ok := v.Interface().(Marshaler); ok {
+ return true, m.MarshalDynamoDBAttributeValue(av)
+ }
+
+ return false, nil
+}
+
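+// keepOrOmitEmpty reports whether an encoded element should be skipped.
+// Unsupported marshal types are silently skipped, any other error is
+// propagated, and elements that encoded to NULL are skipped when omitEmpty
+// is set.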
+func keepOrOmitEmpty(omitEmpty bool, av *dynamodb.AttributeValue, err error) (bool, error) {
+ if err != nil {
+ if _, ok := err.(*unsupportedMarshalTypeError); ok {
+ return true, nil
+ }
+ return false, err
+ }
+
+ if av.NULL != nil && omitEmpty {
+ return true, nil
+ }
+
+ return false, nil
+}
+
+// An InvalidMarshalError is an error type representing an error
+// occurring when marshaling a Go value type to an AttributeValue.
+type InvalidMarshalError struct {
+ emptyOrigError
+ msg string
+}
+
+// Error returns the string representation of the error.
+// satisfying the error interface
+func (e *InvalidMarshalError) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code(), e.Message())
+}
+
+// Code returns the code of the error, satisfying the awserr.Error
+// interface.
+func (e *InvalidMarshalError) Code() string {
+ return "InvalidMarshalError"
+}
+
+// Message returns the detailed message of the error, satisfying
+// the awserr.Error interface.
+func (e *InvalidMarshalError) Message() string {
+ return e.msg
+}
+
+// An unsupportedMarshalTypeError represents a Go value type
+// which cannot be marshaled into an AttributeValue and should
+// be skipped by the marshaler.
+type unsupportedMarshalTypeError struct {
+ emptyOrigError
+ Type reflect.Type
+}
+
+// Error returns the string representation of the error.
+// satisfying the error interface
+func (e *unsupportedMarshalTypeError) Error() string {
+ return fmt.Sprintf("%s: %s", e.Code(), e.Message())
+}
+
+// Code returns the code of the error, satisfying the awserr.Error
+// interface.
+func (e *unsupportedMarshalTypeError) Code() string {
+ return "unsupportedMarshalTypeError"
+}
+
+// Message returns the detailed message of the error, satisfying
+// the awserr.Error interface.
+func (e *unsupportedMarshalTypeError) Message() string {
+ return "Go value type " + e.Type.String() + " is not supported"
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/field.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/field.go
new file mode 100644
index 000000000..f1c74fec3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/field.go
@@ -0,0 +1,273 @@
+package dynamodbattribute
+
+import (
+ "reflect"
+ "sort"
+ "strings"
+)
+
+type field struct {
+ tag
+
+ Name string
+ NameFromTag bool
+
+ Index []int
+ Type reflect.Type
+}
+
+func fieldByName(fields []field, name string) (field, bool) {
+ foldExists := false
+ foldField := field{}
+
+ for _, f := range fields {
+ if f.Name == name {
+ return f, true
+ }
+ if !foldExists && strings.EqualFold(f.Name, name) {
+ foldField = f
+ foldExists = true
+ }
+ }
+
+ return foldField, foldExists
+}
+
+func buildField(pIdx []int, i int, sf reflect.StructField, fieldTag tag) field {
+ f := field{
+ Name: sf.Name,
+ Type: sf.Type,
+ tag: fieldTag,
+ }
+ if len(fieldTag.Name) != 0 {
+ f.NameFromTag = true
+ f.Name = fieldTag.Name
+ }
+
+ f.Index = make([]int, len(pIdx)+1)
+ copy(f.Index, pIdx)
+ f.Index[len(pIdx)] = i
+
+ return f
+}
+
+func unionStructFields(t reflect.Type, opts MarshalOptions) []field {
+ fields := enumFields(t, opts)
+
+ sort.Sort(fieldsByName(fields))
+
+ fields = visibleFields(fields)
+
+ return fields
+}
+
+// enumFields will recursively iterate through a structure and its nested
+// anonymous fields.
+//
+// Based on the encoding/json struct field enumeration of the Go Stdlib
+// https://golang.org/src/encoding/json/encode.go typeFields func.
+func enumFields(t reflect.Type, opts MarshalOptions) []field {
+ // Fields to explore
+ current := []field{}
+ next := []field{{Type: t}}
+
+ // count of queued names
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ visited := map[reflect.Type]struct{}{}
+ fields := []field{}
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if _, ok := visited[f.Type]; ok {
+ continue
+ }
+ visited[f.Type] = struct{}{}
+
+ for i := 0; i < f.Type.NumField(); i++ {
+ sf := f.Type.Field(i)
+ if sf.PkgPath != "" && !sf.Anonymous {
+ // Ignore unexported and non-anonymous fields
+ // unexported but anonymous field may still be used if
+ // the type has exported nested fields
+ continue
+ }
+
+ fieldTag := tag{}
+ fieldTag.parseAVTag(sf.Tag)
+ // Because MarshalOptions.TagKey must be explicitly set, use it
+ // over JSON, which is enabled by default.
+ if opts.TagKey != "" && fieldTag == (tag{}) {
+ fieldTag.parseStructTag(opts.TagKey, sf.Tag)
+ } else if opts.SupportJSONTags && fieldTag == (tag{}) {
+ fieldTag.parseStructTag("json", sf.Tag)
+ }
+
+ if fieldTag.Ignore {
+ continue
+ }
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+
+ structField := buildField(f.Index, i, sf, fieldTag)
+ structField.Type = ft
+
+ if !sf.Anonymous || ft.Kind() != reflect.Struct {
+ fields = append(fields, structField)
+ if count[f.Type] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, structField)
+ }
+ continue
+ }
+
+ // Record new anon struct to explore next round
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, structField)
+ }
+ }
+ }
+ }
+
+ return fields
+}
+
+// visibleFields will return a slice of fields which are visible based on
+// Go's standard visibility rules with the exception of ties being broken
+// by depth and struct tag naming.
+//
+// Based on the encoding/json field filtering of the Go Stdlib
+// https://golang.org/src/encoding/json/encode.go typeFields func.
+func visibleFields(fields []field) []field {
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.Name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.Name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(fieldsByIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+//
+// Based on the encoding/json field filtering of the Go Stdlib
+// https://golang.org/src/encoding/json/encode.go dominantField func.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].Index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.Index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.NameFromTag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+// fieldsByName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+//
+// Based on the encoding/json field filtering of the Go Stdlib
+// https://golang.org/src/encoding/json/encode.go fieldsByName type.
+type fieldsByName []field
+
+func (x fieldsByName) Len() int { return len(x) }
+
+func (x fieldsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x fieldsByName) Less(i, j int) bool {
+ if x[i].Name != x[j].Name {
+ return x[i].Name < x[j].Name
+ }
+ if len(x[i].Index) != len(x[j].Index) {
+ return len(x[i].Index) < len(x[j].Index)
+ }
+ if x[i].NameFromTag != x[j].NameFromTag {
+ return x[i].NameFromTag
+ }
+ return fieldsByIndex(x).Less(i, j)
+}
+
+// fieldsByIndex sorts field by index sequence.
+//
+// Based on the encoding/json field filtering of the Go Stdlib
+// https://golang.org/src/encoding/json/encode.go fieldsByIndex type.
+type fieldsByIndex []field
+
+func (x fieldsByIndex) Len() int { return len(x) }
+
+func (x fieldsByIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x fieldsByIndex) Less(i, j int) bool {
+ for k, xik := range x[i].Index {
+ if k >= len(x[j].Index) {
+ return false
+ }
+ if xik != x[j].Index[k] {
+ return xik < x[j].Index[k]
+ }
+ }
+ return len(x[i].Index) < len(x[j].Index)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/tag.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/tag.go
new file mode 100644
index 000000000..8b76a7e91
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/tag.go
@@ -0,0 +1,68 @@
+package dynamodbattribute
+
+import (
+ "reflect"
+ "strings"
+)
+
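+// A tag holds the parsed contents of a single struct tag. For example, the
+// struct tag `dynamodbav:"name,omitempty,stringset"` parses into
+// tag{Name: "name", OmitEmpty: true, AsStrSet: true}.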
+type tag struct {
+ Name string
+ Ignore bool
+ OmitEmpty bool
+ OmitEmptyElem bool
+ AsString bool
+ AsBinSet, AsNumSet, AsStrSet bool
+ AsUnixTime bool
+}
+
+func (t *tag) parseAVTag(structTag reflect.StructTag) {
+ tagStr := structTag.Get("dynamodbav")
+ if len(tagStr) == 0 {
+ return
+ }
+
+ t.parseTagStr(tagStr)
+}
+
+func (t *tag) parseStructTag(tag string, structTag reflect.StructTag) {
+ tagStr := structTag.Get(tag)
+ if len(tagStr) == 0 {
+ return
+ }
+
+ t.parseTagStr(tagStr)
+}
+
+func (t *tag) parseTagStr(tagStr string) {
+ parts := strings.Split(tagStr, ",")
+ if len(parts) == 0 {
+ return
+ }
+
+ if name := parts[0]; name == "-" {
+ t.Name = ""
+ t.Ignore = true
+ } else {
+ t.Name = name
+ t.Ignore = false
+ }
+
+ for _, opt := range parts[1:] {
+ switch opt {
+ case "omitempty":
+ t.OmitEmpty = true
+ case "omitemptyelem":
+ t.OmitEmptyElem = true
+ case "string":
+ t.AsString = true
+ case "binaryset":
+ t.AsBinSet = true
+ case "numberset":
+ t.AsNumSet = true
+ case "stringset":
+ t.AsStrSet = true
+ case "unixtime":
+ t.AsUnixTime = true
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go
new file mode 100644
index 000000000..8b9f524e0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go
@@ -0,0 +1,226 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package dynamodbiface provides an interface to enable mocking the Amazon DynamoDB service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package dynamodbiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/dynamodb"
+)
+
+// DynamoDBAPI provides an interface to enable mocking the
+// dynamodb.DynamoDB service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client's calls easier.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+// // myFunc uses an SDK service client to make a request to
+// // Amazon DynamoDB.
+// func myFunc(svc dynamodbiface.DynamoDBAPI) bool {
+// // Make svc.BatchGetItem request
+// }
+//
+// func main() {
+// sess := session.New()
+// svc := dynamodb.New(sess)
+//
+// myFunc(svc)
+// }
+//
+// In your _test.go file:
+//
+// // Define a mock struct to be used in your unit tests of myFunc.
+// type mockDynamoDBClient struct {
+// dynamodbiface.DynamoDBAPI
+// }
+// func (m *mockDynamoDBClient) BatchGetItem(input *dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error) {
+// // mock response/functionality
+// }
+//
+// func TestMyFunc(t *testing.T) {
+// // Setup Test
+// mockSvc := &mockDynamoDBClient{}
+//
+// myFunc(mockSvc)
+//
+// // Verify myFunc's functionality
+// }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks to satisfy the interfaces.
+type DynamoDBAPI interface {
+ BatchGetItem(*dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error)
+ BatchGetItemWithContext(aws.Context, *dynamodb.BatchGetItemInput, ...request.Option) (*dynamodb.BatchGetItemOutput, error)
+ BatchGetItemRequest(*dynamodb.BatchGetItemInput) (*request.Request, *dynamodb.BatchGetItemOutput)
+
+ BatchGetItemPages(*dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool) error
+ BatchGetItemPagesWithContext(aws.Context, *dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool, ...request.Option) error
+
+ BatchWriteItem(*dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error)
+ BatchWriteItemWithContext(aws.Context, *dynamodb.BatchWriteItemInput, ...request.Option) (*dynamodb.BatchWriteItemOutput, error)
+ BatchWriteItemRequest(*dynamodb.BatchWriteItemInput) (*request.Request, *dynamodb.BatchWriteItemOutput)
+
+ CreateBackup(*dynamodb.CreateBackupInput) (*dynamodb.CreateBackupOutput, error)
+ CreateBackupWithContext(aws.Context, *dynamodb.CreateBackupInput, ...request.Option) (*dynamodb.CreateBackupOutput, error)
+ CreateBackupRequest(*dynamodb.CreateBackupInput) (*request.Request, *dynamodb.CreateBackupOutput)
+
+ CreateGlobalTable(*dynamodb.CreateGlobalTableInput) (*dynamodb.CreateGlobalTableOutput, error)
+ CreateGlobalTableWithContext(aws.Context, *dynamodb.CreateGlobalTableInput, ...request.Option) (*dynamodb.CreateGlobalTableOutput, error)
+ CreateGlobalTableRequest(*dynamodb.CreateGlobalTableInput) (*request.Request, *dynamodb.CreateGlobalTableOutput)
+
+ CreateTable(*dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error)
+ CreateTableWithContext(aws.Context, *dynamodb.CreateTableInput, ...request.Option) (*dynamodb.CreateTableOutput, error)
+ CreateTableRequest(*dynamodb.CreateTableInput) (*request.Request, *dynamodb.CreateTableOutput)
+
+ DeleteBackup(*dynamodb.DeleteBackupInput) (*dynamodb.DeleteBackupOutput, error)
+ DeleteBackupWithContext(aws.Context, *dynamodb.DeleteBackupInput, ...request.Option) (*dynamodb.DeleteBackupOutput, error)
+ DeleteBackupRequest(*dynamodb.DeleteBackupInput) (*request.Request, *dynamodb.DeleteBackupOutput)
+
+ DeleteItem(*dynamodb.DeleteItemInput) (*dynamodb.DeleteItemOutput, error)
+ DeleteItemWithContext(aws.Context, *dynamodb.DeleteItemInput, ...request.Option) (*dynamodb.DeleteItemOutput, error)
+ DeleteItemRequest(*dynamodb.DeleteItemInput) (*request.Request, *dynamodb.DeleteItemOutput)
+
+ DeleteTable(*dynamodb.DeleteTableInput) (*dynamodb.DeleteTableOutput, error)
+ DeleteTableWithContext(aws.Context, *dynamodb.DeleteTableInput, ...request.Option) (*dynamodb.DeleteTableOutput, error)
+ DeleteTableRequest(*dynamodb.DeleteTableInput) (*request.Request, *dynamodb.DeleteTableOutput)
+
+ DescribeBackup(*dynamodb.DescribeBackupInput) (*dynamodb.DescribeBackupOutput, error)
+ DescribeBackupWithContext(aws.Context, *dynamodb.DescribeBackupInput, ...request.Option) (*dynamodb.DescribeBackupOutput, error)
+ DescribeBackupRequest(*dynamodb.DescribeBackupInput) (*request.Request, *dynamodb.DescribeBackupOutput)
+
+ DescribeContinuousBackups(*dynamodb.DescribeContinuousBackupsInput) (*dynamodb.DescribeContinuousBackupsOutput, error)
+ DescribeContinuousBackupsWithContext(aws.Context, *dynamodb.DescribeContinuousBackupsInput, ...request.Option) (*dynamodb.DescribeContinuousBackupsOutput, error)
+ DescribeContinuousBackupsRequest(*dynamodb.DescribeContinuousBackupsInput) (*request.Request, *dynamodb.DescribeContinuousBackupsOutput)
+
+ DescribeEndpoints(*dynamodb.DescribeEndpointsInput) (*dynamodb.DescribeEndpointsOutput, error)
+ DescribeEndpointsWithContext(aws.Context, *dynamodb.DescribeEndpointsInput, ...request.Option) (*dynamodb.DescribeEndpointsOutput, error)
+ DescribeEndpointsRequest(*dynamodb.DescribeEndpointsInput) (*request.Request, *dynamodb.DescribeEndpointsOutput)
+
+ DescribeGlobalTable(*dynamodb.DescribeGlobalTableInput) (*dynamodb.DescribeGlobalTableOutput, error)
+ DescribeGlobalTableWithContext(aws.Context, *dynamodb.DescribeGlobalTableInput, ...request.Option) (*dynamodb.DescribeGlobalTableOutput, error)
+ DescribeGlobalTableRequest(*dynamodb.DescribeGlobalTableInput) (*request.Request, *dynamodb.DescribeGlobalTableOutput)
+
+ DescribeGlobalTableSettings(*dynamodb.DescribeGlobalTableSettingsInput) (*dynamodb.DescribeGlobalTableSettingsOutput, error)
+ DescribeGlobalTableSettingsWithContext(aws.Context, *dynamodb.DescribeGlobalTableSettingsInput, ...request.Option) (*dynamodb.DescribeGlobalTableSettingsOutput, error)
+ DescribeGlobalTableSettingsRequest(*dynamodb.DescribeGlobalTableSettingsInput) (*request.Request, *dynamodb.DescribeGlobalTableSettingsOutput)
+
+ DescribeLimits(*dynamodb.DescribeLimitsInput) (*dynamodb.DescribeLimitsOutput, error)
+ DescribeLimitsWithContext(aws.Context, *dynamodb.DescribeLimitsInput, ...request.Option) (*dynamodb.DescribeLimitsOutput, error)
+ DescribeLimitsRequest(*dynamodb.DescribeLimitsInput) (*request.Request, *dynamodb.DescribeLimitsOutput)
+
+ DescribeTable(*dynamodb.DescribeTableInput) (*dynamodb.DescribeTableOutput, error)
+ DescribeTableWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.Option) (*dynamodb.DescribeTableOutput, error)
+ DescribeTableRequest(*dynamodb.DescribeTableInput) (*request.Request, *dynamodb.DescribeTableOutput)
+
+ DescribeTimeToLive(*dynamodb.DescribeTimeToLiveInput) (*dynamodb.DescribeTimeToLiveOutput, error)
+ DescribeTimeToLiveWithContext(aws.Context, *dynamodb.DescribeTimeToLiveInput, ...request.Option) (*dynamodb.DescribeTimeToLiveOutput, error)
+ DescribeTimeToLiveRequest(*dynamodb.DescribeTimeToLiveInput) (*request.Request, *dynamodb.DescribeTimeToLiveOutput)
+
+ GetItem(*dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error)
+ GetItemWithContext(aws.Context, *dynamodb.GetItemInput, ...request.Option) (*dynamodb.GetItemOutput, error)
+ GetItemRequest(*dynamodb.GetItemInput) (*request.Request, *dynamodb.GetItemOutput)
+
+ ListBackups(*dynamodb.ListBackupsInput) (*dynamodb.ListBackupsOutput, error)
+ ListBackupsWithContext(aws.Context, *dynamodb.ListBackupsInput, ...request.Option) (*dynamodb.ListBackupsOutput, error)
+ ListBackupsRequest(*dynamodb.ListBackupsInput) (*request.Request, *dynamodb.ListBackupsOutput)
+
+ ListGlobalTables(*dynamodb.ListGlobalTablesInput) (*dynamodb.ListGlobalTablesOutput, error)
+ ListGlobalTablesWithContext(aws.Context, *dynamodb.ListGlobalTablesInput, ...request.Option) (*dynamodb.ListGlobalTablesOutput, error)
+ ListGlobalTablesRequest(*dynamodb.ListGlobalTablesInput) (*request.Request, *dynamodb.ListGlobalTablesOutput)
+
+ ListTables(*dynamodb.ListTablesInput) (*dynamodb.ListTablesOutput, error)
+ ListTablesWithContext(aws.Context, *dynamodb.ListTablesInput, ...request.Option) (*dynamodb.ListTablesOutput, error)
+ ListTablesRequest(*dynamodb.ListTablesInput) (*request.Request, *dynamodb.ListTablesOutput)
+
+ ListTablesPages(*dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool) error
+ ListTablesPagesWithContext(aws.Context, *dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool, ...request.Option) error
+
+ ListTagsOfResource(*dynamodb.ListTagsOfResourceInput) (*dynamodb.ListTagsOfResourceOutput, error)
+ ListTagsOfResourceWithContext(aws.Context, *dynamodb.ListTagsOfResourceInput, ...request.Option) (*dynamodb.ListTagsOfResourceOutput, error)
+ ListTagsOfResourceRequest(*dynamodb.ListTagsOfResourceInput) (*request.Request, *dynamodb.ListTagsOfResourceOutput)
+
+ PutItem(*dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error)
+ PutItemWithContext(aws.Context, *dynamodb.PutItemInput, ...request.Option) (*dynamodb.PutItemOutput, error)
+ PutItemRequest(*dynamodb.PutItemInput) (*request.Request, *dynamodb.PutItemOutput)
+
+ Query(*dynamodb.QueryInput) (*dynamodb.QueryOutput, error)
+ QueryWithContext(aws.Context, *dynamodb.QueryInput, ...request.Option) (*dynamodb.QueryOutput, error)
+ QueryRequest(*dynamodb.QueryInput) (*request.Request, *dynamodb.QueryOutput)
+
+ QueryPages(*dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool) error
+ QueryPagesWithContext(aws.Context, *dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool, ...request.Option) error
+
+ RestoreTableFromBackup(*dynamodb.RestoreTableFromBackupInput) (*dynamodb.RestoreTableFromBackupOutput, error)
+ RestoreTableFromBackupWithContext(aws.Context, *dynamodb.RestoreTableFromBackupInput, ...request.Option) (*dynamodb.RestoreTableFromBackupOutput, error)
+ RestoreTableFromBackupRequest(*dynamodb.RestoreTableFromBackupInput) (*request.Request, *dynamodb.RestoreTableFromBackupOutput)
+
+ RestoreTableToPointInTime(*dynamodb.RestoreTableToPointInTimeInput) (*dynamodb.RestoreTableToPointInTimeOutput, error)
+ RestoreTableToPointInTimeWithContext(aws.Context, *dynamodb.RestoreTableToPointInTimeInput, ...request.Option) (*dynamodb.RestoreTableToPointInTimeOutput, error)
+ RestoreTableToPointInTimeRequest(*dynamodb.RestoreTableToPointInTimeInput) (*request.Request, *dynamodb.RestoreTableToPointInTimeOutput)
+
+ Scan(*dynamodb.ScanInput) (*dynamodb.ScanOutput, error)
+ ScanWithContext(aws.Context, *dynamodb.ScanInput, ...request.Option) (*dynamodb.ScanOutput, error)
+ ScanRequest(*dynamodb.ScanInput) (*request.Request, *dynamodb.ScanOutput)
+
+ ScanPages(*dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool) error
+ ScanPagesWithContext(aws.Context, *dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool, ...request.Option) error
+
+ TagResource(*dynamodb.TagResourceInput) (*dynamodb.TagResourceOutput, error)
+ TagResourceWithContext(aws.Context, *dynamodb.TagResourceInput, ...request.Option) (*dynamodb.TagResourceOutput, error)
+ TagResourceRequest(*dynamodb.TagResourceInput) (*request.Request, *dynamodb.TagResourceOutput)
+
+ TransactGetItems(*dynamodb.TransactGetItemsInput) (*dynamodb.TransactGetItemsOutput, error)
+ TransactGetItemsWithContext(aws.Context, *dynamodb.TransactGetItemsInput, ...request.Option) (*dynamodb.TransactGetItemsOutput, error)
+ TransactGetItemsRequest(*dynamodb.TransactGetItemsInput) (*request.Request, *dynamodb.TransactGetItemsOutput)
+
+ TransactWriteItems(*dynamodb.TransactWriteItemsInput) (*dynamodb.TransactWriteItemsOutput, error)
+ TransactWriteItemsWithContext(aws.Context, *dynamodb.TransactWriteItemsInput, ...request.Option) (*dynamodb.TransactWriteItemsOutput, error)
+ TransactWriteItemsRequest(*dynamodb.TransactWriteItemsInput) (*request.Request, *dynamodb.TransactWriteItemsOutput)
+
+ UntagResource(*dynamodb.UntagResourceInput) (*dynamodb.UntagResourceOutput, error)
+ UntagResourceWithContext(aws.Context, *dynamodb.UntagResourceInput, ...request.Option) (*dynamodb.UntagResourceOutput, error)
+ UntagResourceRequest(*dynamodb.UntagResourceInput) (*request.Request, *dynamodb.UntagResourceOutput)
+
+ UpdateContinuousBackups(*dynamodb.UpdateContinuousBackupsInput) (*dynamodb.UpdateContinuousBackupsOutput, error)
+ UpdateContinuousBackupsWithContext(aws.Context, *dynamodb.UpdateContinuousBackupsInput, ...request.Option) (*dynamodb.UpdateContinuousBackupsOutput, error)
+ UpdateContinuousBackupsRequest(*dynamodb.UpdateContinuousBackupsInput) (*request.Request, *dynamodb.UpdateContinuousBackupsOutput)
+
+ UpdateGlobalTable(*dynamodb.UpdateGlobalTableInput) (*dynamodb.UpdateGlobalTableOutput, error)
+ UpdateGlobalTableWithContext(aws.Context, *dynamodb.UpdateGlobalTableInput, ...request.Option) (*dynamodb.UpdateGlobalTableOutput, error)
+ UpdateGlobalTableRequest(*dynamodb.UpdateGlobalTableInput) (*request.Request, *dynamodb.UpdateGlobalTableOutput)
+
+ UpdateGlobalTableSettings(*dynamodb.UpdateGlobalTableSettingsInput) (*dynamodb.UpdateGlobalTableSettingsOutput, error)
+ UpdateGlobalTableSettingsWithContext(aws.Context, *dynamodb.UpdateGlobalTableSettingsInput, ...request.Option) (*dynamodb.UpdateGlobalTableSettingsOutput, error)
+ UpdateGlobalTableSettingsRequest(*dynamodb.UpdateGlobalTableSettingsInput) (*request.Request, *dynamodb.UpdateGlobalTableSettingsOutput)
+
+ UpdateItem(*dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error)
+ UpdateItemWithContext(aws.Context, *dynamodb.UpdateItemInput, ...request.Option) (*dynamodb.UpdateItemOutput, error)
+ UpdateItemRequest(*dynamodb.UpdateItemInput) (*request.Request, *dynamodb.UpdateItemOutput)
+
+ UpdateTable(*dynamodb.UpdateTableInput) (*dynamodb.UpdateTableOutput, error)
+ UpdateTableWithContext(aws.Context, *dynamodb.UpdateTableInput, ...request.Option) (*dynamodb.UpdateTableOutput, error)
+ UpdateTableRequest(*dynamodb.UpdateTableInput) (*request.Request, *dynamodb.UpdateTableOutput)
+
+ UpdateTimeToLive(*dynamodb.UpdateTimeToLiveInput) (*dynamodb.UpdateTimeToLiveOutput, error)
+ UpdateTimeToLiveWithContext(aws.Context, *dynamodb.UpdateTimeToLiveInput, ...request.Option) (*dynamodb.UpdateTimeToLiveOutput, error)
+ UpdateTimeToLiveRequest(*dynamodb.UpdateTimeToLiveInput) (*request.Request, *dynamodb.UpdateTimeToLiveOutput)
+
+ WaitUntilTableExists(*dynamodb.DescribeTableInput) error
+ WaitUntilTableExistsWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.WaiterOption) error
+
+ WaitUntilTableNotExists(*dynamodb.DescribeTableInput) error
+ WaitUntilTableNotExistsWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.WaiterOption) error
+}
+
+var _ DynamoDBAPI = (*dynamodb.DynamoDB)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go
new file mode 100644
index 000000000..71f3e7d3d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go
@@ -0,0 +1,270 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package dynamodb
+
+const (
+
+ // ErrCodeBackupInUseException for service response error code
+ // "BackupInUseException".
+ //
+ // There is another ongoing conflicting backup control plane operation on the
+ // table. The backup is either being created, deleted or restored to a table.
+ ErrCodeBackupInUseException = "BackupInUseException"
+
+ // ErrCodeBackupNotFoundException for service response error code
+ // "BackupNotFoundException".
+ //
+ // Backup not found for the given BackupARN.
+ ErrCodeBackupNotFoundException = "BackupNotFoundException"
+
+ // ErrCodeConditionalCheckFailedException for service response error code
+ // "ConditionalCheckFailedException".
+ //
+ // A condition specified in the operation could not be evaluated.
+ ErrCodeConditionalCheckFailedException = "ConditionalCheckFailedException"
+
+ // ErrCodeContinuousBackupsUnavailableException for service response error code
+ // "ContinuousBackupsUnavailableException".
+ //
+ // Backups have not yet been enabled for this table.
+ ErrCodeContinuousBackupsUnavailableException = "ContinuousBackupsUnavailableException"
+
+ // ErrCodeGlobalTableAlreadyExistsException for service response error code
+ // "GlobalTableAlreadyExistsException".
+ //
+ // The specified global table already exists.
+ ErrCodeGlobalTableAlreadyExistsException = "GlobalTableAlreadyExistsException"
+
+ // ErrCodeGlobalTableNotFoundException for service response error code
+ // "GlobalTableNotFoundException".
+ //
+ // The specified global table does not exist.
+ ErrCodeGlobalTableNotFoundException = "GlobalTableNotFoundException"
+
+ // ErrCodeIdempotentParameterMismatchException for service response error code
+ // "IdempotentParameterMismatchException".
+ //
+ // DynamoDB rejected the request because you retried a request with a different
+ // payload but with an idempotent token that was already used.
+ ErrCodeIdempotentParameterMismatchException = "IdempotentParameterMismatchException"
+
+ // ErrCodeIndexNotFoundException for service response error code
+ // "IndexNotFoundException".
+ //
+ // The operation tried to access a nonexistent index.
+ ErrCodeIndexNotFoundException = "IndexNotFoundException"
+
+ // ErrCodeInternalServerError for service response error code
+ // "InternalServerError".
+ //
+ // An error occurred on the server side.
+ ErrCodeInternalServerError = "InternalServerError"
+
+ // ErrCodeInvalidRestoreTimeException for service response error code
+ // "InvalidRestoreTimeException".
+ //
+ // An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime
+ // and LatestRestorableDateTime.
+ ErrCodeInvalidRestoreTimeException = "InvalidRestoreTimeException"
+
+ // ErrCodeItemCollectionSizeLimitExceededException for service response error code
+ // "ItemCollectionSizeLimitExceededException".
+ //
+ // An item collection is too large. This exception is only returned for tables
+ // that have one or more local secondary indexes.
+ ErrCodeItemCollectionSizeLimitExceededException = "ItemCollectionSizeLimitExceededException"
+
+ // ErrCodeLimitExceededException for service response error code
+ // "LimitExceededException".
+ //
+ // There is no limit to the number of daily on-demand backups that can be taken.
+ //
+ // Up to 50 simultaneous table operations are allowed per account. These operations
+ // include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+ // and RestoreTableToPointInTime.
+ //
+ // The only exception is when you are creating a table with one or more secondary
+ // indexes. You can have up to 25 such requests running at a time; however,
+ // if the table or index specifications are complex, DynamoDB might temporarily
+ // reduce the number of concurrent operations.
+ //
+ // There is a soft account limit of 256 tables.
+ ErrCodeLimitExceededException = "LimitExceededException"
+
+ // ErrCodePointInTimeRecoveryUnavailableException for service response error code
+ // "PointInTimeRecoveryUnavailableException".
+ //
+ // Point in time recovery has not yet been enabled for this source table.
+ ErrCodePointInTimeRecoveryUnavailableException = "PointInTimeRecoveryUnavailableException"
+
+ // ErrCodeProvisionedThroughputExceededException for service response error code
+ // "ProvisionedThroughputExceededException".
+ //
+ // Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+ // requests that receive this exception. Your request is eventually successful,
+ // unless your retry queue is too large to finish. Reduce the frequency of requests
+ // and use exponential backoff. For more information, go to Error Retries and
+ // Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+ // in the Amazon DynamoDB Developer Guide.
+ ErrCodeProvisionedThroughputExceededException = "ProvisionedThroughputExceededException"
+
+ // ErrCodeReplicaAlreadyExistsException for service response error code
+ // "ReplicaAlreadyExistsException".
+ //
+ // The specified replica is already part of the global table.
+ ErrCodeReplicaAlreadyExistsException = "ReplicaAlreadyExistsException"
+
+ // ErrCodeReplicaNotFoundException for service response error code
+ // "ReplicaNotFoundException".
+ //
+ // The specified replica is no longer part of the global table.
+ ErrCodeReplicaNotFoundException = "ReplicaNotFoundException"
+
+ // ErrCodeRequestLimitExceeded for service response error code
+ // "RequestLimitExceeded".
+ //
+ // Throughput exceeds the current throughput limit for your account. Please
+ // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
+ // a limit increase.
+ ErrCodeRequestLimitExceeded = "RequestLimitExceeded"
+
+ // ErrCodeResourceInUseException for service response error code
+ // "ResourceInUseException".
+ //
+ // The operation conflicts with the resource's availability. For example, you
+ // attempted to recreate an existing table, or tried to delete a table currently
+ // in the CREATING state.
+ ErrCodeResourceInUseException = "ResourceInUseException"
+
+ // ErrCodeResourceNotFoundException for service response error code
+ // "ResourceNotFoundException".
+ //
+ // The operation tried to access a nonexistent table or index. The resource
+ // might not be specified correctly, or its status might not be ACTIVE.
+ ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+
+ // ErrCodeTableAlreadyExistsException for service response error code
+ // "TableAlreadyExistsException".
+ //
+ // A target table with the specified name already exists.
+ ErrCodeTableAlreadyExistsException = "TableAlreadyExistsException"
+
+ // ErrCodeTableInUseException for service response error code
+ // "TableInUseException".
+ //
+ // A target table with the specified name is either being created or deleted.
+ ErrCodeTableInUseException = "TableInUseException"
+
+ // ErrCodeTableNotFoundException for service response error code
+ // "TableNotFoundException".
+ //
+ // A source table with the name TableName does not currently exist within the
+ // subscriber's account.
+ ErrCodeTableNotFoundException = "TableNotFoundException"
+
+ // ErrCodeTransactionCanceledException for service response error code
+ // "TransactionCanceledException".
+ //
+ // The entire transaction request was canceled.
+ //
+ // DynamoDB cancels a TransactWriteItems request under the following circumstances:
+ //
+ // * A condition in one of the condition expressions is not met.
+ //
+ // * A table in the TransactWriteItems request is in a different account
+ // or region.
+ //
+ // * More than one action in the TransactWriteItems operation targets the
+ // same item.
+ //
+ // * There is insufficient provisioned capacity for the transaction to be
+ // completed.
+ //
+ // * An item size becomes too large (larger than 400 KB), or a local secondary
+ // index (LSI) becomes too large, or a similar validation error occurs because
+ // of changes made by the transaction.
+ //
+ // * The aggregate size of the items in the transaction exceeds 4 MBs.
+ //
+ // * There is a user error, such as an invalid data format.
+ //
+ // DynamoDB cancels a TransactGetItems request under the following circumstances:
+ //
+ // * There is an ongoing TransactGetItems operation that conflicts with a
+ // concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
+ // In this case the TransactGetItems operation fails with a TransactionCanceledException.
+ //
+ // * A table in the TransactGetItems request is in a different account or
+ // region.
+ //
+ // * There is insufficient provisioned capacity for the transaction to be
+ // completed.
+ //
+	//    * The aggregate size of the items in the transaction exceeds 4 MB.
+ //
+ // * There is a user error, such as an invalid data format.
+ //
+	// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
+	// property. This property is not set for other languages. Transaction cancellation
+	// reasons are ordered to match the requested items; if an item has no error,
+	// its reason has the code NONE and a null message.
+ //
+ // Cancellation reason codes and possible error messages:
+ //
+ // * No Errors: Code: NONE Message: null
+ //
+ // * Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+ // conditional request failed.
+ //
+ // * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+ // Message: Collection size exceeded.
+ //
+ // * Transaction Conflict: Code: TransactionConflict Message: Transaction
+ // is ongoing for the item.
+ //
+	//    * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+	//    Messages: The level of configured provisioned throughput for the table
+	//    was exceeded. Consider increasing your provisioning level with the UpdateTable
+	//    API. (This message is returned when provisioned throughput is exceeded
+	//    on a provisioned DynamoDB table.) The level of configured provisioned
+	//    throughput for one or more global secondary indexes of the table was exceeded.
+	//    Consider increasing your provisioning level for the under-provisioned
+	//    global secondary indexes with the UpdateTable API. (This message is returned
+	//    when provisioned throughput is exceeded on a provisioned GSI.)
+ //
+	//    * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+	//    the current capacity of your table or index. DynamoDB is automatically
+	//    scaling your table or index so please try again shortly. If exceptions
+	//    persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+	//    (This message is returned when writes get throttled on an On-Demand table
+	//    as DynamoDB is automatically scaling the table.) Throughput exceeds the
+	//    current capacity for one or more global secondary indexes. DynamoDB is
+	//    automatically scaling your index so please try again shortly. (This message
+	//    is returned when writes get throttled on an On-Demand GSI as DynamoDB
+	//    is automatically scaling the GSI.)
+ //
+ // * Validation Error: Code: ValidationError Messages: One or more parameter
+ // values were invalid. The update expression attempted to update the secondary
+ // index key beyond allowed size limits. The update expression attempted
+ // to update the secondary index key to unsupported type. An operand in the
+ // update expression has an incorrect data type. Item size to update has
+ // exceeded the maximum allowed size. Number overflow. Attempting to store
+ // a number with magnitude larger than supported range. Type mismatch for
+ // attribute to update. Nesting Levels have exceeded supported limits. The
+ // document path provided in the update expression is invalid for update.
+ // The provided expression refers to an attribute that does not exist in
+ // the item.
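+	//
+	// A minimal sketch of detecting this cancellation in Go via awserr (svc and
+	// input are assumptions, not part of this package):
+	//
+	//    _, err := svc.TransactWriteItems(input) // svc: assumed *dynamodb.DynamoDB
+	//    if aerr, ok := err.(awserr.Error); ok &&
+	//        aerr.Code() == ErrCodeTransactionCanceledException {
+	//        // aerr.Message() carries the cancellation details.
+	//    }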
+ ErrCodeTransactionCanceledException = "TransactionCanceledException"
+
+ // ErrCodeTransactionConflictException for service response error code
+ // "TransactionConflictException".
+ //
+ // Operation was rejected because there is an ongoing transaction for the item.
+ ErrCodeTransactionConflictException = "TransactionConflictException"
+
+ // ErrCodeTransactionInProgressException for service response error code
+ // "TransactionInProgressException".
+ //
+ // The transaction with the given request token is already in progress.
+ ErrCodeTransactionInProgressException = "TransactionInProgressException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go
new file mode 100644
index 000000000..0400da631
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go
@@ -0,0 +1,101 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/crr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// DynamoDB provides the API operation methods for making requests to
+// Amazon DynamoDB. See this package's package overview docs
+// for details on the service.
+//
+// DynamoDB methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type DynamoDB struct {
+ *client.Client
+ endpointCache *crr.EndpointCache
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "dynamodb" // Name of service.
+ EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID = "DynamoDB" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the DynamoDB client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a DynamoDB client from just a session.
+// svc := dynamodb.New(mySession)
+//
+// // Create a DynamoDB client with additional configuration
+// svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDB {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DynamoDB {
+ svc := &DynamoDB{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ PartitionID: partitionID,
+ Endpoint: endpoint,
+ APIVersion: "2012-08-10",
+ JSONVersion: "1.0",
+ TargetPrefix: "DynamoDB_20120810",
+ },
+ handlers,
+ ),
+ }
+ svc.endpointCache = crr.NewEndpointCache(10)
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a DynamoDB operation and runs any
+// custom request initialization.
+func (c *DynamoDB) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go
new file mode 100644
index 000000000..ae515f7de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go
@@ -0,0 +1,107 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// WaitUntilTableExists uses the DynamoDB API operation
+// DescribeTable to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
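+//
+// A minimal usage sketch (mySession and the table name are assumptions):
+//
+//    svc := dynamodb.New(mySession)
+//    err := svc.WaitUntilTableExists(&dynamodb.DescribeTableInput{
+//        TableName: aws.String("my-table"), // assumed table name
+//    })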
+func (c *DynamoDB) WaitUntilTableExists(input *DescribeTableInput) error {
+ return c.WaitUntilTableExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilTableExistsWithContext is an extended version of WaitUntilTableExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
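+//
+// A sketch of bounding the wait with a context and waiter options (the
+// context construction here is an assumption; any context.Context satisfies
+// aws.Context):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+//    defer cancel()
+//    err := svc.WaitUntilTableExistsWithContext(ctx, input,
+//        request.WithWaiterMaxAttempts(10))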
+func (c *DynamoDB) WaitUntilTableExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilTableExists",
+ MaxAttempts: 25,
+ Delay: request.ConstantWaiterDelay(20 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.PathWaiterMatch, Argument: "Table.TableStatus",
+ Expected: "ACTIVE",
+ },
+ {
+ State: request.RetryWaiterState,
+ Matcher: request.ErrorWaiterMatch,
+ Expected: "ResourceNotFoundException",
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *DescribeTableInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeTableRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
+
+// WaitUntilTableNotExists uses the DynamoDB API operation
+// DescribeTable to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *DynamoDB) WaitUntilTableNotExists(input *DescribeTableInput) error {
+ return c.WaitUntilTableNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilTableNotExistsWithContext is an extended version of WaitUntilTableNotExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) WaitUntilTableNotExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilTableNotExists",
+ MaxAttempts: 25,
+ Delay: request.ConstantWaiterDelay(20 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
+ {
+ State: request.SuccessWaiterState,
+ Matcher: request.ErrorWaiterMatch,
+ Expected: "ResourceNotFoundException",
+ },
+ },
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *DescribeTableInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeTableRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+ w.ApplyOptions(opts...)
+
+ return w.WaitWithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go
new file mode 100644
index 000000000..bc087b5b6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go
@@ -0,0 +1,5055 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sqs
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+const opAddPermission = "AddPermission"
+
+// AddPermissionRequest generates a "aws/request.Request" representing the
+// client's request for the AddPermission operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AddPermission for more information on using the AddPermission
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AddPermissionRequest method.
+// req, resp := client.AddPermissionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/AddPermission
+func (c *SQS) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) {
+ op := &request.Operation{
+ Name: opAddPermission,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AddPermissionInput{}
+ }
+
+ output = &AddPermissionOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// AddPermission API operation for Amazon Simple Queue Service.
+//
+// Adds a permission to a queue for a specific principal (https://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P).
+// This allows sharing access to the queue.
+//
+// When you create a queue, you have full control access rights for the queue.
+// Only you, the owner of the queue, can grant or deny permissions to the queue.
+// For more information about these permissions, see Allow Developers to Write
+// Messages to a Shared Queue (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-writing-an-sqs-policy.html#write-messages-to-shared-queue)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// * AddPermission generates a policy for you. You can use SetQueueAttributes
+// to upload your policy. For more information, see Using Custom Policies
+// with the Amazon SQS Access Policy Language (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-creating-custom-policies.html)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// * An Amazon SQS policy can have a maximum of 7 actions.
+//
+// * To remove the ability to change queue permissions, you must deny permission
+// to the AddPermission, RemovePermission, and SetQueueAttributes actions
+// in your IAM policy.
+//
+// Some actions take lists of parameters. These lists are specified using the
+// param.n notation. Values of n are integers starting from 1. For example,
+// a parameter list with two elements looks like this:
+//
+// &Attribute.1=first
+//
+// &Attribute.2=second
+//
+// Cross-account permissions don't apply to this action. For more information,
+// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name)
+// in the Amazon Simple Queue Service Developer Guide.
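+//
+// A sketch of granting SendMessage to another account (the queue URL,
+// account ID, and label are assumptions):
+//
+//    _, err := svc.AddPermission(&sqs.AddPermissionInput{
+//        QueueUrl:      aws.String(queueURL), // assumed queue URL
+//        Label:         aws.String("SendFromOtherAccount"),
+//        AWSAccountIds: []*string{aws.String("123456789012")},
+//        Actions:       []*string{aws.String("SendMessage")},
+//    })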
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation AddPermission for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeOverLimit "OverLimit"
+// The specified action violates a limit. For example, ReceiveMessage returns
+// this error if the maximum number of inflight messages is reached and AddPermission
+// returns this error if the maximum number of permissions for the queue is
+// reached.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/AddPermission
+func (c *SQS) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, error) {
+ req, out := c.AddPermissionRequest(input)
+ return out, req.Send()
+}
+
+// AddPermissionWithContext is the same as AddPermission with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AddPermission for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) AddPermissionWithContext(ctx aws.Context, input *AddPermissionInput, opts ...request.Option) (*AddPermissionOutput, error) {
+ req, out := c.AddPermissionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opChangeMessageVisibility = "ChangeMessageVisibility"
+
+// ChangeMessageVisibilityRequest generates a "aws/request.Request" representing the
+// client's request for the ChangeMessageVisibility operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ChangeMessageVisibility for more information on using the ChangeMessageVisibility
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ChangeMessageVisibilityRequest method.
+// req, resp := client.ChangeMessageVisibilityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ChangeMessageVisibility
+func (c *SQS) ChangeMessageVisibilityRequest(input *ChangeMessageVisibilityInput) (req *request.Request, output *ChangeMessageVisibilityOutput) {
+ op := &request.Operation{
+ Name: opChangeMessageVisibility,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ChangeMessageVisibilityInput{}
+ }
+
+ output = &ChangeMessageVisibilityOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// ChangeMessageVisibility API operation for Amazon Simple Queue Service.
+//
+// Changes the visibility timeout of a specified message in a queue to a new
+// value. The default visibility timeout for a message is 30 seconds. The minimum
+// is 0 seconds. The maximum is 12 hours. For more information, see Visibility
+// Timeout (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// For example, you have a message with a visibility timeout of 5 minutes. After
+// 3 minutes, you call ChangeMessageVisibility with a timeout of 10 minutes.
+// You can continue to call ChangeMessageVisibility to extend the visibility
+// timeout to the maximum allowed time. If you try to extend the visibility
+// timeout beyond the maximum, your request is rejected.
+//
+// An Amazon SQS message has three basic states:
+//
+// Sent to a queue by a producer.
+//
+// Received from the queue by a consumer.
+//
+// Deleted from the queue.
+//
+// A message is considered to be stored after it is sent to a queue by a producer,
+// but not yet received from the queue by a consumer (that is, between states
+// 1 and 2). There is no limit to the number of stored messages. A message is
+// considered to be in flight after it is received from a queue by a consumer,
+// but not yet deleted from the queue (that is, between states 2 and 3). There
+// is a limit to the number of inflight messages.
+//
+// Limits that apply to inflight messages are unrelated to the unlimited number
+// of stored messages.
+//
+// For most standard queues (depending on queue traffic and message backlog),
+// there can be a maximum of approximately 120,000 inflight messages (received
+// from a queue by a consumer, but not yet deleted from the queue). If you reach
+// this limit, Amazon SQS returns the OverLimit error message. To avoid reaching
+// the limit, you should delete messages from the queue after they're processed.
+// You can also increase the number of queues you use to process your messages.
+// To request a limit increase, file a support request (https://console.aws.amazon.com/support/home#/case/create?issueType=service-limit-increase&limitType=service-code-sqs).
+//
+// For FIFO queues, there can be a maximum of 20,000 inflight messages (received
+// from a queue by a consumer, but not yet deleted from the queue). If you reach
+// this limit, Amazon SQS returns no error messages.
+//
+// If you attempt to set the VisibilityTimeout to a value greater than the maximum
+// time left, Amazon SQS returns an error. Amazon SQS doesn't automatically
+// recalculate and increase the timeout to the maximum remaining time.
+//
+// Unlike with a queue, when you change the visibility timeout for a specific
+// message the timeout value is applied immediately but isn't saved in memory
+// for that message. If you don't delete a message after it is received, the
+// visibility timeout for the message reverts to the original timeout value
+// (not to the value you set using the ChangeMessageVisibility action) the next
+// time the message is received.
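+//
+// A minimal sketch of extending a message's timeout (queueURL and msg are
+// assumptions, e.g. taken from a prior ReceiveMessage call):
+//
+//    _, err := svc.ChangeMessageVisibility(&sqs.ChangeMessageVisibilityInput{
+//        QueueUrl:          aws.String(queueURL),
+//        ReceiptHandle:     msg.ReceiptHandle,
+//        VisibilityTimeout: aws.Int64(600), // 10 minutes
+//    })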
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation ChangeMessageVisibility for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMessageNotInflight "AWS.SimpleQueueService.MessageNotInflight"
+// The specified message isn't in flight.
+//
+// * ErrCodeReceiptHandleIsInvalid "ReceiptHandleIsInvalid"
+// The specified receipt handle isn't valid.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ChangeMessageVisibility
+func (c *SQS) ChangeMessageVisibility(input *ChangeMessageVisibilityInput) (*ChangeMessageVisibilityOutput, error) {
+ req, out := c.ChangeMessageVisibilityRequest(input)
+ return out, req.Send()
+}
+
+// ChangeMessageVisibilityWithContext is the same as ChangeMessageVisibility with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ChangeMessageVisibility for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) ChangeMessageVisibilityWithContext(ctx aws.Context, input *ChangeMessageVisibilityInput, opts ...request.Option) (*ChangeMessageVisibilityOutput, error) {
+ req, out := c.ChangeMessageVisibilityRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opChangeMessageVisibilityBatch = "ChangeMessageVisibilityBatch"
+
+// ChangeMessageVisibilityBatchRequest generates a "aws/request.Request" representing the
+// client's request for the ChangeMessageVisibilityBatch operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ChangeMessageVisibilityBatch for more information on using the ChangeMessageVisibilityBatch
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ChangeMessageVisibilityBatchRequest method.
+// req, resp := client.ChangeMessageVisibilityBatchRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ChangeMessageVisibilityBatch
+func (c *SQS) ChangeMessageVisibilityBatchRequest(input *ChangeMessageVisibilityBatchInput) (req *request.Request, output *ChangeMessageVisibilityBatchOutput) {
+ op := &request.Operation{
+ Name: opChangeMessageVisibilityBatch,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ChangeMessageVisibilityBatchInput{}
+ }
+
+ output = &ChangeMessageVisibilityBatchOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ChangeMessageVisibilityBatch API operation for Amazon Simple Queue Service.
+//
+// Changes the visibility timeout of multiple messages. This is a batch version
+// of ChangeMessageVisibility. The result of the action on each message is reported
+// individually in the response. You can send up to 10 ChangeMessageVisibility
+// requests with each ChangeMessageVisibilityBatch action.
+//
+// Because the batch request can result in a combination of successful and unsuccessful
+// actions, you should check for batch errors even when the call returns an
+// HTTP status code of 200.
+//
+// Some actions take lists of parameters. These lists are specified using the
+// param.n notation. Values of n are integers starting from 1. For example,
+// a parameter list with two elements looks like this:
+//
+// &Attribute.1=first
+//
+// &Attribute.2=second
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation ChangeMessageVisibilityBatch for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeTooManyEntriesInBatchRequest "AWS.SimpleQueueService.TooManyEntriesInBatchRequest"
+// The batch request contains more entries than permissible.
+//
+// * ErrCodeEmptyBatchRequest "AWS.SimpleQueueService.EmptyBatchRequest"
+// The batch request doesn't contain any entries.
+//
+// * ErrCodeBatchEntryIdsNotDistinct "AWS.SimpleQueueService.BatchEntryIdsNotDistinct"
+// Two or more batch entries in the request have the same Id.
+//
+// * ErrCodeInvalidBatchEntryId "AWS.SimpleQueueService.InvalidBatchEntryId"
+// The Id of a batch entry in a batch request doesn't abide by the specification.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ChangeMessageVisibilityBatch
+func (c *SQS) ChangeMessageVisibilityBatch(input *ChangeMessageVisibilityBatchInput) (*ChangeMessageVisibilityBatchOutput, error) {
+ req, out := c.ChangeMessageVisibilityBatchRequest(input)
+ return out, req.Send()
+}
+
+// ChangeMessageVisibilityBatchWithContext is the same as ChangeMessageVisibilityBatch with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ChangeMessageVisibilityBatch for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) ChangeMessageVisibilityBatchWithContext(ctx aws.Context, input *ChangeMessageVisibilityBatchInput, opts ...request.Option) (*ChangeMessageVisibilityBatchOutput, error) {
+ req, out := c.ChangeMessageVisibilityBatchRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateQueue = "CreateQueue"
+
+// CreateQueueRequest generates a "aws/request.Request" representing the
+// client's request for the CreateQueue operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateQueue for more information on using the CreateQueue
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the CreateQueueRequest method.
+// req, resp := client.CreateQueueRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/CreateQueue
+func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, output *CreateQueueOutput) {
+ op := &request.Operation{
+ Name: opCreateQueue,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateQueueInput{}
+ }
+
+ output = &CreateQueueOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateQueue API operation for Amazon Simple Queue Service.
+//
+// Creates a new standard or FIFO queue. You can pass one or more attributes
+// in the request. Keep the following caveats in mind:
+//
+// * If you don't specify the FifoQueue attribute, Amazon SQS creates a standard
+// queue. You can't change the queue type after you create it and you can't
+// convert an existing standard queue into a FIFO queue. You must either
+// create a new FIFO queue for your application or delete your existing standard
+// queue and recreate it as a FIFO queue. For more information, see Moving
+// From a Standard Queue to a FIFO Queue (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-moving)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// * If you don't provide a value for an attribute, the queue is created
+// with the default value for the attribute.
+//
+// * If you delete a queue, you must wait at least 60 seconds before creating
+// a queue with the same name.
+//
+// To successfully create a new queue, you must provide a queue name that adheres
+// to the limits related to queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html)
+// and is unique within the scope of your queues.
+//
+// To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only
+// the QueueName parameter. Be aware of existing queue names:
+//
+// * If you provide the name of an existing queue along with the exact names
+// and values of all the queue's attributes, CreateQueue returns the queue
+// URL for the existing queue.
+//
+// * If the queue name, attribute names, or attribute values don't match
+// an existing queue, CreateQueue returns an error.
+//
+// Some actions take lists of parameters. These lists are specified using the
+// param.n notation. Values of n are integers starting from 1. For example,
+// a parameter list with two elements looks like this:
+//
+// &Attribute.1=first
+//
+// &Attribute.2=second
+//
+// Cross-account permissions don't apply to this action. For more information,
+// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name)
+// in the Amazon Simple Queue Service Developer Guide.
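+//
+// A sketch of creating a FIFO queue (the queue name and attribute values
+// are assumptions):
+//
+//    out, err := svc.CreateQueue(&sqs.CreateQueueInput{
+//        QueueName: aws.String("my-queue.fifo"),
+//        Attributes: map[string]*string{
+//            "FifoQueue":                 aws.String("true"),
+//            "ContentBasedDeduplication": aws.String("true"),
+//        },
+//    })
+//    // out.QueueUrl holds the URL of the new (or matching existing) queue.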
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation CreateQueue for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeQueueDeletedRecently "AWS.SimpleQueueService.QueueDeletedRecently"
+// You must wait 60 seconds after deleting a queue before you can create another
+// queue with the same name.
+//
+// * ErrCodeQueueNameExists "QueueAlreadyExists"
+// A queue with this name already exists. Amazon SQS returns this error only
+// if the request includes attributes whose values differ from those of the
+// existing queue.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/CreateQueue
+func (c *SQS) CreateQueue(input *CreateQueueInput) (*CreateQueueOutput, error) {
+ req, out := c.CreateQueueRequest(input)
+ return out, req.Send()
+}
+
+// CreateQueueWithContext is the same as CreateQueue with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateQueue for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) CreateQueueWithContext(ctx aws.Context, input *CreateQueueInput, opts ...request.Option) (*CreateQueueOutput, error) {
+ req, out := c.CreateQueueRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteMessage = "DeleteMessage"
+
+// DeleteMessageRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteMessage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteMessage for more information on using the DeleteMessage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteMessageRequest method.
+// req, resp := client.DeleteMessageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/DeleteMessage
+func (c *SQS) DeleteMessageRequest(input *DeleteMessageInput) (req *request.Request, output *DeleteMessageOutput) {
+ op := &request.Operation{
+ Name: opDeleteMessage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteMessageInput{}
+ }
+
+ output = &DeleteMessageOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteMessage API operation for Amazon Simple Queue Service.
+//
+// Deletes the specified message from the specified queue. To select the message
+// to delete, use the ReceiptHandle of the message (not the MessageId which
+// you receive when you send the message). Amazon SQS can delete a message from
+// a queue even if a visibility timeout setting causes the message to be locked
+// by another consumer. Amazon SQS automatically deletes messages left in a
+// queue longer than the retention period configured for the queue.
+//
+// The ReceiptHandle is associated with a specific instance of receiving a message.
+// If you receive a message more than once, the ReceiptHandle is different each
+// time you receive a message. When you use the DeleteMessage action, you must
+// provide the most recently received ReceiptHandle for the message (otherwise,
+// the request succeeds, but the message might not be deleted).
+//
+// For standard queues, it is possible to receive a message even after you delete
+// it. This might happen on rare occasions if one of the servers which stores
+// a copy of the message is unavailable when you send the request to delete
+// the message. The copy remains on the server and might be returned to you
+// during a subsequent receive request. You should ensure that your application
+// is idempotent, so that receiving a message more than once does not cause
+// issues.
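+//
+// A typical receive-then-delete sketch (queueURL is an assumption):
+//
+//    out, err := svc.ReceiveMessage(&sqs.ReceiveMessageInput{
+//        QueueUrl: aws.String(queueURL),
+//    })
+//    if err == nil && len(out.Messages) > 0 {
+//        _, err = svc.DeleteMessage(&sqs.DeleteMessageInput{
+//            QueueUrl:      aws.String(queueURL),
+//            ReceiptHandle: out.Messages[0].ReceiptHandle,
+//        })
+//    }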
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation DeleteMessage for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidIdFormat "InvalidIdFormat"
+// The specified receipt handle isn't valid for the current version.
+//
+// * ErrCodeReceiptHandleIsInvalid "ReceiptHandleIsInvalid"
+// The specified receipt handle isn't valid.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/DeleteMessage
+func (c *SQS) DeleteMessage(input *DeleteMessageInput) (*DeleteMessageOutput, error) {
+ req, out := c.DeleteMessageRequest(input)
+ return out, req.Send()
+}
+
+// DeleteMessageWithContext is the same as DeleteMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) DeleteMessageWithContext(ctx aws.Context, input *DeleteMessageInput, opts ...request.Option) (*DeleteMessageOutput, error) {
+ req, out := c.DeleteMessageRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteMessageBatch = "DeleteMessageBatch"
+
+// DeleteMessageBatchRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteMessageBatch operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteMessageBatch for more information on using the DeleteMessageBatch
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteMessageBatchRequest method.
+// req, resp := client.DeleteMessageBatchRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/DeleteMessageBatch
+func (c *SQS) DeleteMessageBatchRequest(input *DeleteMessageBatchInput) (req *request.Request, output *DeleteMessageBatchOutput) {
+ op := &request.Operation{
+ Name: opDeleteMessageBatch,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteMessageBatchInput{}
+ }
+
+ output = &DeleteMessageBatchOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteMessageBatch API operation for Amazon Simple Queue Service.
+//
+// Deletes up to ten messages from the specified queue. This is a batch version
+// of DeleteMessage. The result of the action on each message is reported individually
+// in the response.
+//
+// Because the batch request can result in a combination of successful and unsuccessful
+// actions, you should check for batch errors even when the call returns an
+// HTTP status code of 200.
+//
+// Some actions take lists of parameters. These lists are specified using the
+// param.n notation. Values of n are integers starting from 1. For example,
+// a parameter list with two elements looks like this:
+//
+// &Attribute.1=first
+//
+// &Attribute.2=second
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation DeleteMessageBatch for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeTooManyEntriesInBatchRequest "AWS.SimpleQueueService.TooManyEntriesInBatchRequest"
+// The batch request contains more entries than permissible.
+//
+// * ErrCodeEmptyBatchRequest "AWS.SimpleQueueService.EmptyBatchRequest"
+// The batch request doesn't contain any entries.
+//
+// * ErrCodeBatchEntryIdsNotDistinct "AWS.SimpleQueueService.BatchEntryIdsNotDistinct"
+// Two or more batch entries in the request have the same Id.
+//
+// * ErrCodeInvalidBatchEntryId "AWS.SimpleQueueService.InvalidBatchEntryId"
+// The Id of a batch entry in a batch request doesn't abide by the specification.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/DeleteMessageBatch
+func (c *SQS) DeleteMessageBatch(input *DeleteMessageBatchInput) (*DeleteMessageBatchOutput, error) {
+ req, out := c.DeleteMessageBatchRequest(input)
+ return out, req.Send()
+}
+
+// DeleteMessageBatchWithContext is the same as DeleteMessageBatch with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteMessageBatch for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) DeleteMessageBatchWithContext(ctx aws.Context, input *DeleteMessageBatchInput, opts ...request.Option) (*DeleteMessageBatchOutput, error) {
+ req, out := c.DeleteMessageBatchRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteQueue = "DeleteQueue"
+
+// DeleteQueueRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteQueue operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteQueue for more information on using the DeleteQueue
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteQueueRequest method.
+// req, resp := client.DeleteQueueRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/DeleteQueue
+func (c *SQS) DeleteQueueRequest(input *DeleteQueueInput) (req *request.Request, output *DeleteQueueOutput) {
+ op := &request.Operation{
+ Name: opDeleteQueue,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteQueueInput{}
+ }
+
+ output = &DeleteQueueOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// DeleteQueue API operation for Amazon Simple Queue Service.
+//
+// Deletes the queue specified by the QueueUrl, regardless of the queue's contents.
+// If the specified queue doesn't exist, Amazon SQS returns a successful response.
+//
+// Be careful with the DeleteQueue action: When you delete a queue, any messages
+// in the queue are no longer available.
+//
+// When you delete a queue, the deletion process takes up to 60 seconds. Requests
+// you send involving that queue during the 60 seconds might succeed. For example,
+// a SendMessage request might succeed, but after 60 seconds the queue and the
+// message you sent no longer exist.
+//
+// When you delete a queue, you must wait at least 60 seconds before creating
+// a queue with the same name.
+//
+// Cross-account permissions don't apply to this action. For more information,
+// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation DeleteQueue for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/DeleteQueue
+func (c *SQS) DeleteQueue(input *DeleteQueueInput) (*DeleteQueueOutput, error) {
+ req, out := c.DeleteQueueRequest(input)
+ return out, req.Send()
+}
+
+// DeleteQueueWithContext is the same as DeleteQueue with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteQueue for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) DeleteQueueWithContext(ctx aws.Context, input *DeleteQueueInput, opts ...request.Option) (*DeleteQueueOutput, error) {
+ req, out := c.DeleteQueueRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetQueueAttributes = "GetQueueAttributes"
+
+// GetQueueAttributesRequest generates a "aws/request.Request" representing the
+// client's request for the GetQueueAttributes operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetQueueAttributes for more information on using the GetQueueAttributes
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetQueueAttributesRequest method.
+// req, resp := client.GetQueueAttributesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/GetQueueAttributes
+func (c *SQS) GetQueueAttributesRequest(input *GetQueueAttributesInput) (req *request.Request, output *GetQueueAttributesOutput) {
+ op := &request.Operation{
+ Name: opGetQueueAttributes,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetQueueAttributesInput{}
+ }
+
+ output = &GetQueueAttributesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetQueueAttributes API operation for Amazon Simple Queue Service.
+//
+// Gets attributes for the specified queue.
+//
+// To determine whether a queue is FIFO (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html),
+// you can check whether QueueName ends with the .fifo suffix.
+//
+// Some actions take lists of parameters. These lists are specified using the
+// param.n notation. Values of n are integers starting from 1. For example,
+// a parameter list with two elements looks like this:
+//
+// &Attribute.1=first
+//
+// &Attribute.2=second
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation GetQueueAttributes for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidAttributeName "InvalidAttributeName"
+// The specified attribute doesn't exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/GetQueueAttributes
+func (c *SQS) GetQueueAttributes(input *GetQueueAttributesInput) (*GetQueueAttributesOutput, error) {
+ req, out := c.GetQueueAttributesRequest(input)
+ return out, req.Send()
+}
+
+// GetQueueAttributesWithContext is the same as GetQueueAttributes with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetQueueAttributes for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) GetQueueAttributesWithContext(ctx aws.Context, input *GetQueueAttributesInput, opts ...request.Option) (*GetQueueAttributesOutput, error) {
+ req, out := c.GetQueueAttributesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetQueueUrl = "GetQueueUrl"
+
+// GetQueueUrlRequest generates a "aws/request.Request" representing the
+// client's request for the GetQueueUrl operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetQueueUrl for more information on using the GetQueueUrl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetQueueUrlRequest method.
+// req, resp := client.GetQueueUrlRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/GetQueueUrl
+func (c *SQS) GetQueueUrlRequest(input *GetQueueUrlInput) (req *request.Request, output *GetQueueUrlOutput) {
+ op := &request.Operation{
+ Name: opGetQueueUrl,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetQueueUrlInput{}
+ }
+
+ output = &GetQueueUrlOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetQueueUrl API operation for Amazon Simple Queue Service.
+//
+// Returns the URL of an existing Amazon SQS queue.
+//
+// To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId
+// parameter to specify the account ID of the queue's owner. The queue's owner
+// must grant you permission to access the queue. For more information about
+// shared queue access, see AddPermission or see Allow Developers to Write Messages
+// to a Shared Queue (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-writing-an-sqs-policy.html#write-messages-to-shared-queue)
+// in the Amazon Simple Queue Service Developer Guide.
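+//
+// A minimal sketch (the queue name and account ID are assumptions):
+//
+//    out, err := svc.GetQueueUrl(&sqs.GetQueueUrlInput{
+//        QueueName:              aws.String("my-queue"),
+//        QueueOwnerAWSAccountId: aws.String("123456789012"), // optional
+//    })
+//    // *out.QueueUrl is then used in subsequent queue operations.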
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation GetQueueUrl for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeQueueDoesNotExist "AWS.SimpleQueueService.NonExistentQueue"
+// The specified queue doesn't exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/GetQueueUrl
+func (c *SQS) GetQueueUrl(input *GetQueueUrlInput) (*GetQueueUrlOutput, error) {
+ req, out := c.GetQueueUrlRequest(input)
+ return out, req.Send()
+}
+
+// GetQueueUrlWithContext is the same as GetQueueUrl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetQueueUrl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) GetQueueUrlWithContext(ctx aws.Context, input *GetQueueUrlInput, opts ...request.Option) (*GetQueueUrlOutput, error) {
+ req, out := c.GetQueueUrlRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListDeadLetterSourceQueues = "ListDeadLetterSourceQueues"
+
+// ListDeadLetterSourceQueuesRequest generates a "aws/request.Request" representing the
+// client's request for the ListDeadLetterSourceQueues operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListDeadLetterSourceQueues for more information on using the ListDeadLetterSourceQueues
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListDeadLetterSourceQueuesRequest method.
+// req, resp := client.ListDeadLetterSourceQueuesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListDeadLetterSourceQueues
+func (c *SQS) ListDeadLetterSourceQueuesRequest(input *ListDeadLetterSourceQueuesInput) (req *request.Request, output *ListDeadLetterSourceQueuesOutput) {
+ op := &request.Operation{
+ Name: opListDeadLetterSourceQueues,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListDeadLetterSourceQueuesInput{}
+ }
+
+ output = &ListDeadLetterSourceQueuesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListDeadLetterSourceQueues API operation for Amazon Simple Queue Service.
+//
+// Returns a list of your queues that have the RedrivePolicy queue attribute
+// configured with a dead-letter queue.
+//
+// For more information about using dead-letter queues, see Using Amazon SQS
+// Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html)
+// in the Amazon Simple Queue Service Developer Guide.
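+//
+// A sketch listing the source queues of a dead-letter queue (dlqURL is an
+// assumption):
+//
+//    out, err := svc.ListDeadLetterSourceQueues(&sqs.ListDeadLetterSourceQueuesInput{
+//        QueueUrl: aws.String(dlqURL),
+//    })
+//    // out.QueueUrls lists the queues whose RedrivePolicy targets dlqURL.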
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation ListDeadLetterSourceQueues for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeQueueDoesNotExist "AWS.SimpleQueueService.NonExistentQueue"
+// The specified queue doesn't exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListDeadLetterSourceQueues
+func (c *SQS) ListDeadLetterSourceQueues(input *ListDeadLetterSourceQueuesInput) (*ListDeadLetterSourceQueuesOutput, error) {
+ req, out := c.ListDeadLetterSourceQueuesRequest(input)
+ return out, req.Send()
+}
+
+// ListDeadLetterSourceQueuesWithContext is the same as ListDeadLetterSourceQueues with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListDeadLetterSourceQueues for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) ListDeadLetterSourceQueuesWithContext(ctx aws.Context, input *ListDeadLetterSourceQueuesInput, opts ...request.Option) (*ListDeadLetterSourceQueuesOutput, error) {
+ req, out := c.ListDeadLetterSourceQueuesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
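+
+// Illustrative sketch (svc and dlqURL are assumptions): listing the source
+// queues whose RedrivePolicy targets a given dead-letter queue.
+//
+//    out, err := svc.ListDeadLetterSourceQueues(&sqs.ListDeadLetterSourceQueuesInput{
+//        QueueUrl: aws.String(dlqURL), // URL of the dead-letter queue itself
+//    })
+//    if err == nil {
+//        for _, u := range out.QueueUrls {
+//            fmt.Println(aws.StringValue(u))
+//        }
+//    }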
+
+const opListQueueTags = "ListQueueTags"
+
+// ListQueueTagsRequest generates a "aws/request.Request" representing the
+// client's request for the ListQueueTags operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListQueueTags for more information on using the ListQueueTags
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the ListQueueTagsRequest method.
+// req, resp := client.ListQueueTagsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListQueueTags
+func (c *SQS) ListQueueTagsRequest(input *ListQueueTagsInput) (req *request.Request, output *ListQueueTagsOutput) {
+ op := &request.Operation{
+ Name: opListQueueTags,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListQueueTagsInput{}
+ }
+
+ output = &ListQueueTagsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListQueueTags API operation for Amazon Simple Queue Service.
+//
+// Lists all cost allocation tags added to the specified Amazon SQS queue. For
+// an overview, see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// Cross-account permissions don't apply to this action. For more information,
+// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation ListQueueTags for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListQueueTags
+func (c *SQS) ListQueueTags(input *ListQueueTagsInput) (*ListQueueTagsOutput, error) {
+ req, out := c.ListQueueTagsRequest(input)
+ return out, req.Send()
+}
+
+// ListQueueTagsWithContext is the same as ListQueueTags with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListQueueTags for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) ListQueueTagsWithContext(ctx aws.Context, input *ListQueueTagsInput, opts ...request.Option) (*ListQueueTagsOutput, error) {
+ req, out := c.ListQueueTagsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
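+
+// Illustrative sketch (svc and queueURL are assumptions): the returned Tags
+// map is keyed by tag key with *string values.
+//
+//    out, err := svc.ListQueueTags(&sqs.ListQueueTagsInput{
+//        QueueUrl: aws.String(queueURL),
+//    })
+//    if err == nil {
+//        for k, v := range out.Tags {
+//            fmt.Printf("%s=%s\n", k, aws.StringValue(v))
+//        }
+//    }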
+
+const opListQueues = "ListQueues"
+
+// ListQueuesRequest generates a "aws/request.Request" representing the
+// client's request for the ListQueues operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListQueues for more information on using the ListQueues
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the ListQueuesRequest method.
+// req, resp := client.ListQueuesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListQueues
+func (c *SQS) ListQueuesRequest(input *ListQueuesInput) (req *request.Request, output *ListQueuesOutput) {
+ op := &request.Operation{
+ Name: opListQueues,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListQueuesInput{}
+ }
+
+ output = &ListQueuesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListQueues API operation for Amazon Simple Queue Service.
+//
+// Returns a list of your queues. The maximum number of queues that can be returned
+// is 1,000. If you specify a value for the optional QueueNamePrefix parameter,
+// only queues with a name that begins with the specified value are returned.
+//
+// Cross-account permissions don't apply to this action. For more information,
+// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation ListQueues for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ListQueues
+func (c *SQS) ListQueues(input *ListQueuesInput) (*ListQueuesOutput, error) {
+ req, out := c.ListQueuesRequest(input)
+ return out, req.Send()
+}
+
+// ListQueuesWithContext is the same as ListQueues with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListQueues for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) ListQueuesWithContext(ctx aws.Context, input *ListQueuesInput, opts ...request.Option) (*ListQueuesOutput, error) {
+ req, out := c.ListQueuesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
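+
+// Illustrative sketch (svc is an assumed *sqs.SQS client): narrowing the
+// listing with the optional QueueNamePrefix parameter described above.
+//
+//    out, err := svc.ListQueues(&sqs.ListQueuesInput{
+//        QueueNamePrefix: aws.String("prod-"),
+//    })
+//    if err == nil {
+//        for _, u := range out.QueueUrls {
+//            fmt.Println(aws.StringValue(u))
+//        }
+//    }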
+
+const opPurgeQueue = "PurgeQueue"
+
+// PurgeQueueRequest generates a "aws/request.Request" representing the
+// client's request for the PurgeQueue operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PurgeQueue for more information on using the PurgeQueue
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the PurgeQueueRequest method.
+// req, resp := client.PurgeQueueRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/PurgeQueue
+func (c *SQS) PurgeQueueRequest(input *PurgeQueueInput) (req *request.Request, output *PurgeQueueOutput) {
+ op := &request.Operation{
+ Name: opPurgeQueue,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PurgeQueueInput{}
+ }
+
+ output = &PurgeQueueOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PurgeQueue API operation for Amazon Simple Queue Service.
+//
+// Deletes the messages in a queue specified by the QueueURL parameter.
+//
+// When you use the PurgeQueue action, you can't retrieve any messages deleted
+// from a queue.
+//
+// The message deletion process takes up to 60 seconds. We recommend waiting
+// for 60 seconds regardless of your queue's size.
+//
+// Messages sent to the queue before you call PurgeQueue might be received but
+// are deleted within the next minute.
+//
+// Messages sent to the queue after you call PurgeQueue might be deleted while
+// the queue is being purged.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation PurgeQueue for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeQueueDoesNotExist "AWS.SimpleQueueService.NonExistentQueue"
+// The specified queue doesn't exist.
+//
+// * ErrCodePurgeQueueInProgress "AWS.SimpleQueueService.PurgeQueueInProgress"
+// Indicates that the specified queue previously received a PurgeQueue request
+// within the last 60 seconds (the time it can take to delete the messages in
+// the queue).
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/PurgeQueue
+func (c *SQS) PurgeQueue(input *PurgeQueueInput) (*PurgeQueueOutput, error) {
+ req, out := c.PurgeQueueRequest(input)
+ return out, req.Send()
+}
+
+// PurgeQueueWithContext is the same as PurgeQueue with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PurgeQueue for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) PurgeQueueWithContext(ctx aws.Context, input *PurgeQueueInput, opts ...request.Option) (*PurgeQueueOutput, error) {
+ req, out := c.PurgeQueueRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
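+
+// Illustrative sketch (svc and queueURL are assumptions): a purge issued
+// while another purge is still running surfaces as ErrCodePurgeQueueInProgress,
+// which can be detected with an awserr.Error type assertion.
+//
+//    _, err := svc.PurgeQueue(&sqs.PurgeQueueInput{
+//        QueueUrl: aws.String(queueURL),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok &&
+//        aerr.Code() == sqs.ErrCodePurgeQueueInProgress {
+//        // a purge was requested within the last 60 seconds; retry later
+//    }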
+
+const opReceiveMessage = "ReceiveMessage"
+
+// ReceiveMessageRequest generates a "aws/request.Request" representing the
+// client's request for the ReceiveMessage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ReceiveMessage for more information on using the ReceiveMessage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the ReceiveMessageRequest method.
+// req, resp := client.ReceiveMessageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ReceiveMessage
+func (c *SQS) ReceiveMessageRequest(input *ReceiveMessageInput) (req *request.Request, output *ReceiveMessageOutput) {
+ op := &request.Operation{
+ Name: opReceiveMessage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ReceiveMessageInput{}
+ }
+
+ output = &ReceiveMessageOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ReceiveMessage API operation for Amazon Simple Queue Service.
+//
+// Retrieves one or more messages (up to 10) from the specified queue. Using
+// the WaitTimeSeconds parameter enables long-poll support. For more information,
+// see Amazon SQS Long Polling (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// Short polling is the default behavior, in which a weighted random set of machines
+// is sampled on a ReceiveMessage call. Thus, only the messages on the sampled
+// machines are returned. If the number of messages in the queue is small (fewer
+// than 1,000), you most likely get fewer messages than you requested per ReceiveMessage
+// call. If the number of messages in the queue is extremely small, you might
+// not receive any messages in a particular ReceiveMessage response. If this
+// happens, repeat the request.
+//
+// For each message returned, the response includes the following:
+//
+// * The message body.
+//
+// * An MD5 digest of the message body. For information about MD5, see RFC1321
+// (https://www.ietf.org/rfc/rfc1321.txt).
+//
+// * The MessageId you received when you sent the message to the queue.
+//
+// * The receipt handle.
+//
+// * The message attributes.
+//
+// * An MD5 digest of the message attributes.
+//
+// The receipt handle is the identifier you must provide when deleting the message.
+// For more information, see Queue and Message Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// You can provide the VisibilityTimeout parameter in your request. The parameter
+// is applied to the messages that Amazon SQS returns in the response. If you
+// don't include the parameter, the overall visibility timeout for the queue
+// is used for the returned messages. For more information, see Visibility Timeout
+// (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// A message that isn't deleted or a message whose visibility isn't extended
+// before the visibility timeout expires counts as a failed receive. Depending
+// on the configuration of the queue, the message might be sent to the dead-letter
+// queue.
+//
+// In the future, new attributes might be added. If you write code that calls
+// this action, we recommend that you structure your code so that it can handle
+// new attributes gracefully.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation ReceiveMessage for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeOverLimit "OverLimit"
+// The specified action violates a limit. For example, ReceiveMessage returns
+// this error if the maximum number of in-flight messages is reached, and AddPermission
+// returns this error if the maximum number of permissions for the queue is
+// reached.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/ReceiveMessage
+func (c *SQS) ReceiveMessage(input *ReceiveMessageInput) (*ReceiveMessageOutput, error) {
+ req, out := c.ReceiveMessageRequest(input)
+ return out, req.Send()
+}
+
+// ReceiveMessageWithContext is the same as ReceiveMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ReceiveMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) ReceiveMessageWithContext(ctx aws.Context, input *ReceiveMessageInput, opts ...request.Option) (*ReceiveMessageOutput, error) {
+ req, out := c.ReceiveMessageRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
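+
+// Illustrative sketch (svc and queueURL are assumptions): setting
+// WaitTimeSeconds enables the long polling described above, and each returned
+// message carries the receipt handle needed to delete it later.
+//
+//    out, err := svc.ReceiveMessage(&sqs.ReceiveMessageInput{
+//        QueueUrl:            aws.String(queueURL),
+//        MaxNumberOfMessages: aws.Int64(10),
+//        WaitTimeSeconds:     aws.Int64(20), // long poll for up to 20 seconds
+//    })
+//    if err == nil {
+//        for _, m := range out.Messages {
+//            fmt.Println(aws.StringValue(m.MessageId), aws.StringValue(m.Body))
+//        }
+//    }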
+
+const opRemovePermission = "RemovePermission"
+
+// RemovePermissionRequest generates a "aws/request.Request" representing the
+// client's request for the RemovePermission operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See RemovePermission for more information on using the RemovePermission
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the RemovePermissionRequest method.
+// req, resp := client.RemovePermissionRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/RemovePermission
+func (c *SQS) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) {
+ op := &request.Operation{
+ Name: opRemovePermission,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RemovePermissionInput{}
+ }
+
+ output = &RemovePermissionOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// RemovePermission API operation for Amazon Simple Queue Service.
+//
+// Revokes any permissions in the queue policy that match the specified Label
+// parameter.
+//
+// * Only the owner of a queue can remove permissions from it.
+//
+// * Cross-account permissions don't apply to this action. For more information,
+// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// * To remove the ability to change queue permissions, you must deny permission
+// to the AddPermission, RemovePermission, and SetQueueAttributes actions
+// in your IAM policy.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation RemovePermission for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/RemovePermission
+func (c *SQS) RemovePermission(input *RemovePermissionInput) (*RemovePermissionOutput, error) {
+ req, out := c.RemovePermissionRequest(input)
+ return out, req.Send()
+}
+
+// RemovePermissionWithContext is the same as RemovePermission with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RemovePermission for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) RemovePermissionWithContext(ctx aws.Context, input *RemovePermissionInput, opts ...request.Option) (*RemovePermissionOutput, error) {
+ req, out := c.RemovePermissionRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
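+
+// Illustrative sketch (svc and queueURL are assumptions): the Label must
+// match the label passed to AddPermission, e.g. "AliceSendMessage".
+//
+//    _, err := svc.RemovePermission(&sqs.RemovePermissionInput{
+//        QueueUrl: aws.String(queueURL),
+//        Label:    aws.String("AliceSendMessage"),
+//    })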
+
+const opSendMessage = "SendMessage"
+
+// SendMessageRequest generates a "aws/request.Request" representing the
+// client's request for the SendMessage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See SendMessage for more information on using the SendMessage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the SendMessageRequest method.
+// req, resp := client.SendMessageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/SendMessage
+func (c *SQS) SendMessageRequest(input *SendMessageInput) (req *request.Request, output *SendMessageOutput) {
+ op := &request.Operation{
+ Name: opSendMessage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &SendMessageInput{}
+ }
+
+ output = &SendMessageOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// SendMessage API operation for Amazon Simple Queue Service.
+//
+// Delivers a message to the specified queue.
+//
+// A message can include only XML, JSON, and unformatted text. The following
+// Unicode characters are allowed:
+//
+// #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF
+//
+// Any characters not included in this list will be rejected. For more information,
+// see the W3C specification for characters (http://www.w3.org/TR/REC-xml/#charsets).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation SendMessage for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidMessageContents "InvalidMessageContents"
+// The message contains characters outside the allowed set.
+//
+// * ErrCodeUnsupportedOperation "AWS.SimpleQueueService.UnsupportedOperation"
+// Error code 400. Unsupported operation.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/SendMessage
+func (c *SQS) SendMessage(input *SendMessageInput) (*SendMessageOutput, error) {
+ req, out := c.SendMessageRequest(input)
+ return out, req.Send()
+}
+
+// SendMessageWithContext is the same as SendMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See SendMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) SendMessageWithContext(ctx aws.Context, input *SendMessageInput, opts ...request.Option) (*SendMessageOutput, error) {
+ req, out := c.SendMessageRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
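+
+// Illustrative sketch (svc and queueURL are assumptions): the output carries
+// the MessageId and an MD5 digest of the body that callers can verify.
+//
+//    out, err := svc.SendMessage(&sqs.SendMessageInput{
+//        QueueUrl:    aws.String(queueURL),
+//        MessageBody: aws.String("hello world"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.MessageId), aws.StringValue(out.MD5OfMessageBody))
+//    }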
+
+const opSendMessageBatch = "SendMessageBatch"
+
+// SendMessageBatchRequest generates a "aws/request.Request" representing the
+// client's request for the SendMessageBatch operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See SendMessageBatch for more information on using the SendMessageBatch
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the SendMessageBatchRequest method.
+// req, resp := client.SendMessageBatchRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/SendMessageBatch
+func (c *SQS) SendMessageBatchRequest(input *SendMessageBatchInput) (req *request.Request, output *SendMessageBatchOutput) {
+ op := &request.Operation{
+ Name: opSendMessageBatch,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &SendMessageBatchInput{}
+ }
+
+ output = &SendMessageBatchOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// SendMessageBatch API operation for Amazon Simple Queue Service.
+//
+// Delivers up to ten messages to the specified queue. This is a batch version
+// of SendMessage. For a FIFO queue, multiple messages within a single batch
+// are enqueued in the order they are sent.
+//
+// The result of sending each message is reported individually in the response.
+// Because the batch request can result in a combination of successful and unsuccessful
+// actions, you should check for batch errors even when the call returns an
+// HTTP status code of 200.
+//
+// The maximum allowed individual message size and the maximum total payload
+// size (the sum of the individual lengths of all of the batched messages) are
+// both 256 KB (262,144 bytes).
+//
+// A message can include only XML, JSON, and unformatted text. The following
+// Unicode characters are allowed:
+//
+// #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF
+//
+// Any characters not included in this list will be rejected. For more information,
+// see the W3C specification for characters (http://www.w3.org/TR/REC-xml/#charsets).
+//
+// If you don't specify the DelaySeconds parameter for an entry, Amazon SQS
+// uses the default value for the queue.
+//
+// Some actions take lists of parameters. These lists are specified using the
+// param.n notation. Values of n are integers starting from 1. For example,
+// a parameter list with two elements looks like this:
+//
+// &Attribute.1=first
+//
+// &Attribute.2=second
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation SendMessageBatch for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeTooManyEntriesInBatchRequest "AWS.SimpleQueueService.TooManyEntriesInBatchRequest"
+// The batch request contains more entries than permissible.
+//
+// * ErrCodeEmptyBatchRequest "AWS.SimpleQueueService.EmptyBatchRequest"
+// The batch request doesn't contain any entries.
+//
+// * ErrCodeBatchEntryIdsNotDistinct "AWS.SimpleQueueService.BatchEntryIdsNotDistinct"
+// Two or more batch entries in the request have the same Id.
+//
+// * ErrCodeBatchRequestTooLong "AWS.SimpleQueueService.BatchRequestTooLong"
+// The length of all the messages put together is more than the limit.
+//
+// * ErrCodeInvalidBatchEntryId "AWS.SimpleQueueService.InvalidBatchEntryId"
+// The Id of a batch entry in a batch request doesn't abide by the specification.
+//
+// * ErrCodeUnsupportedOperation "AWS.SimpleQueueService.UnsupportedOperation"
+// Error code 400. Unsupported operation.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/SendMessageBatch
+func (c *SQS) SendMessageBatch(input *SendMessageBatchInput) (*SendMessageBatchOutput, error) {
+ req, out := c.SendMessageBatchRequest(input)
+ return out, req.Send()
+}
+
+// SendMessageBatchWithContext is the same as SendMessageBatch with the addition of
+// the ability to pass a context and additional request options.
+//
+// See SendMessageBatch for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) SendMessageBatchWithContext(ctx aws.Context, input *SendMessageBatchInput, opts ...request.Option) (*SendMessageBatchOutput, error) {
+ req, out := c.SendMessageBatchRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
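+
+// Illustrative sketch (svc and queueURL are assumptions): because a batch can
+// partially succeed, inspect out.Failed even when err is nil, as the operation
+// documentation above advises.
+//
+//    out, err := svc.SendMessageBatch(&sqs.SendMessageBatchInput{
+//        QueueUrl: aws.String(queueURL),
+//        Entries: []*sqs.SendMessageBatchRequestEntry{
+//            {Id: aws.String("msg-1"), MessageBody: aws.String("first")},
+//            {Id: aws.String("msg-2"), MessageBody: aws.String("second")},
+//        },
+//    })
+//    if err == nil {
+//        for _, f := range out.Failed {
+//            fmt.Println(aws.StringValue(f.Id), aws.StringValue(f.Code))
+//        }
+//    }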
+
+const opSetQueueAttributes = "SetQueueAttributes"
+
+// SetQueueAttributesRequest generates a "aws/request.Request" representing the
+// client's request for the SetQueueAttributes operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See SetQueueAttributes for more information on using the SetQueueAttributes
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the SetQueueAttributesRequest method.
+// req, resp := client.SetQueueAttributesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/SetQueueAttributes
+func (c *SQS) SetQueueAttributesRequest(input *SetQueueAttributesInput) (req *request.Request, output *SetQueueAttributesOutput) {
+ op := &request.Operation{
+ Name: opSetQueueAttributes,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &SetQueueAttributesInput{}
+ }
+
+ output = &SetQueueAttributesOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// SetQueueAttributes API operation for Amazon Simple Queue Service.
+//
+// Sets the value of one or more queue attributes. When you change a queue's
+// attributes, the change can take up to 60 seconds for most of the attributes
+// to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod
+// attribute can take up to 15 minutes.
+//
+// * In the future, new attributes might be added. If you write code that
+// calls this action, we recommend that you structure your code so that it
+// can handle new attributes gracefully.
+//
+// * Cross-account permissions don't apply to this action. For more information,
+// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// * To remove the ability to change queue permissions, you must deny permission
+// to the AddPermission, RemovePermission, and SetQueueAttributes actions
+// in your IAM policy.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation SetQueueAttributes for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidAttributeName "InvalidAttributeName"
+// The specified attribute doesn't exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/SetQueueAttributes
+func (c *SQS) SetQueueAttributes(input *SetQueueAttributesInput) (*SetQueueAttributesOutput, error) {
+ req, out := c.SetQueueAttributesRequest(input)
+ return out, req.Send()
+}
+
+// SetQueueAttributesWithContext is the same as SetQueueAttributes with the addition of
+// the ability to pass a context and additional request options.
+//
+// See SetQueueAttributes for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) SetQueueAttributesWithContext(ctx aws.Context, input *SetQueueAttributesInput, opts ...request.Option) (*SetQueueAttributesOutput, error) {
+ req, out := c.SetQueueAttributesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
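+
+// Illustrative sketch (svc and queueURL are assumptions): attribute values are
+// passed as strings; here the queue's visibility timeout is raised to 60 seconds.
+//
+//    _, err := svc.SetQueueAttributes(&sqs.SetQueueAttributesInput{
+//        QueueUrl: aws.String(queueURL),
+//        Attributes: map[string]*string{
+//            "VisibilityTimeout": aws.String("60"),
+//        },
+//    })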
+
+const opTagQueue = "TagQueue"
+
+// TagQueueRequest generates a "aws/request.Request" representing the
+// client's request for the TagQueue operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See TagQueue for more information on using the TagQueue
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the TagQueueRequest method.
+// req, resp := client.TagQueueRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/TagQueue
+func (c *SQS) TagQueueRequest(input *TagQueueInput) (req *request.Request, output *TagQueueOutput) {
+ op := &request.Operation{
+ Name: opTagQueue,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &TagQueueInput{}
+ }
+
+ output = &TagQueueOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// TagQueue API operation for Amazon Simple Queue Service.
+//
+// Adds cost allocation tags to the specified Amazon SQS queue. For an overview,
+// see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// When you use queue tags, keep the following guidelines in mind:
+//
+// * Adding more than 50 tags to a queue isn't recommended.
+//
+// * Tags don't have any semantic meaning. Amazon SQS interprets tags as
+// character strings.
+//
+// * Tags are case-sensitive.
+//
+// * A new tag with a key identical to that of an existing tag overwrites
+// the existing tag.
+//
+// For a full list of tag restrictions, see Limits Related to Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// Cross-account permissions don't apply to this action. For more information,
+// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation TagQueue for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/TagQueue
+func (c *SQS) TagQueue(input *TagQueueInput) (*TagQueueOutput, error) {
+ req, out := c.TagQueueRequest(input)
+ return out, req.Send()
+}
+
+// TagQueueWithContext is the same as TagQueue with the addition of
+// the ability to pass a context and additional request options.
+//
+// See TagQueue for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) TagQueueWithContext(ctx aws.Context, input *TagQueueInput, opts ...request.Option) (*TagQueueOutput, error) {
+ req, out := c.TagQueueRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
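+
+// Illustrative sketch (svc and queueURL are assumptions): tags form a plain
+// string-to-string map, and re-using an existing key overwrites that tag.
+//
+//    _, err := svc.TagQueue(&sqs.TagQueueInput{
+//        QueueUrl: aws.String(queueURL),
+//        Tags: map[string]*string{
+//            "team": aws.String("platform"),
+//        },
+//    })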
+
+const opUntagQueue = "UntagQueue"
+
+// UntagQueueRequest generates a "aws/request.Request" representing the
+// client's request for the UntagQueue operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UntagQueue for more information on using the UntagQueue
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the UntagQueueRequest method.
+// req, resp := client.UntagQueueRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/UntagQueue
+func (c *SQS) UntagQueueRequest(input *UntagQueueInput) (req *request.Request, output *UntagQueueOutput) {
+ op := &request.Operation{
+ Name: opUntagQueue,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UntagQueueInput{}
+ }
+
+ output = &UntagQueueOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// UntagQueue API operation for Amazon Simple Queue Service.
+//
+// Removes cost allocation tags from the specified Amazon SQS queue. For an overview,
+// see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// Cross-account permissions don't apply to this action. For more information,
+// see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name)
+// in the Amazon Simple Queue Service Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Queue Service's
+// API operation UntagQueue for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05/UntagQueue
+func (c *SQS) UntagQueue(input *UntagQueueInput) (*UntagQueueOutput, error) {
+ req, out := c.UntagQueueRequest(input)
+ return out, req.Send()
+}
+
+// UntagQueueWithContext is the same as UntagQueue with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UntagQueue for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *SQS) UntagQueueWithContext(ctx aws.Context, input *UntagQueueInput, opts ...request.Option) (*UntagQueueOutput, error) {
+ req, out := c.UntagQueueRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
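+
+// Illustrative sketch (svc and queueURL are assumptions): tags are removed by
+// key only.
+//
+//    _, err := svc.UntagQueue(&sqs.UntagQueueInput{
+//        QueueUrl: aws.String(queueURL),
+//        TagKeys:  []*string{aws.String("team")},
+//    })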
+
+type AddPermissionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AWS account number of the principal (https://docs.aws.amazon.com/general/latest/gr/glos-chap.html#P)
+ // who is given permission. The principal must have an AWS account, but does
+ // not need to be signed up for Amazon SQS. For information about locating the
+ // AWS account identification, see Your AWS Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html#sqs-api-request-authentication)
+ // in the Amazon Simple Queue Service Developer Guide.
+ //
+ // AWSAccountIds is a required field
+ AWSAccountIds []*string `locationNameList:"AWSAccountId" type:"list" flattened:"true" required:"true"`
+
+ // The action the client wants to allow for the specified principal. Valid values:
+ // the name of any action or *.
+ //
+ // For more information about these actions, see Overview of Managing Access
+ // Permissions to Your Amazon Simple Queue Service Resource (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-overview-of-managing-access.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ //
+ // Specifying SendMessage, DeleteMessage, or ChangeMessageVisibility for ActionName.n
+ // also grants permissions for the corresponding batch versions of those actions:
+ // SendMessageBatch, DeleteMessageBatch, and ChangeMessageVisibilityBatch.
+ //
+ // Actions is a required field
+ Actions []*string `locationNameList:"ActionName" type:"list" flattened:"true" required:"true"`
+
+ // The unique identification of the permission you're setting (for example,
+ // AliceSendMessage). Maximum 80 characters. Allowed characters include alphanumeric
+ // characters, hyphens (-), and underscores (_).
+ //
+ // Label is a required field
+ Label *string `type:"string" required:"true"`
+
+ // The URL of the Amazon SQS queue to which permissions are added.
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AddPermissionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddPermissionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AddPermissionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AddPermissionInput"}
+ if s.AWSAccountIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("AWSAccountIds"))
+ }
+ if s.Actions == nil {
+ invalidParams.Add(request.NewErrParamRequired("Actions"))
+ }
+ if s.Label == nil {
+ invalidParams.Add(request.NewErrParamRequired("Label"))
+ }
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAWSAccountIds sets the AWSAccountIds field's value.
+func (s *AddPermissionInput) SetAWSAccountIds(v []*string) *AddPermissionInput {
+ s.AWSAccountIds = v
+ return s
+}
+
+// SetActions sets the Actions field's value.
+func (s *AddPermissionInput) SetActions(v []*string) *AddPermissionInput {
+ s.Actions = v
+ return s
+}
+
+// SetLabel sets the Label field's value.
+func (s *AddPermissionInput) SetLabel(v string) *AddPermissionInput {
+ s.Label = &v
+ return s
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *AddPermissionInput) SetQueueUrl(v string) *AddPermissionInput {
+ s.QueueUrl = &v
+ return s
+}
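+
+// Illustrative sketch (svc and queueURL are assumptions): the Set* methods
+// above return the receiver, so an AddPermission request can be built fluently.
+//
+//    input := (&sqs.AddPermissionInput{}).
+//        SetQueueUrl(queueURL).
+//        SetLabel("AliceSendMessage").
+//        SetAWSAccountIds([]*string{aws.String("123456789012")}).
+//        SetActions([]*string{aws.String("SendMessage")})
+//    _, err := svc.AddPermission(input)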
+
+type AddPermissionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s AddPermissionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddPermissionOutput) GoString() string {
+ return s.String()
+}
+
+// Gives a detailed description of the result of an action on each entry in
+// the request.
+type BatchResultErrorEntry struct {
+ _ struct{} `type:"structure"`
+
+ // An error code representing why the action failed on this entry.
+ //
+ // Code is a required field
+ Code *string `type:"string" required:"true"`
+
+ // The Id of an entry in a batch request.
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+
+ // A message explaining why the action failed on this entry.
+ Message *string `type:"string"`
+
+ // Specifies whether the error happened due to the producer.
+ //
+ // SenderFault is a required field
+ SenderFault *bool `type:"boolean" required:"true"`
+}
+
+// String returns the string representation
+func (s BatchResultErrorEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchResultErrorEntry) GoString() string {
+ return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *BatchResultErrorEntry) SetCode(v string) *BatchResultErrorEntry {
+ s.Code = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *BatchResultErrorEntry) SetId(v string) *BatchResultErrorEntry {
+ s.Id = &v
+ return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *BatchResultErrorEntry) SetMessage(v string) *BatchResultErrorEntry {
+ s.Message = &v
+ return s
+}
+
+// SetSenderFault sets the SenderFault field's value.
+func (s *BatchResultErrorEntry) SetSenderFault(v bool) *BatchResultErrorEntry {
+ s.SenderFault = &v
+ return s
+}
+
+type ChangeMessageVisibilityBatchInput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of receipt handles of the messages for which the visibility timeout
+ // must be changed.
+ //
+ // Entries is a required field
+ Entries []*ChangeMessageVisibilityBatchRequestEntry `locationNameList:"ChangeMessageVisibilityBatchRequestEntry" type:"list" flattened:"true" required:"true"`
+
+ // The URL of the Amazon SQS queue whose messages' visibility is changed.
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ChangeMessageVisibilityBatchInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeMessageVisibilityBatchInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ChangeMessageVisibilityBatchInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ChangeMessageVisibilityBatchInput"}
+ if s.Entries == nil {
+ invalidParams.Add(request.NewErrParamRequired("Entries"))
+ }
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+ if s.Entries != nil {
+ for i, v := range s.Entries {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEntries sets the Entries field's value.
+func (s *ChangeMessageVisibilityBatchInput) SetEntries(v []*ChangeMessageVisibilityBatchRequestEntry) *ChangeMessageVisibilityBatchInput {
+ s.Entries = v
+ return s
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *ChangeMessageVisibilityBatchInput) SetQueueUrl(v string) *ChangeMessageVisibilityBatchInput {
+ s.QueueUrl = &v
+ return s
+}
+
+// For each message in the batch, the response contains a ChangeMessageVisibilityBatchResultEntry
+// tag if the message succeeds or a BatchResultErrorEntry tag if the message
+// fails.
+type ChangeMessageVisibilityBatchOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of BatchResultErrorEntry items.
+ //
+ // Failed is a required field
+ Failed []*BatchResultErrorEntry `locationNameList:"BatchResultErrorEntry" type:"list" flattened:"true" required:"true"`
+
+ // A list of ChangeMessageVisibilityBatchResultEntry items.
+ //
+ // Successful is a required field
+ Successful []*ChangeMessageVisibilityBatchResultEntry `locationNameList:"ChangeMessageVisibilityBatchResultEntry" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s ChangeMessageVisibilityBatchOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeMessageVisibilityBatchOutput) GoString() string {
+ return s.String()
+}
+
+// SetFailed sets the Failed field's value.
+func (s *ChangeMessageVisibilityBatchOutput) SetFailed(v []*BatchResultErrorEntry) *ChangeMessageVisibilityBatchOutput {
+ s.Failed = v
+ return s
+}
+
+// SetSuccessful sets the Successful field's value.
+func (s *ChangeMessageVisibilityBatchOutput) SetSuccessful(v []*ChangeMessageVisibilityBatchResultEntry) *ChangeMessageVisibilityBatchOutput {
+ s.Successful = v
+ return s
+}
+
+// Encloses a receipt handle and an entry id for each message in ChangeMessageVisibilityBatch.
+//
+// All of the following list parameters must be prefixed with ChangeMessageVisibilityBatchRequestEntry.n,
+// where n is an integer value starting with 1. For example, a parameter list
+// for this action might look like this:
+//
+// &ChangeMessageVisibilityBatchRequestEntry.1.Id=change_visibility_msg_2
+//
+// &ChangeMessageVisibilityBatchRequestEntry.1.ReceiptHandle=your_receipt_handle
+//
+// &ChangeMessageVisibilityBatchRequestEntry.1.VisibilityTimeout=45
+type ChangeMessageVisibilityBatchRequestEntry struct {
+ _ struct{} `type:"structure"`
+
+ // An identifier for this particular receipt handle used to communicate the
+ // result.
+ //
+	// The Ids of a batch request need to be unique within a request.
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+
+ // A receipt handle.
+ //
+ // ReceiptHandle is a required field
+ ReceiptHandle *string `type:"string" required:"true"`
+
+ // The new value (in seconds) for the message's visibility timeout.
+ VisibilityTimeout *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ChangeMessageVisibilityBatchRequestEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeMessageVisibilityBatchRequestEntry) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ChangeMessageVisibilityBatchRequestEntry) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ChangeMessageVisibilityBatchRequestEntry"}
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.ReceiptHandle == nil {
+ invalidParams.Add(request.NewErrParamRequired("ReceiptHandle"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetId sets the Id field's value.
+func (s *ChangeMessageVisibilityBatchRequestEntry) SetId(v string) *ChangeMessageVisibilityBatchRequestEntry {
+ s.Id = &v
+ return s
+}
+
+// SetReceiptHandle sets the ReceiptHandle field's value.
+func (s *ChangeMessageVisibilityBatchRequestEntry) SetReceiptHandle(v string) *ChangeMessageVisibilityBatchRequestEntry {
+ s.ReceiptHandle = &v
+ return s
+}
+
+// SetVisibilityTimeout sets the VisibilityTimeout field's value.
+func (s *ChangeMessageVisibilityBatchRequestEntry) SetVisibilityTimeout(v int64) *ChangeMessageVisibilityBatchRequestEntry {
+ s.VisibilityTimeout = &v
+ return s
+}
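+
+// Illustrative sketch (svc, queueURL, and the receipt handles rh1 and rh2 are
+// assumptions): batching visibility changes for two previously received messages.
+//
+//    out, err := svc.ChangeMessageVisibilityBatch(&sqs.ChangeMessageVisibilityBatchInput{
+//        QueueUrl: aws.String(queueURL),
+//        Entries: []*sqs.ChangeMessageVisibilityBatchRequestEntry{
+//            {Id: aws.String("1"), ReceiptHandle: rh1, VisibilityTimeout: aws.Int64(45)},
+//            {Id: aws.String("2"), ReceiptHandle: rh2, VisibilityTimeout: aws.Int64(45)},
+//        },
+//    })
+//    if err == nil {
+//        fmt.Println(len(out.Successful), "succeeded,", len(out.Failed), "failed")
+//    }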
+
+// Encloses the Id of an entry in ChangeMessageVisibilityBatch.
+type ChangeMessageVisibilityBatchResultEntry struct {
+ _ struct{} `type:"structure"`
+
+ // Represents a message whose visibility timeout has been changed successfully.
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ChangeMessageVisibilityBatchResultEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeMessageVisibilityBatchResultEntry) GoString() string {
+ return s.String()
+}
+
+// SetId sets the Id field's value.
+func (s *ChangeMessageVisibilityBatchResultEntry) SetId(v string) *ChangeMessageVisibilityBatchResultEntry {
+ s.Id = &v
+ return s
+}
+
+type ChangeMessageVisibilityInput struct {
+ _ struct{} `type:"structure"`
+
+ // The URL of the Amazon SQS queue whose message's visibility is changed.
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+
+ // The receipt handle associated with the message whose visibility timeout is
+ // changed. This parameter is returned by the ReceiveMessage action.
+ //
+ // ReceiptHandle is a required field
+ ReceiptHandle *string `type:"string" required:"true"`
+
+	// The new value for the message's visibility timeout (in seconds). Valid values:
+ // 0 to 43200. Maximum: 12 hours.
+ //
+ // VisibilityTimeout is a required field
+ VisibilityTimeout *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s ChangeMessageVisibilityInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeMessageVisibilityInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ChangeMessageVisibilityInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ChangeMessageVisibilityInput"}
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+ if s.ReceiptHandle == nil {
+ invalidParams.Add(request.NewErrParamRequired("ReceiptHandle"))
+ }
+ if s.VisibilityTimeout == nil {
+ invalidParams.Add(request.NewErrParamRequired("VisibilityTimeout"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *ChangeMessageVisibilityInput) SetQueueUrl(v string) *ChangeMessageVisibilityInput {
+ s.QueueUrl = &v
+ return s
+}
+
+// SetReceiptHandle sets the ReceiptHandle field's value.
+func (s *ChangeMessageVisibilityInput) SetReceiptHandle(v string) *ChangeMessageVisibilityInput {
+ s.ReceiptHandle = &v
+ return s
+}
+
+// SetVisibilityTimeout sets the VisibilityTimeout field's value.
+func (s *ChangeMessageVisibilityInput) SetVisibilityTimeout(v int64) *ChangeMessageVisibilityInput {
+ s.VisibilityTimeout = &v
+ return s
+}
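+
+// Illustrative sketch (svc, queueURL, and msg, a *sqs.Message returned by
+// ReceiveMessage, are assumptions): extending one message's visibility timeout.
+//
+//    _, err := svc.ChangeMessageVisibility(&sqs.ChangeMessageVisibilityInput{
+//        QueueUrl:          aws.String(queueURL),
+//        ReceiptHandle:     msg.ReceiptHandle,
+//        VisibilityTimeout: aws.Int64(120), // seconds; maximum 43200 (12 hours)
+//    })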
+
+type ChangeMessageVisibilityOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ChangeMessageVisibilityOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangeMessageVisibilityOutput) GoString() string {
+ return s.String()
+}
+
+type CreateQueueInput struct {
+ _ struct{} `type:"structure"`
+
+ // A map of attributes with their corresponding values.
+ //
+ // The following lists the names, descriptions, and values of the special request
+ // parameters that the CreateQueue action uses:
+ //
+ // * DelaySeconds - The length of time, in seconds, for which the delivery
+ // of all messages in the queue is delayed. Valid values: An integer from
+ // 0 to 900 seconds (15 minutes). Default: 0.
+ //
+ // * MaximumMessageSize - The limit of how many bytes a message can contain
+ // before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes
+ // (1 KiB) to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).
+ //
+ // * MessageRetentionPeriod - The length of time, in seconds, for which Amazon
+ // SQS retains a message. Valid values: An integer from 60 seconds (1 minute)
+ // to 1,209,600 seconds (14 days). Default: 345,600 (4 days).
+ //
+ // * Policy - The queue's policy. A valid AWS policy. For more information
+ // about policy structure, see Overview of AWS IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html)
+ // in the Amazon IAM User Guide.
+ //
+ // * ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for
+ // which a ReceiveMessage action waits for a message to arrive. Valid values:
+ // An integer from 0 to 20 (seconds). Default: 0.
+ //
+ // * RedrivePolicy - The string that includes the parameters for the dead-letter
+ // queue functionality of the source queue. For more information about the
+ // redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter
+ // Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html)
+ // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn
+ // - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon
+ // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount
+ // - The number of times a message is delivered to the source queue before
+ // being moved to the dead-letter queue. When the ReceiveCount for a message
+ // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message
+ // to the dead-letter queue. The dead-letter queue of a FIFO queue must also
+ // be a FIFO queue. Similarly, the dead-letter queue of a standard queue
+ // must also be a standard queue.
+ //
+ // * VisibilityTimeout - The visibility timeout for the queue, in seconds.
+ // Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For
+ // more information about the visibility timeout, see Visibility Timeout
+ // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ //
+ // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html):
+ //
+ // * KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK)
+ // for Amazon SQS or a custom CMK. For more information, see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms).
+ // While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs,
+ // the alias of a custom CMK can, for example, be alias/MyAlias . For more
+ // examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters)
+ // in the AWS Key Management Service API Reference.
+ //
+ // * KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which
+ // Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys)
+ // to encrypt or decrypt messages before calling AWS KMS again. An integer
+ // representing seconds, between 60 seconds (1 minute) and 86,400 seconds
+ // (24 hours). Default: 300 (5 minutes). A shorter time period provides better
+ // security but results in more calls to KMS which might incur charges after
+ // Free Tier. For more information, see How Does the Data Key Reuse Period
+ // Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work).
+ //
+ // The following attributes apply only to FIFO (first-in-first-out) queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html):
+ //
+ // * FifoQueue - Designates a queue as FIFO. Valid values: true, false. If
+ // you don't specify the FifoQueue attribute, Amazon SQS creates a standard
+ // queue. You can provide this attribute only during queue creation. You
+ // can't change it for an existing queue. When you set this attribute, you
+ // must also provide the MessageGroupId for your messages explicitly. For
+ // more information, see FIFO Queue Logic (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic)
+ // in the Amazon Simple Queue Service Developer Guide.
+ //
+ // * ContentBasedDeduplication - Enables content-based deduplication. Valid
+ // values: true, false. For more information, see Exactly-Once Processing
+ // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing)
+ // in the Amazon Simple Queue Service Developer Guide. Every message must
+ // have a unique MessageDeduplicationId. You may provide a MessageDeduplicationId
+ // explicitly. If you aren't able to provide a MessageDeduplicationId and
+ // you enable ContentBasedDeduplication for your queue, Amazon SQS uses a
+ // SHA-256 hash to generate the MessageDeduplicationId using the body of
+ // the message (but not the attributes of the message). If you don't provide
+ // a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication
+ // set, the action fails with an error. If the queue has ContentBasedDeduplication
+ // set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication
+ // is in effect, messages with identical content sent within the deduplication
+ // interval are treated as duplicates and only one copy of the message is
+ // delivered. If you send one message with ContentBasedDeduplication enabled
+ // and then another message with a MessageDeduplicationId that is the same
+ // as the one generated for the first MessageDeduplicationId, the two messages
+ // are treated as duplicates and only one copy of the message is delivered.
+ Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"`
+
+ // The name of the new queue. The following limits apply to this name:
+ //
+ // * A queue name can have up to 80 characters.
+ //
+ // * Valid values: alphanumeric characters, hyphens (-), and underscores
+ // (_).
+ //
+ // * A FIFO queue name must end with the .fifo suffix.
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueName is a required field
+ QueueName *string `type:"string" required:"true"`
+
+ // Add cost allocation tags to the specified Amazon SQS queue. For an overview,
+ // see Tagging Your Amazon SQS Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-tags.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ //
+ // When you use queue tags, keep the following guidelines in mind:
+ //
+ // * Adding more than 50 tags to a queue isn't recommended.
+ //
+ // * Tags don't have any semantic meaning. Amazon SQS interprets tags as
+ // character strings.
+ //
+ // * Tags are case-sensitive.
+ //
+ // * A new tag with a key identical to that of an existing tag overwrites
+ // the existing tag.
+ //
+ // For a full list of tag restrictions, see Limits Related to Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-limits.html#limits-queues)
+ // in the Amazon Simple Queue Service Developer Guide.
+ //
+ // To be able to tag a queue on creation, you must have the sqs:CreateQueue
+ // and sqs:TagQueue permissions.
+ //
+ // Cross-account permissions don't apply to this action. For more information,
+ // see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name)
+ // in the Amazon Simple Queue Service Developer Guide.
+ Tags map[string]*string `locationName:"Tag" locationNameKey:"Key" locationNameValue:"Value" type:"map" flattened:"true"`
+}
+
+// String returns the string representation
+func (s CreateQueueInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateQueueInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateQueueInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateQueueInput"}
+ if s.QueueName == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributes sets the Attributes field's value.
+func (s *CreateQueueInput) SetAttributes(v map[string]*string) *CreateQueueInput {
+ s.Attributes = v
+ return s
+}
+
+// SetQueueName sets the QueueName field's value.
+func (s *CreateQueueInput) SetQueueName(v string) *CreateQueueInput {
+ s.QueueName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateQueueInput) SetTags(v map[string]*string) *CreateQueueInput {
+ s.Tags = v
+ return s
+}
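+
+// exampleCreateQueueInput is an illustrative sketch added for documentation
+// (not generated code): it shows how the Attributes and Tags maps documented
+// above are populated. The queue name, attribute values, and tag values are
+// placeholders; a local helper builds the *string map values.
+func exampleCreateQueueInput() (*CreateQueueInput, error) {
+ strptr := func(s string) *string { return &s }
+ input := (&CreateQueueInput{}).
+ SetQueueName("MyQueue").
+ SetAttributes(map[string]*string{
+ "DelaySeconds": strptr("0"), // no default delivery delay
+ "MessageRetentionPeriod": strptr("345600"), // 4 days, the default
+ }).
+ SetTags(map[string]*string{
+ "team": strptr("platform"),
+ })
+ if err := input.Validate(); err != nil {
+ return nil, err
+ }
+ return input, nil
+}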
+
+// Returns the QueueUrl attribute of the created queue.
+type CreateQueueOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The URL of the created Amazon SQS queue.
+ QueueUrl *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateQueueOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateQueueOutput) GoString() string {
+ return s.String()
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *CreateQueueOutput) SetQueueUrl(v string) *CreateQueueOutput {
+ s.QueueUrl = &v
+ return s
+}
+
+type DeleteMessageBatchInput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of receipt handles for the messages to be deleted.
+ //
+ // Entries is a required field
+ Entries []*DeleteMessageBatchRequestEntry `locationNameList:"DeleteMessageBatchRequestEntry" type:"list" flattened:"true" required:"true"`
+
+ // The URL of the Amazon SQS queue from which messages are deleted.
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteMessageBatchInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMessageBatchInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteMessageBatchInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteMessageBatchInput"}
+ if s.Entries == nil {
+ invalidParams.Add(request.NewErrParamRequired("Entries"))
+ }
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+ if s.Entries != nil {
+ for i, v := range s.Entries {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEntries sets the Entries field's value.
+func (s *DeleteMessageBatchInput) SetEntries(v []*DeleteMessageBatchRequestEntry) *DeleteMessageBatchInput {
+ s.Entries = v
+ return s
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *DeleteMessageBatchInput) SetQueueUrl(v string) *DeleteMessageBatchInput {
+ s.QueueUrl = &v
+ return s
+}
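+
+// exampleDeleteMessageBatchInput is an illustrative sketch added for
+// documentation (not generated code): it assembles a batch of
+// DeleteMessageBatchRequestEntry values (defined below), giving each entry an
+// Id that is unique within the request. The queue URL is a placeholder.
+func exampleDeleteMessageBatchInput(receiptHandles []string) (*DeleteMessageBatchInput, error) {
+ entries := make([]*DeleteMessageBatchRequestEntry, 0, len(receiptHandles))
+ for i, rh := range receiptHandles {
+ entries = append(entries, (&DeleteMessageBatchRequestEntry{}).
+ SetId(fmt.Sprintf("msg-%d", i)). // Ids must be unique within the batch
+ SetReceiptHandle(rh))
+ }
+ input := (&DeleteMessageBatchInput{}).
+ SetQueueUrl("https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue").
+ SetEntries(entries)
+ // Validate also walks the nested entries, as shown above.
+ if err := input.Validate(); err != nil {
+ return nil, err
+ }
+ return input, nil
+}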
+
+// For each message in the batch, the response contains a DeleteMessageBatchResultEntry
+// tag if the message is deleted or a BatchResultErrorEntry tag if the message
+// can't be deleted.
+type DeleteMessageBatchOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of BatchResultErrorEntry items.
+ //
+ // Failed is a required field
+ Failed []*BatchResultErrorEntry `locationNameList:"BatchResultErrorEntry" type:"list" flattened:"true" required:"true"`
+
+ // A list of DeleteMessageBatchResultEntry items.
+ //
+ // Successful is a required field
+ Successful []*DeleteMessageBatchResultEntry `locationNameList:"DeleteMessageBatchResultEntry" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteMessageBatchOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMessageBatchOutput) GoString() string {
+ return s.String()
+}
+
+// SetFailed sets the Failed field's value.
+func (s *DeleteMessageBatchOutput) SetFailed(v []*BatchResultErrorEntry) *DeleteMessageBatchOutput {
+ s.Failed = v
+ return s
+}
+
+// SetSuccessful sets the Successful field's value.
+func (s *DeleteMessageBatchOutput) SetSuccessful(v []*DeleteMessageBatchResultEntry) *DeleteMessageBatchOutput {
+ s.Successful = v
+ return s
+}
+
+// Encloses a receipt handle and an identifier for it.
+type DeleteMessageBatchRequestEntry struct {
+ _ struct{} `type:"structure"`
+
+ // An identifier for this particular receipt handle. This is used to communicate
+ // the result.
+ //
+ // The Ids of a batch request need to be unique within a request.
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+
+ // A receipt handle.
+ //
+ // ReceiptHandle is a required field
+ ReceiptHandle *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteMessageBatchRequestEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMessageBatchRequestEntry) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteMessageBatchRequestEntry) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteMessageBatchRequestEntry"}
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.ReceiptHandle == nil {
+ invalidParams.Add(request.NewErrParamRequired("ReceiptHandle"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetId sets the Id field's value.
+func (s *DeleteMessageBatchRequestEntry) SetId(v string) *DeleteMessageBatchRequestEntry {
+ s.Id = &v
+ return s
+}
+
+// SetReceiptHandle sets the ReceiptHandle field's value.
+func (s *DeleteMessageBatchRequestEntry) SetReceiptHandle(v string) *DeleteMessageBatchRequestEntry {
+ s.ReceiptHandle = &v
+ return s
+}
+
+// Encloses the Id of an entry in DeleteMessageBatch.
+type DeleteMessageBatchResultEntry struct {
+ _ struct{} `type:"structure"`
+
+ // Represents a successfully deleted message.
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteMessageBatchResultEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMessageBatchResultEntry) GoString() string {
+ return s.String()
+}
+
+// SetId sets the Id field's value.
+func (s *DeleteMessageBatchResultEntry) SetId(v string) *DeleteMessageBatchResultEntry {
+ s.Id = &v
+ return s
+}
+
+type DeleteMessageInput struct {
+ _ struct{} `type:"structure"`
+
+ // The URL of the Amazon SQS queue from which messages are deleted.
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+
+ // The receipt handle associated with the message to delete.
+ //
+ // ReceiptHandle is a required field
+ ReceiptHandle *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteMessageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMessageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteMessageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteMessageInput"}
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+ if s.ReceiptHandle == nil {
+ invalidParams.Add(request.NewErrParamRequired("ReceiptHandle"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *DeleteMessageInput) SetQueueUrl(v string) *DeleteMessageInput {
+ s.QueueUrl = &v
+ return s
+}
+
+// SetReceiptHandle sets the ReceiptHandle field's value.
+func (s *DeleteMessageInput) SetReceiptHandle(v string) *DeleteMessageInput {
+ s.ReceiptHandle = &v
+ return s
+}
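+
+// exampleDeleteMessageInput is an illustrative sketch added for documentation
+// (not generated code): deleting a message requires the queue URL and the
+// most recently received receipt handle, as documented above. The URL is a
+// placeholder.
+func exampleDeleteMessageInput(receiptHandle string) *DeleteMessageInput {
+ return (&DeleteMessageInput{}).
+ SetQueueUrl("https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue").
+ SetReceiptHandle(receiptHandle)
+}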
+
+type DeleteMessageOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteMessageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMessageOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteQueueInput struct {
+ _ struct{} `type:"structure"`
+
+ // The URL of the Amazon SQS queue to delete.
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteQueueInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteQueueInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteQueueInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteQueueInput"}
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *DeleteQueueInput) SetQueueUrl(v string) *DeleteQueueInput {
+ s.QueueUrl = &v
+ return s
+}
+
+type DeleteQueueOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteQueueOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteQueueOutput) GoString() string {
+ return s.String()
+}
+
+type GetQueueAttributesInput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of attributes for which to retrieve information.
+ //
+ // In the future, new attributes might be added. If you write code that calls
+ // this action, we recommend that you structure your code so that it can handle
+ // new attributes gracefully.
+ //
+ // The following attributes are supported:
+ //
+ // * All - Returns all values.
+ //
+ // * ApproximateNumberOfMessages - Returns the approximate number of messages
+ // available for retrieval from the queue.
+ //
+ // * ApproximateNumberOfMessagesDelayed - Returns the approximate number
+ // of messages in the queue that are delayed and not available for reading
+ // immediately. This can happen when the queue is configured as a delay queue
+ // or when a message has been sent with a delay parameter.
+ //
+ // * ApproximateNumberOfMessagesNotVisible - Returns the approximate number
+ // of messages that are in flight. Messages are considered to be in flight
+ // if they have been sent to a client but have not yet been deleted or have
+ // not yet reached the end of their visibility window.
+ //
+ // * CreatedTimestamp - Returns the time when the queue was created in seconds
+ // (epoch time (http://en.wikipedia.org/wiki/Unix_time)).
+ //
+ // * DelaySeconds - Returns the default delay on the queue in seconds.
+ //
+ // * LastModifiedTimestamp - Returns the time when the queue was last changed
+ // in seconds (epoch time (http://en.wikipedia.org/wiki/Unix_time)).
+ //
+ // * MaximumMessageSize - Returns the limit of how many bytes a message can
+ // contain before Amazon SQS rejects it.
+ //
+ // * MessageRetentionPeriod - Returns the length of time, in seconds, for
+ // which Amazon SQS retains a message.
+ //
+ // * Policy - Returns the policy of the queue.
+ //
+ // * QueueArn - Returns the Amazon resource name (ARN) of the queue.
+ //
+ // * ReceiveMessageWaitTimeSeconds - Returns the length of time, in seconds,
+ // for which the ReceiveMessage action waits for a message to arrive.
+ //
+ // * RedrivePolicy - Returns the string that includes the parameters for
+ // dead-letter queue functionality of the source queue. For more information
+ // about the redrive policy and dead-letter queues, see Using Amazon SQS
+ // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html)
+ // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn
+ // - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon
+ // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount
+ // - The number of times a message is delivered to the source queue before
+ // being moved to the dead-letter queue. When the ReceiveCount for a message
+ // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message
+ // to the dead-letter queue.
+ //
+ // * VisibilityTimeout - Returns the visibility timeout for the queue. For
+ // more information about the visibility timeout, see Visibility Timeout
+ // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ //
+ // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html):
+ //
+ // * KmsMasterKeyId - Returns the ID of an AWS-managed customer master key
+ // (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms
+ // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms).
+ //
+ // * KmsDataKeyReusePeriodSeconds - Returns the length of time, in seconds,
+ // for which Amazon SQS can reuse a data key to encrypt or decrypt messages
+ // before calling AWS KMS again. For more information, see How Does the Data
+ // Key Reuse Period Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work).
+ //
+ // The following attributes apply only to FIFO (first-in-first-out) queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html):
+ //
+ // * FifoQueue - Returns whether the queue is FIFO. For more information,
+ // see FIFO Queue Logic (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic)
+ // in the Amazon Simple Queue Service Developer Guide. To determine whether
+ // a queue is FIFO (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html),
+ // you can check whether QueueName ends with the .fifo suffix.
+ //
+ // * ContentBasedDeduplication - Returns whether content-based deduplication
+ // is enabled for the queue. For more information, see Exactly-Once Processing
+ // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing)
+ // in the Amazon Simple Queue Service Developer Guide.
+ AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"`
+
+ // The URL of the Amazon SQS queue whose attribute information is retrieved.
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetQueueAttributesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetQueueAttributesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetQueueAttributesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetQueueAttributesInput"}
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributeNames sets the AttributeNames field's value.
+func (s *GetQueueAttributesInput) SetAttributeNames(v []*string) *GetQueueAttributesInput {
+ s.AttributeNames = v
+ return s
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *GetQueueAttributesInput) SetQueueUrl(v string) *GetQueueAttributesInput {
+ s.QueueUrl = &v
+ return s
+}
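+
+// exampleGetQueueAttributesInput is an illustrative sketch added for
+// documentation (not generated code): it requests a specific subset of the
+// attributes listed above rather than All, so the response stays small and
+// tolerant of future attribute additions. The queue URL is a placeholder.
+func exampleGetQueueAttributesInput() *GetQueueAttributesInput {
+ strptr := func(s string) *string { return &s }
+ return (&GetQueueAttributesInput{}).
+ SetQueueUrl("https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue").
+ SetAttributeNames([]*string{
+ strptr("ApproximateNumberOfMessages"),
+ strptr("ApproximateNumberOfMessagesNotVisible"),
+ })
+}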
+
+// A list of returned queue attributes.
+type GetQueueAttributesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A map of attributes to their respective values.
+ Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetQueueAttributesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetQueueAttributesOutput) GoString() string {
+ return s.String()
+}
+
+// SetAttributes sets the Attributes field's value.
+func (s *GetQueueAttributesOutput) SetAttributes(v map[string]*string) *GetQueueAttributesOutput {
+ s.Attributes = v
+ return s
+}
+
+type GetQueueUrlInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the queue whose URL must be fetched. Maximum 80 characters. Valid
+ // values: alphanumeric characters, hyphens (-), and underscores (_).
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueName is a required field
+ QueueName *string `type:"string" required:"true"`
+
+ // The AWS account ID of the account that created the queue.
+ QueueOwnerAWSAccountId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetQueueUrlInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetQueueUrlInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetQueueUrlInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetQueueUrlInput"}
+ if s.QueueName == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetQueueName sets the QueueName field's value.
+func (s *GetQueueUrlInput) SetQueueName(v string) *GetQueueUrlInput {
+ s.QueueName = &v
+ return s
+}
+
+// SetQueueOwnerAWSAccountId sets the QueueOwnerAWSAccountId field's value.
+func (s *GetQueueUrlInput) SetQueueOwnerAWSAccountId(v string) *GetQueueUrlInput {
+ s.QueueOwnerAWSAccountId = &v
+ return s
+}
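+
+// exampleGetQueueUrlInput is an illustrative sketch added for documentation
+// (not generated code): resolving a queue URL from its name, optionally
+// scoped to the owning account. The name and account ID are placeholders.
+func exampleGetQueueUrlInput() *GetQueueUrlInput {
+ return (&GetQueueUrlInput{}).
+ SetQueueName("MyQueue").
+ SetQueueOwnerAWSAccountId("123456789012") // omit to look up in the caller's account
+}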
+
+// For more information, see Interpreting Responses (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-api-responses.html)
+// in the Amazon Simple Queue Service Developer Guide.
+type GetQueueUrlOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The URL of the queue.
+ QueueUrl *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetQueueUrlOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetQueueUrlOutput) GoString() string {
+ return s.String()
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *GetQueueUrlOutput) SetQueueUrl(v string) *GetQueueUrlOutput {
+ s.QueueUrl = &v
+ return s
+}
+
+type ListDeadLetterSourceQueuesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The URL of a dead-letter queue.
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListDeadLetterSourceQueuesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDeadLetterSourceQueuesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListDeadLetterSourceQueuesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListDeadLetterSourceQueuesInput"}
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *ListDeadLetterSourceQueuesInput) SetQueueUrl(v string) *ListDeadLetterSourceQueuesInput {
+ s.QueueUrl = &v
+ return s
+}
+
+// A list of your dead letter source queues.
+type ListDeadLetterSourceQueuesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of source queue URLs that have the RedrivePolicy queue attribute configured
+ // with a dead-letter queue.
+ //
+ // QueueUrls is a required field
+ QueueUrls []*string `locationName:"queueUrls" locationNameList:"QueueUrl" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s ListDeadLetterSourceQueuesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListDeadLetterSourceQueuesOutput) GoString() string {
+ return s.String()
+}
+
+// SetQueueUrls sets the QueueUrls field's value.
+func (s *ListDeadLetterSourceQueuesOutput) SetQueueUrls(v []*string) *ListDeadLetterSourceQueuesOutput {
+ s.QueueUrls = v
+ return s
+}
+
+type ListQueueTagsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The URL of the queue.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListQueueTagsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListQueueTagsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListQueueTagsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListQueueTagsInput"}
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *ListQueueTagsInput) SetQueueUrl(v string) *ListQueueTagsInput {
+ s.QueueUrl = &v
+ return s
+}
+
+type ListQueueTagsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of all tags added to the specified queue.
+ Tags map[string]*string `locationName:"Tag" locationNameKey:"Key" locationNameValue:"Value" type:"map" flattened:"true"`
+}
+
+// String returns the string representation
+func (s ListQueueTagsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListQueueTagsOutput) GoString() string {
+ return s.String()
+}
+
+// SetTags sets the Tags field's value.
+func (s *ListQueueTagsOutput) SetTags(v map[string]*string) *ListQueueTagsOutput {
+ s.Tags = v
+ return s
+}
+
+type ListQueuesInput struct {
+ _ struct{} `type:"structure"`
+
+ // A string to use for filtering the list results. Only those queues whose name
+ // begins with the specified string are returned.
+ //
+ // Queue URLs and names are case-sensitive.
+ QueueNamePrefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListQueuesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListQueuesInput) GoString() string {
+ return s.String()
+}
+
+// SetQueueNamePrefix sets the QueueNamePrefix field's value.
+func (s *ListQueuesInput) SetQueueNamePrefix(v string) *ListQueuesInput {
+ s.QueueNamePrefix = &v
+ return s
+}
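+
+// exampleListQueuesInput is an illustrative sketch added for documentation
+// (not generated code): filtering the listing to queues whose names start
+// with a given prefix. The prefix is a placeholder.
+func exampleListQueuesInput() *ListQueuesInput {
+ // Prefix matching is case-sensitive, as noted above.
+ return (&ListQueuesInput{}).SetQueueNamePrefix("prod-")
+}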
+
+// A list of your queues.
+type ListQueuesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of queue URLs, up to 1,000 entries.
+ QueueUrls []*string `locationNameList:"QueueUrl" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s ListQueuesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListQueuesOutput) GoString() string {
+ return s.String()
+}
+
+// SetQueueUrls sets the QueueUrls field's value.
+func (s *ListQueuesOutput) SetQueueUrls(v []*string) *ListQueuesOutput {
+ s.QueueUrls = v
+ return s
+}
+
+// An Amazon SQS message.
+type Message struct {
+ _ struct{} `type:"structure"`
+
+ // A map of the attributes requested in ReceiveMessage to their respective values.
+ // Supported attributes:
+ //
+ // * ApproximateReceiveCount
+ //
+ // * ApproximateFirstReceiveTimestamp
+ //
+ // * MessageDeduplicationId
+ //
+ // * MessageGroupId
+ //
+ // * SenderId
+ //
+ // * SentTimestamp
+ //
+ // * SequenceNumber
+ //
+ // ApproximateFirstReceiveTimestamp and SentTimestamp are each returned as an
+ // integer representing the epoch time (http://en.wikipedia.org/wiki/Unix_time)
+ // in milliseconds.
+ Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"`
+
+ // The message's contents (not URL-encoded).
+ Body *string `type:"string"`
+
+ // An MD5 digest of the non-URL-encoded message body string.
+ MD5OfBody *string `type:"string"`
+
+ // An MD5 digest of the non-URL-encoded message attribute string. You can use
+ // this attribute to verify that Amazon SQS received the message correctly.
+ // Amazon SQS URL-decodes the message before creating the MD5 digest. For information
+ // about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt).
+ MD5OfMessageAttributes *string `type:"string"`
+
+ // Each message attribute consists of a Name, Type, and Value. For more information,
+ // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"`
+
+ // A unique identifier for the message. A MessageId is considered unique
+ // across all AWS accounts for an extended period of time.
+ MessageId *string `type:"string"`
+
+ // An identifier associated with the act of receiving the message. A new receipt
+ // handle is returned every time you receive a message. When deleting a message,
+ // you provide the last received receipt handle to delete the message.
+ ReceiptHandle *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Message) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Message) GoString() string {
+ return s.String()
+}
+
+// SetAttributes sets the Attributes field's value.
+func (s *Message) SetAttributes(v map[string]*string) *Message {
+ s.Attributes = v
+ return s
+}
+
+// SetBody sets the Body field's value.
+func (s *Message) SetBody(v string) *Message {
+ s.Body = &v
+ return s
+}
+
+// SetMD5OfBody sets the MD5OfBody field's value.
+func (s *Message) SetMD5OfBody(v string) *Message {
+ s.MD5OfBody = &v
+ return s
+}
+
+// SetMD5OfMessageAttributes sets the MD5OfMessageAttributes field's value.
+func (s *Message) SetMD5OfMessageAttributes(v string) *Message {
+ s.MD5OfMessageAttributes = &v
+ return s
+}
+
+// SetMessageAttributes sets the MessageAttributes field's value.
+func (s *Message) SetMessageAttributes(v map[string]*MessageAttributeValue) *Message {
+ s.MessageAttributes = v
+ return s
+}
+
+// SetMessageId sets the MessageId field's value.
+func (s *Message) SetMessageId(v string) *Message {
+ s.MessageId = &v
+ return s
+}
+
+// SetReceiptHandle sets the ReceiptHandle field's value.
+func (s *Message) SetReceiptHandle(v string) *Message {
+ s.ReceiptHandle = &v
+ return s
+}
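+
+// exampleReadMessage is an illustrative sketch added for documentation (not
+// generated code): it shows nil-safe reads of a received Message's pointer
+// fields and its Attributes map.
+func exampleReadMessage(m *Message) (body, receiptHandle, sentTimestamp string) {
+ if m == nil {
+ return "", "", ""
+ }
+ if m.Body != nil {
+ body = *m.Body
+ }
+ if m.ReceiptHandle != nil {
+ receiptHandle = *m.ReceiptHandle
+ }
+ // SentTimestamp, when requested, is epoch time in milliseconds as a string.
+ if ts, ok := m.Attributes["SentTimestamp"]; ok && ts != nil {
+ sentTimestamp = *ts
+ }
+ return body, receiptHandle, sentTimestamp
+}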
+
+// The user-specified message attribute value. For string data types, the Value
+// attribute has the same restrictions on the content as the message body. For
+// more information, see SendMessage.
+//
+// Name, type, value and the message body must not be empty or null. All parts
+// of the message attribute, including Name, Type, and Value, are part of the
+// message size restriction (256 KB or 262,144 bytes).
+type MessageAttributeValue struct {
+ _ struct{} `type:"structure"`
+
+ // Not implemented. Reserved for future use.
+ BinaryListValues [][]byte `locationName:"BinaryListValue" locationNameList:"BinaryListValue" type:"list" flattened:"true"`
+
+ // Binary type attributes can store any binary data, such as compressed data,
+ // encrypted data, or images.
+ //
+ // BinaryValue is automatically base64 encoded/decoded by the SDK.
+ BinaryValue []byte `type:"blob"`
+
+ // Amazon SQS supports the following logical data types: String, Number, and
+ // Binary. For the Number data type, you must use StringValue.
+ //
+ // You can also append custom labels. For more information, see Amazon SQS Message
+ // Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ //
+ // DataType is a required field
+ DataType *string `type:"string" required:"true"`
+
+ // Not implemented. Reserved for future use.
+ StringListValues []*string `locationName:"StringListValue" locationNameList:"StringListValue" type:"list" flattened:"true"`
+
+ // Strings are Unicode with UTF-8 binary encoding. For a list of code values,
+ // see ASCII Printable Characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters).
+ StringValue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s MessageAttributeValue) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MessageAttributeValue) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MessageAttributeValue) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "MessageAttributeValue"}
+ if s.DataType == nil {
+ invalidParams.Add(request.NewErrParamRequired("DataType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBinaryListValues sets the BinaryListValues field's value.
+func (s *MessageAttributeValue) SetBinaryListValues(v [][]byte) *MessageAttributeValue {
+ s.BinaryListValues = v
+ return s
+}
+
+// SetBinaryValue sets the BinaryValue field's value.
+func (s *MessageAttributeValue) SetBinaryValue(v []byte) *MessageAttributeValue {
+ s.BinaryValue = v
+ return s
+}
+
+// SetDataType sets the DataType field's value.
+func (s *MessageAttributeValue) SetDataType(v string) *MessageAttributeValue {
+ s.DataType = &v
+ return s
+}
+
+// SetStringListValues sets the StringListValues field's value.
+func (s *MessageAttributeValue) SetStringListValues(v []*string) *MessageAttributeValue {
+ s.StringListValues = v
+ return s
+}
+
+// SetStringValue sets the StringValue field's value.
+func (s *MessageAttributeValue) SetStringValue(v string) *MessageAttributeValue {
+ s.StringValue = &v
+ return s
+}
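+
+// exampleMessageAttributes is an illustrative sketch added for documentation
+// (not generated code): it builds String, Number, and Binary attributes. Note
+// that Number values are carried in StringValue, as documented above.
+func exampleMessageAttributes() map[string]*MessageAttributeValue {
+ return map[string]*MessageAttributeValue{
+ "Title": (&MessageAttributeValue{}).
+ SetDataType("String").
+ SetStringValue("The Whistler"),
+ "RetryCount": (&MessageAttributeValue{}).
+ SetDataType("Number").
+ SetStringValue("3"), // Number attributes use StringValue
+ "Payload": (&MessageAttributeValue{}).
+ SetDataType("Binary").
+ SetBinaryValue([]byte{0x01, 0x02}), // base64-encoded by the SDK
+ }
+}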
+
+// The user-specified message system attribute value. For string data types,
+// the Value attribute has the same restrictions on the content as the message
+// body. For more information, see SendMessage.
+//
+// Name, type, value and the message body must not be empty or null.
+type MessageSystemAttributeValue struct {
+ _ struct{} `type:"structure"`
+
+ // Not implemented. Reserved for future use.
+ BinaryListValues [][]byte `locationName:"BinaryListValue" locationNameList:"BinaryListValue" type:"list" flattened:"true"`
+
+ // Binary type attributes can store any binary data, such as compressed data,
+ // encrypted data, or images.
+ //
+ // BinaryValue is automatically base64 encoded/decoded by the SDK.
+ BinaryValue []byte `type:"blob"`
+
+ // Amazon SQS supports the following logical data types: String, Number, and
+ // Binary. For the Number data type, you must use StringValue.
+ //
+ // You can also append custom labels. For more information, see Amazon SQS Message
+ // Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ //
+ // DataType is a required field
+ DataType *string `type:"string" required:"true"`
+
+ // Not implemented. Reserved for future use.
+ StringListValues []*string `locationName:"StringListValue" locationNameList:"StringListValue" type:"list" flattened:"true"`
+
+ // Strings are Unicode with UTF-8 binary encoding. For a list of code values,
+ // see ASCII Printable Characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters).
+ StringValue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s MessageSystemAttributeValue) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MessageSystemAttributeValue) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MessageSystemAttributeValue) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "MessageSystemAttributeValue"}
+ if s.DataType == nil {
+ invalidParams.Add(request.NewErrParamRequired("DataType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBinaryListValues sets the BinaryListValues field's value.
+func (s *MessageSystemAttributeValue) SetBinaryListValues(v [][]byte) *MessageSystemAttributeValue {
+ s.BinaryListValues = v
+ return s
+}
+
+// SetBinaryValue sets the BinaryValue field's value.
+func (s *MessageSystemAttributeValue) SetBinaryValue(v []byte) *MessageSystemAttributeValue {
+ s.BinaryValue = v
+ return s
+}
+
+// SetDataType sets the DataType field's value.
+func (s *MessageSystemAttributeValue) SetDataType(v string) *MessageSystemAttributeValue {
+ s.DataType = &v
+ return s
+}
+
+// SetStringListValues sets the StringListValues field's value.
+func (s *MessageSystemAttributeValue) SetStringListValues(v []*string) *MessageSystemAttributeValue {
+ s.StringListValues = v
+ return s
+}
+
+// SetStringValue sets the StringValue field's value.
+func (s *MessageSystemAttributeValue) SetStringValue(v string) *MessageSystemAttributeValue {
+ s.StringValue = &v
+ return s
+}
+
+type PurgeQueueInput struct {
+ _ struct{} `type:"structure"`
+
+ // The URL of the queue from which the PurgeQueue action deletes messages.
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PurgeQueueInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PurgeQueueInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PurgeQueueInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PurgeQueueInput"}
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *PurgeQueueInput) SetQueueUrl(v string) *PurgeQueueInput {
+ s.QueueUrl = &v
+ return s
+}
+
+type PurgeQueueOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PurgeQueueOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PurgeQueueOutput) GoString() string {
+ return s.String()
+}
+
+type ReceiveMessageInput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of attributes that need to be returned along with each message. These
+ // attributes include:
+ //
+ // * All - Returns all values.
+ //
+ // * ApproximateFirstReceiveTimestamp - Returns the time the message was
+ // first received from the queue (epoch time (http://en.wikipedia.org/wiki/Unix_time)
+ // in milliseconds).
+ //
+ // * ApproximateReceiveCount - Returns the number of times a message has
+ // been received from the queue but not deleted.
+ //
+ // * AWSTraceHeader - Returns the AWS X-Ray trace header string.
+ //
+ // * SenderId - For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R.
+ // For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456.
+ //
+ // * SentTimestamp - Returns the time the message was sent to the queue (epoch
+ // time (http://en.wikipedia.org/wiki/Unix_time) in milliseconds).
+ //
+ // * MessageDeduplicationId - Returns the value provided by the producer
+ // that calls the SendMessage action.
+ //
+ // * MessageGroupId - Returns the value provided by the producer that calls
+ // the SendMessage action. Messages with the same MessageGroupId are returned
+ // in sequence.
+ //
+ // * SequenceNumber - Returns the value provided by Amazon SQS.
+ AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"`
+
+ // The maximum number of messages to return. Amazon SQS never returns more messages
+ // than this value (however, fewer messages might be returned). Valid values:
+ // 1 to 10. Default: 1.
+ MaxNumberOfMessages *int64 `type:"integer"`
+
+ // The name of the message attribute, where N is the index.
+ //
+ // * The name can contain alphanumeric characters and the underscore (_),
+ // hyphen (-), and period (.).
+ //
+ // * The name is case-sensitive and must be unique among all attribute names
+ // for the message.
+ //
+ // * The name must not start with AWS-reserved prefixes such as AWS. or Amazon.
+ // (or any casing variants).
+ //
+ // * The name must not start or end with a period (.), and it should not
+ // have periods in succession (..).
+ //
+ // * The name can be up to 256 characters long.
+ //
+ // When using ReceiveMessage, you can send a list of attribute names to receive,
+ // or you can return all of the attributes by specifying All or .* in your request.
+ // You can also use all message attributes starting with a prefix, for example
+ // bar.*.
+ MessageAttributeNames []*string `locationNameList:"MessageAttributeName" type:"list" flattened:"true"`
+
+ // The URL of the Amazon SQS queue from which messages are received.
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+
+ // This parameter applies only to FIFO (first-in-first-out) queues.
+ //
+ // The token used for deduplication of ReceiveMessage calls. If a networking
+ // issue occurs after a ReceiveMessage action, and instead of a response you
+ // receive a generic error, you can retry the same action with an identical
+ // ReceiveRequestAttemptId to retrieve the same set of messages, even if their
+ // visibility timeout has not yet expired.
+ //
+ // * You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage
+ // action.
+ //
+ // * When you set FifoQueue, a caller of the ReceiveMessage action can provide
+ // a ReceiveRequestAttemptId explicitly.
+ //
+ // * If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId,
+ // Amazon SQS generates a ReceiveRequestAttemptId.
+ //
+ // * You can retry the ReceiveMessage action with the same ReceiveRequestAttemptId
+ // if none of the messages have been modified (deleted or had their visibility
+ // changed).
+ //
+ // * During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId
+ // return the same messages and receipt handles. If a retry occurs within
+ // the deduplication interval, it resets the visibility timeout. For more
+ // information, see Visibility Timeout (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html)
+ // in the Amazon Simple Queue Service Developer Guide. If a caller of the
+ // ReceiveMessage action still processes messages when the visibility timeout
+ // expires and messages become visible, another worker consuming from the
+ // same queue can receive the same messages and therefore process duplicates.
+ // Also, if a consumer whose message processing time is longer than the visibility
+ // timeout tries to delete the processed messages, the action fails with
+ // an error. To mitigate this effect, ensure that your application observes
+ // a safe threshold before the visibility timeout expires and extend the
+ // visibility timeout as necessary.
+ //
+ // * While messages with a particular MessageGroupId are invisible, no more
+ // messages belonging to the same MessageGroupId are returned until the visibility
+ // timeout expires. You can still receive messages with another MessageGroupId
+ // as long as it is also visible.
+ //
+ // * If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId,
+ // no retries work until the original visibility timeout expires. As a result,
+ // delays might occur but the messages in the queue remain in a strict order.
+ //
+ // The length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId
+ // can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~).
+ //
+ // For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId
+ // Request Parameter (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-receiverequestattemptid-request-parameter.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ ReceiveRequestAttemptId *string `type:"string"`
+
+ // The duration (in seconds) that the received messages are hidden from subsequent
+ // retrieve requests after being retrieved by a ReceiveMessage request.
+ VisibilityTimeout *int64 `type:"integer"`
+
+ // The duration (in seconds) for which the call waits for a message to arrive
+ // in the queue before returning. If a message is available, the call returns
+ // sooner than WaitTimeSeconds. If no messages are available and the wait time
+ // expires, the call returns successfully with an empty list of messages.
+ WaitTimeSeconds *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ReceiveMessageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReceiveMessageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReceiveMessageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReceiveMessageInput"}
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributeNames sets the AttributeNames field's value.
+func (s *ReceiveMessageInput) SetAttributeNames(v []*string) *ReceiveMessageInput {
+ s.AttributeNames = v
+ return s
+}
+
+// SetMaxNumberOfMessages sets the MaxNumberOfMessages field's value.
+func (s *ReceiveMessageInput) SetMaxNumberOfMessages(v int64) *ReceiveMessageInput {
+ s.MaxNumberOfMessages = &v
+ return s
+}
+
+// SetMessageAttributeNames sets the MessageAttributeNames field's value.
+func (s *ReceiveMessageInput) SetMessageAttributeNames(v []*string) *ReceiveMessageInput {
+ s.MessageAttributeNames = v
+ return s
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *ReceiveMessageInput) SetQueueUrl(v string) *ReceiveMessageInput {
+ s.QueueUrl = &v
+ return s
+}
+
+// SetReceiveRequestAttemptId sets the ReceiveRequestAttemptId field's value.
+func (s *ReceiveMessageInput) SetReceiveRequestAttemptId(v string) *ReceiveMessageInput {
+ s.ReceiveRequestAttemptId = &v
+ return s
+}
+
+// SetVisibilityTimeout sets the VisibilityTimeout field's value.
+func (s *ReceiveMessageInput) SetVisibilityTimeout(v int64) *ReceiveMessageInput {
+ s.VisibilityTimeout = &v
+ return s
+}
+
+// SetWaitTimeSeconds sets the WaitTimeSeconds field's value.
+func (s *ReceiveMessageInput) SetWaitTimeSeconds(v int64) *ReceiveMessageInput {
+ s.WaitTimeSeconds = &v
+ return s
+}
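+
+// exampleReceiveMessageInput is an illustrative sketch added for
+// documentation (not generated code): a long-polling receive that asks for up
+// to 10 messages, waits up to 20 seconds, and requests the SentTimestamp
+// attribute plus all message attributes. The queue URL is a placeholder.
+func exampleReceiveMessageInput() *ReceiveMessageInput {
+ strptr := func(s string) *string { return &s }
+ return (&ReceiveMessageInput{}).
+ SetQueueUrl("https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue").
+ SetMaxNumberOfMessages(10). // the maximum allowed per call
+ SetWaitTimeSeconds(20). // long polling; returns an empty list on timeout
+ SetVisibilityTimeout(30).
+ SetAttributeNames([]*string{strptr("SentTimestamp")}).
+ SetMessageAttributeNames([]*string{strptr("All")})
+}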
+
+// A list of received messages.
+type ReceiveMessageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of messages.
+ Messages []*Message `locationNameList:"Message" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s ReceiveMessageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReceiveMessageOutput) GoString() string {
+ return s.String()
+}
+
+// SetMessages sets the Messages field's value.
+func (s *ReceiveMessageOutput) SetMessages(v []*Message) *ReceiveMessageOutput {
+ s.Messages = v
+ return s
+}
+
+type RemovePermissionInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identification of the permission to remove. This is the label added using
+ // the AddPermission action.
+ //
+ // Label is a required field
+ Label *string `type:"string" required:"true"`
+
+ // The URL of the Amazon SQS queue from which permissions are removed.
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RemovePermissionInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemovePermissionInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RemovePermissionInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RemovePermissionInput"}
+ if s.Label == nil {
+ invalidParams.Add(request.NewErrParamRequired("Label"))
+ }
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetLabel sets the Label field's value.
+func (s *RemovePermissionInput) SetLabel(v string) *RemovePermissionInput {
+ s.Label = &v
+ return s
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *RemovePermissionInput) SetQueueUrl(v string) *RemovePermissionInput {
+ s.QueueUrl = &v
+ return s
+}
+
+type RemovePermissionOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s RemovePermissionOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RemovePermissionOutput) GoString() string {
+ return s.String()
+}
+
+type SendMessageBatchInput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of SendMessageBatchRequestEntry items.
+ //
+ // Entries is a required field
+ Entries []*SendMessageBatchRequestEntry `locationNameList:"SendMessageBatchRequestEntry" type:"list" flattened:"true" required:"true"`
+
+ // The URL of the Amazon SQS queue to which batched messages are sent.
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SendMessageBatchInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendMessageBatchInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SendMessageBatchInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SendMessageBatchInput"}
+ if s.Entries == nil {
+ invalidParams.Add(request.NewErrParamRequired("Entries"))
+ }
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+ if s.Entries != nil {
+ for i, v := range s.Entries {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEntries sets the Entries field's value.
+func (s *SendMessageBatchInput) SetEntries(v []*SendMessageBatchRequestEntry) *SendMessageBatchInput {
+ s.Entries = v
+ return s
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *SendMessageBatchInput) SetQueueUrl(v string) *SendMessageBatchInput {
+ s.QueueUrl = &v
+ return s
+}
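+
+// exampleSendMessageBatchInput is an illustrative sketch added for
+// documentation (not generated code): it assembles SendMessageBatchRequestEntry
+// values (defined below), one per body, each with an Id unique within the
+// request. The queue URL is a placeholder.
+func exampleSendMessageBatchInput(bodies []string) (*SendMessageBatchInput, error) {
+ entries := make([]*SendMessageBatchRequestEntry, 0, len(bodies))
+ for i, b := range bodies {
+ entries = append(entries, (&SendMessageBatchRequestEntry{}).
+ SetId(fmt.Sprintf("msg-%d", i)). // Ids must be unique within the batch
+ SetMessageBody(b))
+ }
+ input := (&SendMessageBatchInput{}).
+ SetQueueUrl("https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue").
+ SetEntries(entries)
+ if err := input.Validate(); err != nil {
+ return nil, err
+ }
+ return input, nil
+}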
+
+// For each message in the batch, the response contains a SendMessageBatchResultEntry
+// tag if the message succeeds or a BatchResultErrorEntry tag if the message
+// fails.
+type SendMessageBatchOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of BatchResultErrorEntry items with error details about each message
+ // that can't be enqueued.
+ //
+ // Failed is a required field
+ Failed []*BatchResultErrorEntry `locationNameList:"BatchResultErrorEntry" type:"list" flattened:"true" required:"true"`
+
+ // A list of SendMessageBatchResultEntry items.
+ //
+ // Successful is a required field
+ Successful []*SendMessageBatchResultEntry `locationNameList:"SendMessageBatchResultEntry" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s SendMessageBatchOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendMessageBatchOutput) GoString() string {
+ return s.String()
+}
+
+// SetFailed sets the Failed field's value.
+func (s *SendMessageBatchOutput) SetFailed(v []*BatchResultErrorEntry) *SendMessageBatchOutput {
+ s.Failed = v
+ return s
+}
+
+// SetSuccessful sets the Successful field's value.
+func (s *SendMessageBatchOutput) SetSuccessful(v []*SendMessageBatchResultEntry) *SendMessageBatchOutput {
+ s.Successful = v
+ return s
+}
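+
+// A sketch of handling a partial failure (out is assumed to be a
+// *SendMessageBatchOutput; aws is github.com/aws/aws-sdk-go/aws). Entries that
+// failed for a server-side reason can be retried, while sender faults would
+// fail again unchanged:
+//
+//	var retry []string
+//	for _, f := range out.Failed {
+//		if aws.BoolValue(f.SenderFault) {
+//			continue
+//		}
+//		retry = append(retry, aws.StringValue(f.Id))
+//	}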
+
+// Contains the details of a single Amazon SQS message along with an Id.
+type SendMessageBatchRequestEntry struct {
+ _ struct{} `type:"structure"`
+
+ // The length of time, in seconds, for which a specific message is delayed.
+ // Valid values: 0 to 900. Maximum: 15 minutes. Messages with a positive DelaySeconds
+ // value become available for processing after the delay period is finished.
+ // If you don't specify a value, the default value for the queue is applied.
+ //
+ // When you set FifoQueue, you can't set DelaySeconds per message. You can set
+ // this parameter only on a queue level.
+ DelaySeconds *int64 `type:"integer"`
+
+ // An identifier for a message in this batch used to communicate the result.
+ //
+ // The Ids of a batch request need to be unique within a request.
+ //
+ // This identifier can have up to 80 characters. The following characters are
+ // accepted: alphanumeric characters, hyphens (-), and underscores (_).
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+
+ // Each message attribute consists of a Name, Type, and Value. For more information,
+ // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"`
+
+ // The body of the message.
+ //
+ // MessageBody is a required field
+ MessageBody *string `type:"string" required:"true"`
+
+ // This parameter applies only to FIFO (first-in-first-out) queues.
+ //
+ // The token used for deduplication of messages within a 5-minute minimum deduplication
+ // interval. If a message with a particular MessageDeduplicationId is sent successfully,
+ // subsequent messages with the same MessageDeduplicationId are accepted successfully
+ // but aren't delivered. For more information, see Exactly-Once Processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing)
+ // in the Amazon Simple Queue Service Developer Guide.
+ //
+ // * Every message must have a unique MessageDeduplicationId. You may provide
+ // a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId
+ // and you enable ContentBasedDeduplication for your queue, Amazon SQS uses
+ // a SHA-256 hash to generate the MessageDeduplicationId using the body of
+ // the message (but not the attributes of the message). If you don't provide
+ // a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication
+ // set, the action fails with an error. If the queue has ContentBasedDeduplication
+ // set, your MessageDeduplicationId overrides the generated one.
+ //
+ // * When ContentBasedDeduplication is in effect, messages with identical
+ // content sent within the deduplication interval are treated as duplicates
+ // and only one copy of the message is delivered.
+ //
+ // * If you send one message with ContentBasedDeduplication enabled and then
+ // another message with a MessageDeduplicationId that is the same as the
+ // one generated for the first MessageDeduplicationId, the two messages are
+ // treated as duplicates and only one copy of the message is delivered.
+ //
+ // The MessageDeduplicationId is available to the consumer of the message (this
+ // can be useful for troubleshooting delivery issues).
+ //
+ // If a message is sent successfully but the acknowledgement is lost and the
+ // message is resent with the same MessageDeduplicationId after the deduplication
+ // interval, Amazon SQS can't detect duplicate messages.
+ //
+ // Amazon SQS continues to keep track of the message deduplication ID even after
+ // the message is received and deleted.
+ //
+ // The maximum length of MessageDeduplicationId is 128 characters. MessageDeduplicationId
+ // can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~).
+ //
+ // For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId
+ // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ MessageDeduplicationId *string `type:"string"`
+
+ // This parameter applies only to FIFO (first-in-first-out) queues.
+ //
+ // The tag that specifies that a message belongs to a specific message group.
+ // Messages that belong to the same message group are processed in a FIFO manner
+ // (however, messages in different message groups might be processed out of
+ // order). To interleave multiple ordered streams within a single queue, use
+ // MessageGroupId values (for example, session data for multiple users). In
+ // this scenario, multiple consumers can process the queue, but the session
+ // data of each user is processed in a FIFO fashion.
+ //
+ // * You must associate a non-empty MessageGroupId with a message. If you
+ // don't provide a MessageGroupId, the action fails.
+ //
+ // * ReceiveMessage might return messages with multiple MessageGroupId values.
+ // For each MessageGroupId, the messages are sorted by time sent. The caller
+ // can't specify a MessageGroupId.
+ //
+ // The length of MessageGroupId is 128 characters. Valid values: alphanumeric
+ // characters and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~).
+ //
+ // For best practices of using MessageGroupId, see Using the MessageGroupId
+ // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ //
+ // MessageGroupId is required for FIFO queues. You can't use it for Standard
+ // queues.
+ MessageGroupId *string `type:"string"`
+
+ // The message system attribute to send. Each message system attribute consists
+ // of a Name, Type, and Value.
+ //
+ // * Currently, the only supported message system attribute is AWSTraceHeader.
+ // Its type must be String and its value must be a correctly formatted AWS
+ // X-Ray trace string.
+ //
+ // * The size of a message system attribute doesn't count towards the total
+ // size of a message.
+ MessageSystemAttributes map[string]*MessageSystemAttributeValue `locationName:"MessageSystemAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"`
+}
+
+// String returns the string representation
+func (s SendMessageBatchRequestEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendMessageBatchRequestEntry) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SendMessageBatchRequestEntry) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SendMessageBatchRequestEntry"}
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.MessageBody == nil {
+ invalidParams.Add(request.NewErrParamRequired("MessageBody"))
+ }
+ if s.MessageAttributes != nil {
+ for i, v := range s.MessageAttributes {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MessageAttributes", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.MessageSystemAttributes != nil {
+ for i, v := range s.MessageSystemAttributes {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MessageSystemAttributes", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDelaySeconds sets the DelaySeconds field's value.
+func (s *SendMessageBatchRequestEntry) SetDelaySeconds(v int64) *SendMessageBatchRequestEntry {
+ s.DelaySeconds = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *SendMessageBatchRequestEntry) SetId(v string) *SendMessageBatchRequestEntry {
+ s.Id = &v
+ return s
+}
+
+// SetMessageAttributes sets the MessageAttributes field's value.
+func (s *SendMessageBatchRequestEntry) SetMessageAttributes(v map[string]*MessageAttributeValue) *SendMessageBatchRequestEntry {
+ s.MessageAttributes = v
+ return s
+}
+
+// SetMessageBody sets the MessageBody field's value.
+func (s *SendMessageBatchRequestEntry) SetMessageBody(v string) *SendMessageBatchRequestEntry {
+ s.MessageBody = &v
+ return s
+}
+
+// SetMessageDeduplicationId sets the MessageDeduplicationId field's value.
+func (s *SendMessageBatchRequestEntry) SetMessageDeduplicationId(v string) *SendMessageBatchRequestEntry {
+ s.MessageDeduplicationId = &v
+ return s
+}
+
+// SetMessageGroupId sets the MessageGroupId field's value.
+func (s *SendMessageBatchRequestEntry) SetMessageGroupId(v string) *SendMessageBatchRequestEntry {
+ s.MessageGroupId = &v
+ return s
+}
+
+// SetMessageSystemAttributes sets the MessageSystemAttributes field's value.
+func (s *SendMessageBatchRequestEntry) SetMessageSystemAttributes(v map[string]*MessageSystemAttributeValue) *SendMessageBatchRequestEntry {
+ s.MessageSystemAttributes = v
+ return s
+}
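+
+// A sketch of a FIFO batch entry that sets an explicit MessageDeduplicationId
+// and MessageGroupId as described above (all values are placeholders):
+//
+//	entry := (&SendMessageBatchRequestEntry{}).
+//		SetId("entry-1").
+//		SetMessageBody(`{"orderId":42}`).
+//		SetMessageGroupId("customer-1234").
+//		SetMessageDeduplicationId("order-42-v1")
+//	if err := entry.Validate(); err != nil {
+//		// Id or MessageBody is missing
+//	}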
+
+// Encloses a MessageId for a successfully enqueued message in a SendMessageBatch.
+type SendMessageBatchResultEntry struct {
+ _ struct{} `type:"structure"`
+
+ // An identifier for the message in this batch.
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+
+ // An MD5 digest of the non-URL-encoded message attribute string. You can use
+ // this attribute to verify that Amazon SQS received the message correctly.
+ // Amazon SQS URL-decodes the message before creating the MD5 digest. For information
+ // about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt).
+ MD5OfMessageAttributes *string `type:"string"`
+
+ // An MD5 digest of the non-URL-encoded message body string. You can use
+ // this attribute to verify that Amazon SQS received the message correctly.
+ // Amazon SQS URL-decodes the message before creating the MD5 digest. For information
+ // about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt).
+ //
+ // MD5OfMessageBody is a required field
+ MD5OfMessageBody *string `type:"string" required:"true"`
+
+ // An MD5 digest of the non-URL-encoded message system attribute string. You
+ // can use this attribute to verify that Amazon SQS received the message correctly.
+ // Amazon SQS URL-decodes the message before creating the MD5 digest. For information
+ // about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt).
+ MD5OfMessageSystemAttributes *string `type:"string"`
+
+ // An identifier for the message.
+ //
+ // MessageId is a required field
+ MessageId *string `type:"string" required:"true"`
+
+ // This parameter applies only to FIFO (first-in-first-out) queues.
+ //
+ // The large, non-consecutive number that Amazon SQS assigns to each message.
+ //
+ // The length of SequenceNumber is 128 bits. SequenceNumber continues to
+ // increase for a particular MessageGroupId.
+ SequenceNumber *string `type:"string"`
+}
+
+// String returns the string representation
+func (s SendMessageBatchResultEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendMessageBatchResultEntry) GoString() string {
+ return s.String()
+}
+
+// SetId sets the Id field's value.
+func (s *SendMessageBatchResultEntry) SetId(v string) *SendMessageBatchResultEntry {
+ s.Id = &v
+ return s
+}
+
+// SetMD5OfMessageAttributes sets the MD5OfMessageAttributes field's value.
+func (s *SendMessageBatchResultEntry) SetMD5OfMessageAttributes(v string) *SendMessageBatchResultEntry {
+ s.MD5OfMessageAttributes = &v
+ return s
+}
+
+// SetMD5OfMessageBody sets the MD5OfMessageBody field's value.
+func (s *SendMessageBatchResultEntry) SetMD5OfMessageBody(v string) *SendMessageBatchResultEntry {
+ s.MD5OfMessageBody = &v
+ return s
+}
+
+// SetMD5OfMessageSystemAttributes sets the MD5OfMessageSystemAttributes field's value.
+func (s *SendMessageBatchResultEntry) SetMD5OfMessageSystemAttributes(v string) *SendMessageBatchResultEntry {
+ s.MD5OfMessageSystemAttributes = &v
+ return s
+}
+
+// SetMessageId sets the MessageId field's value.
+func (s *SendMessageBatchResultEntry) SetMessageId(v string) *SendMessageBatchResultEntry {
+ s.MessageId = &v
+ return s
+}
+
+// SetSequenceNumber sets the SequenceNumber field's value.
+func (s *SendMessageBatchResultEntry) SetSequenceNumber(v string) *SendMessageBatchResultEntry {
+ s.SequenceNumber = &v
+ return s
+}
+
+type SendMessageInput struct {
+ _ struct{} `type:"structure"`
+
+ // The length of time, in seconds, for which to delay a specific message. Valid
+ // values: 0 to 900. Maximum: 15 minutes. Messages with a positive DelaySeconds
+ // value become available for processing after the delay period is finished.
+ // If you don't specify a value, the default value for the queue applies.
+ //
+ // When you set FifoQueue, you can't set DelaySeconds per message. You can set
+ // this parameter only on a queue level.
+ DelaySeconds *int64 `type:"integer"`
+
+ // Each message attribute consists of a Name, Type, and Value. For more information,
+ // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"`
+
+ // The message to send. The maximum string size is 256 KB.
+ //
+ // A message can include only XML, JSON, and unformatted text. The following
+ // Unicode characters are allowed:
+ //
+ // #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF
+ //
+ // Any characters not included in this list will be rejected. For more information,
+ // see the W3C specification for characters (http://www.w3.org/TR/REC-xml/#charsets).
+ //
+ // MessageBody is a required field
+ MessageBody *string `type:"string" required:"true"`
+
+ // This parameter applies only to FIFO (first-in-first-out) queues.
+ //
+ // The token used for deduplication of sent messages. If a message with a particular
+ // MessageDeduplicationId is sent successfully, any messages sent with the same
+ // MessageDeduplicationId are accepted successfully but aren't delivered during
+ // the 5-minute deduplication interval. For more information, see Exactly-Once
+ // Processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing)
+ // in the Amazon Simple Queue Service Developer Guide.
+ //
+ // * Every message must have a unique MessageDeduplicationId. You may provide
+ // a MessageDeduplicationId explicitly. If you aren't able to provide a MessageDeduplicationId
+ // and you enable ContentBasedDeduplication for your queue, Amazon SQS uses
+ // a SHA-256 hash to generate the MessageDeduplicationId using the body of
+ // the message (but not the attributes of the message). If you don't provide
+ // a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication
+ // set, the action fails with an error. If the queue has ContentBasedDeduplication
+ // set, your MessageDeduplicationId overrides the generated one.
+ //
+ // * When ContentBasedDeduplication is in effect, messages with identical
+ // content sent within the deduplication interval are treated as duplicates
+ // and only one copy of the message is delivered.
+ //
+ // * If you send one message with ContentBasedDeduplication enabled and then
+ // another message with a MessageDeduplicationId that is the same as the
+ // one generated for the first MessageDeduplicationId, the two messages are
+ // treated as duplicates and only one copy of the message is delivered.
+ //
+ // The MessageDeduplicationId is available to the consumer of the message (this
+ // can be useful for troubleshooting delivery issues).
+ //
+ // If a message is sent successfully but the acknowledgement is lost and the
+ // message is resent with the same MessageDeduplicationId after the deduplication
+ // interval, Amazon SQS can't detect duplicate messages.
+ //
+ // Amazon SQS continues to keep track of the message deduplication ID even after
+ // the message is received and deleted.
+ //
+ // The maximum length of MessageDeduplicationId is 128 characters. MessageDeduplicationId
+ // can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~).
+ //
+ // For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId
+ // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ MessageDeduplicationId *string `type:"string"`
+
+ // This parameter applies only to FIFO (first-in-first-out) queues.
+ //
+ // The tag that specifies that a message belongs to a specific message group.
+ // Messages that belong to the same message group are processed in a FIFO manner
+ // (however, messages in different message groups might be processed out of
+ // order). To interleave multiple ordered streams within a single queue, use
+ // MessageGroupId values (for example, session data for multiple users). In
+ // this scenario, multiple consumers can process the queue, but the session
+ // data of each user is processed in a FIFO fashion.
+ //
+ // * You must associate a non-empty MessageGroupId with a message. If you
+ // don't provide a MessageGroupId, the action fails.
+ //
+ // * ReceiveMessage might return messages with multiple MessageGroupId values.
+ // For each MessageGroupId, the messages are sorted by time sent. The caller
+ // can't specify a MessageGroupId.
+ //
+ // The length of MessageGroupId is 128 characters. Valid values: alphanumeric
+ // characters and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~).
+ //
+ // For best practices of using MessageGroupId, see Using the MessageGroupId
+ // Property (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ //
+ // MessageGroupId is required for FIFO queues. You can't use it for Standard
+ // queues.
+ MessageGroupId *string `type:"string"`
+
+ // The message system attribute to send. Each message system attribute consists
+ // of a Name, Type, and Value.
+ //
+ // * Currently, the only supported message system attribute is AWSTraceHeader.
+ // Its type must be String and its value must be a correctly formatted AWS
+ // X-Ray trace string.
+ //
+ // * The size of a message system attribute doesn't count towards the total
+ // size of a message.
+ MessageSystemAttributes map[string]*MessageSystemAttributeValue `locationName:"MessageSystemAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"`
+
+ // The URL of the Amazon SQS queue to which a message is sent.
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SendMessageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendMessageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SendMessageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SendMessageInput"}
+ if s.MessageBody == nil {
+ invalidParams.Add(request.NewErrParamRequired("MessageBody"))
+ }
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+ if s.MessageAttributes != nil {
+ for i, v := range s.MessageAttributes {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MessageAttributes", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.MessageSystemAttributes != nil {
+ for i, v := range s.MessageSystemAttributes {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MessageSystemAttributes", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDelaySeconds sets the DelaySeconds field's value.
+func (s *SendMessageInput) SetDelaySeconds(v int64) *SendMessageInput {
+ s.DelaySeconds = &v
+ return s
+}
+
+// SetMessageAttributes sets the MessageAttributes field's value.
+func (s *SendMessageInput) SetMessageAttributes(v map[string]*MessageAttributeValue) *SendMessageInput {
+ s.MessageAttributes = v
+ return s
+}
+
+// SetMessageBody sets the MessageBody field's value.
+func (s *SendMessageInput) SetMessageBody(v string) *SendMessageInput {
+ s.MessageBody = &v
+ return s
+}
+
+// SetMessageDeduplicationId sets the MessageDeduplicationId field's value.
+func (s *SendMessageInput) SetMessageDeduplicationId(v string) *SendMessageInput {
+ s.MessageDeduplicationId = &v
+ return s
+}
+
+// SetMessageGroupId sets the MessageGroupId field's value.
+func (s *SendMessageInput) SetMessageGroupId(v string) *SendMessageInput {
+ s.MessageGroupId = &v
+ return s
+}
+
+// SetMessageSystemAttributes sets the MessageSystemAttributes field's value.
+func (s *SendMessageInput) SetMessageSystemAttributes(v map[string]*MessageSystemAttributeValue) *SendMessageInput {
+ s.MessageSystemAttributes = v
+ return s
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *SendMessageInput) SetQueueUrl(v string) *SendMessageInput {
+ s.QueueUrl = &v
+ return s
+}
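+
+// A sketch of sending a single message with one custom attribute (svc is
+// assumed to be an *SQS client; the queue URL and values are placeholders):
+//
+//	input := (&SendMessageInput{}).
+//		SetQueueUrl("https://sqs.us-east-1.amazonaws.com/123456789012/my-queue").
+//		SetMessageBody("hello world").
+//		SetMessageAttributes(map[string]*MessageAttributeValue{
+//			"Source": (&MessageAttributeValue{}).
+//				SetDataType("String").
+//				SetStringValue("billing-service"),
+//		})
+//	out, err := svc.SendMessage(input)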
+
+// The MD5OfMessageBody and MessageId elements.
+type SendMessageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An MD5 digest of the non-URL-encoded message attribute string. You can use
+ // this attribute to verify that Amazon SQS received the message correctly.
+ // Amazon SQS URL-decodes the message before creating the MD5 digest. For information
+ // about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt).
+ MD5OfMessageAttributes *string `type:"string"`
+
+ // An MD5 digest of the non-URL-encoded message body string. You can use
+ // this attribute to verify that Amazon SQS received the message correctly.
+ // Amazon SQS URL-decodes the message before creating the MD5 digest. For information
+ // about MD5, see RFC1321 (https://www.ietf.org/rfc/rfc1321.txt).
+ MD5OfMessageBody *string `type:"string"`
+
+ // An MD5 digest of the non-URL-encoded message system attribute string. You
+ // can use this attribute to verify that Amazon SQS received the message correctly.
+ // Amazon SQS URL-decodes the message before creating the MD5 digest.
+ MD5OfMessageSystemAttributes *string `type:"string"`
+
+ // An attribute containing the MessageId of the message sent to the queue. For
+ // more information, see Queue and Message Identifiers (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-queue-message-identifiers.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ MessageId *string `type:"string"`
+
+ // This parameter applies only to FIFO (first-in-first-out) queues.
+ //
+ // The large, non-consecutive number that Amazon SQS assigns to each message.
+ //
+ // The length of SequenceNumber is 128 bits. SequenceNumber continues to increase
+ // for a particular MessageGroupId.
+ SequenceNumber *string `type:"string"`
+}
+
+// String returns the string representation
+func (s SendMessageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SendMessageOutput) GoString() string {
+ return s.String()
+}
+
+// SetMD5OfMessageAttributes sets the MD5OfMessageAttributes field's value.
+func (s *SendMessageOutput) SetMD5OfMessageAttributes(v string) *SendMessageOutput {
+ s.MD5OfMessageAttributes = &v
+ return s
+}
+
+// SetMD5OfMessageBody sets the MD5OfMessageBody field's value.
+func (s *SendMessageOutput) SetMD5OfMessageBody(v string) *SendMessageOutput {
+ s.MD5OfMessageBody = &v
+ return s
+}
+
+// SetMD5OfMessageSystemAttributes sets the MD5OfMessageSystemAttributes field's value.
+func (s *SendMessageOutput) SetMD5OfMessageSystemAttributes(v string) *SendMessageOutput {
+ s.MD5OfMessageSystemAttributes = &v
+ return s
+}
+
+// SetMessageId sets the MessageId field's value.
+func (s *SendMessageOutput) SetMessageId(v string) *SendMessageOutput {
+ s.MessageId = &v
+ return s
+}
+
+// SetSequenceNumber sets the SequenceNumber field's value.
+func (s *SendMessageOutput) SetSequenceNumber(v string) *SendMessageOutput {
+ s.SequenceNumber = &v
+ return s
+}
+
+type SetQueueAttributesInput struct {
+ _ struct{} `type:"structure"`
+
+ // A map of attributes to set.
+ //
+ // The following lists the names, descriptions, and values of the special request
+ // parameters that the SetQueueAttributes action uses:
+ //
+ // * DelaySeconds - The length of time, in seconds, for which the delivery
+ // of all messages in the queue is delayed. Valid values: An integer from
+ // 0 to 900 (15 minutes). Default: 0.
+ //
+ // * MaximumMessageSize - The limit of how many bytes a message can contain
+ // before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes
+ // (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB).
+ //
+ // * MessageRetentionPeriod - The length of time, in seconds, for which Amazon
+ // SQS retains a message. Valid values: An integer representing seconds,
+ // from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days).
+ //
+ // * Policy - The queue's policy. A valid AWS policy. For more information
+ // about policy structure, see Overview of AWS IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html)
+ // in the Amazon IAM User Guide.
+ //
+ // * ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for
+ // which a ReceiveMessage action waits for a message to arrive. Valid values:
+ // an integer from 0 to 20 (seconds). Default: 0.
+ //
+ // * RedrivePolicy - The string that includes the parameters for the dead-letter
+ // queue functionality of the source queue. For more information about the
+ // redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter
+ // Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html)
+ // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn
+ // - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon
+ // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount
+ // - The number of times a message is delivered to the source queue before
+ // being moved to the dead-letter queue. When the ReceiveCount for a message
+ // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message
+ // to the dead-letter queue. The dead-letter queue of a FIFO queue must also
+ // be a FIFO queue. Similarly, the dead-letter queue of a standard queue
+ // must also be a standard queue.
+ //
+ // * VisibilityTimeout - The visibility timeout for the queue, in seconds.
+ // Valid values: an integer from 0 to 43,200 (12 hours). Default: 30. For
+ // more information about the visibility timeout, see Visibility Timeout
+ // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html)
+ // in the Amazon Simple Queue Service Developer Guide.
+ //
+ // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html):
+ //
+ // * KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK)
+ // for Amazon SQS or a custom CMK. For more information, see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms).
+ // While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs,
+ // the alias of a custom CMK can, for example, be alias/MyAlias . For more
+ // examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters)
+ // in the AWS Key Management Service API Reference.
+ //
+ // * KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which
+ // Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys)
+ // to encrypt or decrypt messages before calling AWS KMS again. An integer
+ // representing seconds, between 60 seconds (1 minute) and 86,400 seconds
+ // (24 hours). Default: 300 (5 minutes). A shorter time period provides better
+ // security but results in more calls to KMS which might incur charges after
+ // Free Tier. For more information, see How Does the Data Key Reuse Period
+ // Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work).
+ //
+ // The following attribute applies only to FIFO (first-in-first-out) queues
+ // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html):
+ //
+ // * ContentBasedDeduplication - Enables content-based deduplication. For
+ // more information, see Exactly-Once Processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing)
+ // in the Amazon Simple Queue Service Developer Guide. Every message must
+ // have a unique MessageDeduplicationId. You may provide a MessageDeduplicationId
+ // explicitly. If you aren't able to provide a MessageDeduplicationId and
+ // you enable ContentBasedDeduplication for your queue, Amazon SQS uses a
+ // SHA-256 hash to generate the MessageDeduplicationId using the body of
+ // the message (but not the attributes of the message). If you don't provide
+ // a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication
+ // set, the action fails with an error. If the queue has ContentBasedDeduplication
+ // set, your MessageDeduplicationId overrides the generated one. When ContentBasedDeduplication
+ // is in effect, messages with identical content sent within the deduplication
+ // interval are treated as duplicates and only one copy of the message is
+ // delivered. If you send one message with ContentBasedDeduplication enabled
+ // and then another message with a MessageDeduplicationId that is the same
+ // as the one generated for the first MessageDeduplicationId, the two messages
+ // are treated as duplicates and only one copy of the message is delivered.
+ //
+ // Attributes is a required field
+ Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true" required:"true"`
+
+ // The URL of the Amazon SQS queue whose attributes are set.
+ //
+ // Queue URLs and names are case-sensitive.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SetQueueAttributesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetQueueAttributesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SetQueueAttributesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SetQueueAttributesInput"}
+ if s.Attributes == nil {
+ invalidParams.Add(request.NewErrParamRequired("Attributes"))
+ }
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributes sets the Attributes field's value.
+func (s *SetQueueAttributesInput) SetAttributes(v map[string]*string) *SetQueueAttributesInput {
+ s.Attributes = v
+ return s
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *SetQueueAttributesInput) SetQueueUrl(v string) *SetQueueAttributesInput {
+ s.QueueUrl = &v
+ return s
+}
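+
+// A sketch of attaching a dead-letter queue to a source queue through the
+// RedrivePolicy attribute (the ARN, URL, and count are placeholders):
+//
+//	redrive := `{"deadLetterTargetArn":"arn:aws:sqs:us-east-1:123456789012:my-dlq","maxReceiveCount":"5"}`
+//	input := (&SetQueueAttributesInput{}).
+//		SetQueueUrl("https://sqs.us-east-1.amazonaws.com/123456789012/my-queue").
+//		SetAttributes(map[string]*string{
+//			QueueAttributeNameRedrivePolicy: &redrive,
+//		})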
+
+type SetQueueAttributesOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s SetQueueAttributesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetQueueAttributesOutput) GoString() string {
+ return s.String()
+}
+
+type TagQueueInput struct {
+ _ struct{} `type:"structure"`
+
+ // The URL of the queue.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+
+ // The list of tags to be added to the specified queue.
+ //
+ // Tags is a required field
+ Tags map[string]*string `locationName:"Tag" locationNameKey:"Key" locationNameValue:"Value" type:"map" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s TagQueueInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TagQueueInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TagQueueInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TagQueueInput"}
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+ if s.Tags == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tags"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *TagQueueInput) SetQueueUrl(v string) *TagQueueInput {
+ s.QueueUrl = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *TagQueueInput) SetTags(v map[string]*string) *TagQueueInput {
+ s.Tags = v
+ return s
+}
+
+type TagQueueOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s TagQueueOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TagQueueOutput) GoString() string {
+ return s.String()
+}
+
+type UntagQueueInput struct {
+ _ struct{} `type:"structure"`
+
+ // The URL of the queue.
+ //
+ // QueueUrl is a required field
+ QueueUrl *string `type:"string" required:"true"`
+
+ // The list of tags to be removed from the specified queue.
+ //
+ // TagKeys is a required field
+ TagKeys []*string `locationNameList:"TagKey" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s UntagQueueInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UntagQueueInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UntagQueueInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UntagQueueInput"}
+ if s.QueueUrl == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueUrl"))
+ }
+ if s.TagKeys == nil {
+ invalidParams.Add(request.NewErrParamRequired("TagKeys"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetQueueUrl sets the QueueUrl field's value.
+func (s *UntagQueueInput) SetQueueUrl(v string) *UntagQueueInput {
+ s.QueueUrl = &v
+ return s
+}
+
+// SetTagKeys sets the TagKeys field's value.
+func (s *UntagQueueInput) SetTagKeys(v []*string) *UntagQueueInput {
+ s.TagKeys = v
+ return s
+}
+
+type UntagQueueOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UntagQueueOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UntagQueueOutput) GoString() string {
+ return s.String()
+}
+
+const (
+ // MessageSystemAttributeNameSenderId is a MessageSystemAttributeName enum value
+ MessageSystemAttributeNameSenderId = "SenderId"
+
+ // MessageSystemAttributeNameSentTimestamp is a MessageSystemAttributeName enum value
+ MessageSystemAttributeNameSentTimestamp = "SentTimestamp"
+
+ // MessageSystemAttributeNameApproximateReceiveCount is a MessageSystemAttributeName enum value
+ MessageSystemAttributeNameApproximateReceiveCount = "ApproximateReceiveCount"
+
+ // MessageSystemAttributeNameApproximateFirstReceiveTimestamp is a MessageSystemAttributeName enum value
+ MessageSystemAttributeNameApproximateFirstReceiveTimestamp = "ApproximateFirstReceiveTimestamp"
+
+ // MessageSystemAttributeNameSequenceNumber is a MessageSystemAttributeName enum value
+ MessageSystemAttributeNameSequenceNumber = "SequenceNumber"
+
+ // MessageSystemAttributeNameMessageDeduplicationId is a MessageSystemAttributeName enum value
+ MessageSystemAttributeNameMessageDeduplicationId = "MessageDeduplicationId"
+
+ // MessageSystemAttributeNameMessageGroupId is a MessageSystemAttributeName enum value
+ MessageSystemAttributeNameMessageGroupId = "MessageGroupId"
+
+ // MessageSystemAttributeNameAwstraceHeader is a MessageSystemAttributeName enum value
+ MessageSystemAttributeNameAwstraceHeader = "AWSTraceHeader"
+)
+
+const (
+ // MessageSystemAttributeNameForSendsAwstraceHeader is a MessageSystemAttributeNameForSends enum value
+ MessageSystemAttributeNameForSendsAwstraceHeader = "AWSTraceHeader"
+)
+
+const (
+ // QueueAttributeNameAll is a QueueAttributeName enum value
+ QueueAttributeNameAll = "All"
+
+ // QueueAttributeNamePolicy is a QueueAttributeName enum value
+ QueueAttributeNamePolicy = "Policy"
+
+ // QueueAttributeNameVisibilityTimeout is a QueueAttributeName enum value
+ QueueAttributeNameVisibilityTimeout = "VisibilityTimeout"
+
+ // QueueAttributeNameMaximumMessageSize is a QueueAttributeName enum value
+ QueueAttributeNameMaximumMessageSize = "MaximumMessageSize"
+
+ // QueueAttributeNameMessageRetentionPeriod is a QueueAttributeName enum value
+ QueueAttributeNameMessageRetentionPeriod = "MessageRetentionPeriod"
+
+ // QueueAttributeNameApproximateNumberOfMessages is a QueueAttributeName enum value
+ QueueAttributeNameApproximateNumberOfMessages = "ApproximateNumberOfMessages"
+
+ // QueueAttributeNameApproximateNumberOfMessagesNotVisible is a QueueAttributeName enum value
+ QueueAttributeNameApproximateNumberOfMessagesNotVisible = "ApproximateNumberOfMessagesNotVisible"
+
+ // QueueAttributeNameCreatedTimestamp is a QueueAttributeName enum value
+ QueueAttributeNameCreatedTimestamp = "CreatedTimestamp"
+
+ // QueueAttributeNameLastModifiedTimestamp is a QueueAttributeName enum value
+ QueueAttributeNameLastModifiedTimestamp = "LastModifiedTimestamp"
+
+ // QueueAttributeNameQueueArn is a QueueAttributeName enum value
+ QueueAttributeNameQueueArn = "QueueArn"
+
+ // QueueAttributeNameApproximateNumberOfMessagesDelayed is a QueueAttributeName enum value
+ QueueAttributeNameApproximateNumberOfMessagesDelayed = "ApproximateNumberOfMessagesDelayed"
+
+ // QueueAttributeNameDelaySeconds is a QueueAttributeName enum value
+ QueueAttributeNameDelaySeconds = "DelaySeconds"
+
+ // QueueAttributeNameReceiveMessageWaitTimeSeconds is a QueueAttributeName enum value
+ QueueAttributeNameReceiveMessageWaitTimeSeconds = "ReceiveMessageWaitTimeSeconds"
+
+ // QueueAttributeNameRedrivePolicy is a QueueAttributeName enum value
+ QueueAttributeNameRedrivePolicy = "RedrivePolicy"
+
+ // QueueAttributeNameFifoQueue is a QueueAttributeName enum value
+ QueueAttributeNameFifoQueue = "FifoQueue"
+
+ // QueueAttributeNameContentBasedDeduplication is a QueueAttributeName enum value
+ QueueAttributeNameContentBasedDeduplication = "ContentBasedDeduplication"
+
+ // QueueAttributeNameKmsMasterKeyId is a QueueAttributeName enum value
+ QueueAttributeNameKmsMasterKeyId = "KmsMasterKeyId"
+
+ // QueueAttributeNameKmsDataKeyReusePeriodSeconds is a QueueAttributeName enum value
+ QueueAttributeNameKmsDataKeyReusePeriodSeconds = "KmsDataKeyReusePeriodSeconds"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go
new file mode 100644
index 000000000..e85e89a81
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go
@@ -0,0 +1,114 @@
+package sqs
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "fmt"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+var (
+ errChecksumMissingBody = fmt.Errorf("cannot compute checksum. missing body")
+ errChecksumMissingMD5 = fmt.Errorf("cannot verify checksum. missing response MD5")
+)
+
+func setupChecksumValidation(r *request.Request) {
+ if aws.BoolValue(r.Config.DisableComputeChecksums) {
+ return
+ }
+
+ switch r.Operation.Name {
+ case opSendMessage:
+ r.Handlers.Unmarshal.PushBack(verifySendMessage)
+ case opSendMessageBatch:
+ r.Handlers.Unmarshal.PushBack(verifySendMessageBatch)
+ case opReceiveMessage:
+ r.Handlers.Unmarshal.PushBack(verifyReceiveMessage)
+ }
+}
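+
+// Validation can be switched off per client through the standard config
+// option (sess is an assumed, already-configured session):
+//
+//	svc := New(sess, aws.NewConfig().WithDisableComputeChecksums(true))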
+
+func verifySendMessage(r *request.Request) {
+ if r.DataFilled() && r.ParamsFilled() {
+ in := r.Params.(*SendMessageInput)
+ out := r.Data.(*SendMessageOutput)
+ err := checksumsMatch(in.MessageBody, out.MD5OfMessageBody)
+ if err != nil {
+ setChecksumError(r, err.Error())
+ }
+ }
+}
+
+func verifySendMessageBatch(r *request.Request) {
+ if r.DataFilled() && r.ParamsFilled() {
+ entries := map[string]*SendMessageBatchResultEntry{}
+ ids := []string{}
+
+ out := r.Data.(*SendMessageBatchOutput)
+ for _, entry := range out.Successful {
+ entries[*entry.Id] = entry
+ }
+
+ in := r.Params.(*SendMessageBatchInput)
+ for _, entry := range in.Entries {
+ if e, ok := entries[*entry.Id]; ok {
+ if err := checksumsMatch(entry.MessageBody, e.MD5OfMessageBody); err != nil {
+ ids = append(ids, *e.MessageId)
+ }
+ }
+ }
+ if len(ids) > 0 {
+ setChecksumError(r, "invalid messages: %s", strings.Join(ids, ", "))
+ }
+ }
+}
+
+func verifyReceiveMessage(r *request.Request) {
+ if r.DataFilled() && r.ParamsFilled() {
+ ids := []string{}
+ out := r.Data.(*ReceiveMessageOutput)
+ for i, msg := range out.Messages {
+ err := checksumsMatch(msg.Body, msg.MD5OfBody)
+ if err != nil {
+ if msg.MessageId == nil {
+ if r.Config.Logger != nil {
+ r.Config.Logger.Log(fmt.Sprintf(
+ "WARN: SQS.ReceiveMessage failed checksum request id: %s, message %d has no message ID.",
+ r.RequestID, i,
+ ))
+ }
+ continue
+ }
+
+ ids = append(ids, *msg.MessageId)
+ }
+ }
+ if len(ids) > 0 {
+ setChecksumError(r, "invalid messages: %s", strings.Join(ids, ", "))
+ }
+ }
+}
+
+func checksumsMatch(body, expectedMD5 *string) error {
+ if body == nil {
+ return errChecksumMissingBody
+ } else if expectedMD5 == nil {
+ return errChecksumMissingMD5
+ }
+
+ msum := md5.Sum([]byte(*body))
+ sum := hex.EncodeToString(msum[:])
+ if sum != *expectedMD5 {
+ return fmt.Errorf("expected MD5 checksum '%s', got '%s'", *expectedMD5, sum)
+ }
+
+ return nil
+}
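+
+// The digest is the hex-encoded MD5 of the raw message body, so a reported
+// mismatch can be reproduced locally:
+//
+//	sum := md5.Sum([]byte("hello world"))
+//	hex.EncodeToString(sum[:]) // "5eb63bbbe01eeed093cb22bb8f5acdc3"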
+
+func setChecksumError(r *request.Request, format string, args ...interface{}) {
+ r.Retryable = aws.Bool(true)
+ r.Error = awserr.New("InvalidChecksum", fmt.Sprintf(format, args...), nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/customizations.go
new file mode 100644
index 000000000..7498363de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/customizations.go
@@ -0,0 +1,9 @@
+package sqs
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+func init() {
+ initRequest = func(r *request.Request) {
+ setupChecksumValidation(r)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go
new file mode 100644
index 000000000..3a3f55f09
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go
@@ -0,0 +1,55 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sqs provides the client and types for making API
+// requests to Amazon Simple Queue Service.
+//
+// Welcome to the Amazon Simple Queue Service API Reference.
+//
+// Amazon Simple Queue Service (Amazon SQS) is a reliable, highly scalable hosted
+// queue for storing messages as they travel between applications or microservices.
+// Amazon SQS moves data between distributed application components and helps
+// you decouple these components.
+//
+// You can use AWS SDKs (http://aws.amazon.com/tools/#sdk) to access Amazon
+// SQS using your favorite programming language. The SDKs perform tasks such
+// as the following automatically:
+//
+// * Cryptographically sign your service requests
+//
+// * Retry requests
+//
+// * Handle error responses
+//
+// Additional Information
+//
+// * Amazon SQS Product Page (http://aws.amazon.com/sqs/)
+//
+// * Amazon Simple Queue Service Developer Guide Making API Requests (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html)
+// Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html)
+// Amazon SQS Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html)
+//
+// * Amazon SQS in the AWS CLI Command Reference (http://docs.aws.amazon.com/cli/latest/reference/sqs/index.html)
+//
+// * Amazon Web Services General Reference Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#sqs_region)
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sqs-2012-11-05 for more information on this service.
+//
+// See sqs package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sqs/
+//
+// Using the Client
+//
+// To contact Amazon Simple Queue Service with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon Simple Queue Service client SQS for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sqs/#New
+package sqs
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/errors.go
new file mode 100644
index 000000000..89eb40d7f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/errors.go
@@ -0,0 +1,110 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sqs
+
+const (
+
+ // ErrCodeBatchEntryIdsNotDistinct for service response error code
+ // "AWS.SimpleQueueService.BatchEntryIdsNotDistinct".
+ //
+ // Two or more batch entries in the request have the same Id.
+ ErrCodeBatchEntryIdsNotDistinct = "AWS.SimpleQueueService.BatchEntryIdsNotDistinct"
+
+ // ErrCodeBatchRequestTooLong for service response error code
+ // "AWS.SimpleQueueService.BatchRequestTooLong".
+ //
+ // The length of all the messages put together is more than the limit.
+ ErrCodeBatchRequestTooLong = "AWS.SimpleQueueService.BatchRequestTooLong"
+
+ // ErrCodeEmptyBatchRequest for service response error code
+ // "AWS.SimpleQueueService.EmptyBatchRequest".
+ //
+ // The batch request doesn't contain any entries.
+ ErrCodeEmptyBatchRequest = "AWS.SimpleQueueService.EmptyBatchRequest"
+
+ // ErrCodeInvalidAttributeName for service response error code
+ // "InvalidAttributeName".
+ //
+ // The specified attribute doesn't exist.
+ ErrCodeInvalidAttributeName = "InvalidAttributeName"
+
+ // ErrCodeInvalidBatchEntryId for service response error code
+ // "AWS.SimpleQueueService.InvalidBatchEntryId".
+ //
+ // The Id of a batch entry in a batch request doesn't abide by the specification.
+ ErrCodeInvalidBatchEntryId = "AWS.SimpleQueueService.InvalidBatchEntryId"
+
+ // ErrCodeInvalidIdFormat for service response error code
+ // "InvalidIdFormat".
+ //
+ // The specified receipt handle isn't valid for the current version.
+ ErrCodeInvalidIdFormat = "InvalidIdFormat"
+
+ // ErrCodeInvalidMessageContents for service response error code
+ // "InvalidMessageContents".
+ //
+ // The message contains characters outside the allowed set.
+ ErrCodeInvalidMessageContents = "InvalidMessageContents"
+
+ // ErrCodeMessageNotInflight for service response error code
+ // "AWS.SimpleQueueService.MessageNotInflight".
+ //
+ // The specified message isn't in flight.
+ ErrCodeMessageNotInflight = "AWS.SimpleQueueService.MessageNotInflight"
+
+ // ErrCodeOverLimit for service response error code
+ // "OverLimit".
+ //
+ // The specified action violates a limit. For example, ReceiveMessage returns
+ // this error if the maximum number of inflight messages is reached and AddPermission
+ // returns this error if the maximum number of permissions for the queue is
+ // reached.
+ ErrCodeOverLimit = "OverLimit"
+
+ // ErrCodePurgeQueueInProgress for service response error code
+ // "AWS.SimpleQueueService.PurgeQueueInProgress".
+ //
+ // Indicates that the specified queue previously received a PurgeQueue request
+ // within the last 60 seconds (the time it can take to delete the messages in
+ // the queue).
+ ErrCodePurgeQueueInProgress = "AWS.SimpleQueueService.PurgeQueueInProgress"
+
+ // ErrCodeQueueDeletedRecently for service response error code
+ // "AWS.SimpleQueueService.QueueDeletedRecently".
+ //
+ // You must wait 60 seconds after deleting a queue before you can create another
+ // queue with the same name.
+ ErrCodeQueueDeletedRecently = "AWS.SimpleQueueService.QueueDeletedRecently"
+
+ // ErrCodeQueueDoesNotExist for service response error code
+ // "AWS.SimpleQueueService.NonExistentQueue".
+ //
+ // The specified queue doesn't exist.
+ ErrCodeQueueDoesNotExist = "AWS.SimpleQueueService.NonExistentQueue"
+
+ // ErrCodeQueueNameExists for service response error code
+ // "QueueAlreadyExists".
+ //
+ // A queue with this name already exists. Amazon SQS returns this error only
+ // if the request includes attributes whose values differ from those of the
+ // existing queue.
+ ErrCodeQueueNameExists = "QueueAlreadyExists"
+
+ // ErrCodeReceiptHandleIsInvalid for service response error code
+ // "ReceiptHandleIsInvalid".
+ //
+ // The specified receipt handle isn't valid.
+ ErrCodeReceiptHandleIsInvalid = "ReceiptHandleIsInvalid"
+
+ // ErrCodeTooManyEntriesInBatchRequest for service response error code
+ // "AWS.SimpleQueueService.TooManyEntriesInBatchRequest".
+ //
+ // The batch request contains more entries than permissible.
+ ErrCodeTooManyEntriesInBatchRequest = "AWS.SimpleQueueService.TooManyEntriesInBatchRequest"
+
+ // ErrCodeUnsupportedOperation for service response error code
+ // "AWS.SimpleQueueService.UnsupportedOperation".
+ //
+ // Error code 400. Unsupported operation.
+ ErrCodeUnsupportedOperation = "AWS.SimpleQueueService.UnsupportedOperation"
+)
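+
+// A sketch of distinguishing these codes on a returned error (err is assumed
+// to come from an SQS API call; awserr is github.com/aws/aws-sdk-go/aws/awserr):
+//
+//	if aerr, ok := err.(awserr.Error); ok {
+//		switch aerr.Code() {
+//		case ErrCodeQueueDoesNotExist:
+//			// create the queue, or surface a configuration problem
+//		case ErrCodeOverLimit:
+//			// back off before calling ReceiveMessage again
+//		}
+//	}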
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go
new file mode 100644
index 000000000..7bac89c4a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go
@@ -0,0 +1,96 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sqs
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// SQS provides the API operation methods for making requests to
+// Amazon Simple Queue Service. See this package's package overview docs
+// for details on the service.
+//
+// SQS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties, though.
+type SQS struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "sqs" // Name of service.
+ EndpointsID = ServiceName // ID to lookup a service endpoint with.
+ ServiceID = "SQS" // ServiceID is a unique identifer of a specific service.
+)
+
+// New creates a new instance of the SQS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a SQS client from just a session.
+// svc := sqs.New(mySession)
+//
+// // Create a SQS client with additional configuration
+// svc := sqs.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SQS {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SQS {
+ svc := &SQS{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ PartitionID: partitionID,
+ Endpoint: endpoint,
+ APIVersion: "2012-11-05",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a SQS operation and runs any
+// custom request initialization.
+func (c *SQS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
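+
+// A sketch of a minimal end-to-end call (a configured session is assumed;
+// session is github.com/aws/aws-sdk-go/aws/session and the queue name is a
+// placeholder):
+//
+//	sess := session.Must(session.NewSession())
+//	svc := New(sess, aws.NewConfig().WithRegion("us-east-1"))
+//	out, err := svc.GetQueueUrl(&GetQueueUrlInput{
+//		QueueName: aws.String("my-queue"),
+//	})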
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go
new file mode 100644
index 000000000..c9168943c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/sqsiface/interface.go
@@ -0,0 +1,144 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sqsiface provides an interface to enable mocking the Amazon Simple Queue Service service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package sqsiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/sqs"
+)
+
+// SQSAPI provides an interface to enable mocking the
+// sqs.SQS service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client easier.
+//
+// The best way to use this interface is to stub out the SDK's service client's
+// calls when unit testing your code with the SDK, without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+// // myFunc uses an SDK service client to make a request to
+// // Amazon Simple Queue Service.
+// func myFunc(svc sqsiface.SQSAPI) bool {
+// // Make svc.AddPermission request
+// }
+//
+// func main() {
+// sess := session.New()
+// svc := sqs.New(sess)
+//
+// myFunc(svc)
+// }
+//
+// In your _test.go file:
+//
+// // Define a mock struct to be used in your unit tests of myFunc.
+// type mockSQSClient struct {
+// sqsiface.SQSAPI
+// }
+// func (m *mockSQSClient) AddPermission(input *sqs.AddPermissionInput) (*sqs.AddPermissionOutput, error) {
+// // mock response/functionality
+// }
+//
+// func TestMyFunc(t *testing.T) {
+// // Setup Test
+// mockSvc := &mockSQSClient{}
+//
+// myFunc(mockSvc)
+//
+// // Verify myFunc's functionality
+// }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It is suggested to use the pattern above for testing, or to use
+// tooling to generate mocks that satisfy the interfaces.
+type SQSAPI interface {
+ AddPermission(*sqs.AddPermissionInput) (*sqs.AddPermissionOutput, error)
+ AddPermissionWithContext(aws.Context, *sqs.AddPermissionInput, ...request.Option) (*sqs.AddPermissionOutput, error)
+ AddPermissionRequest(*sqs.AddPermissionInput) (*request.Request, *sqs.AddPermissionOutput)
+
+ ChangeMessageVisibility(*sqs.ChangeMessageVisibilityInput) (*sqs.ChangeMessageVisibilityOutput, error)
+ ChangeMessageVisibilityWithContext(aws.Context, *sqs.ChangeMessageVisibilityInput, ...request.Option) (*sqs.ChangeMessageVisibilityOutput, error)
+ ChangeMessageVisibilityRequest(*sqs.ChangeMessageVisibilityInput) (*request.Request, *sqs.ChangeMessageVisibilityOutput)
+
+ ChangeMessageVisibilityBatch(*sqs.ChangeMessageVisibilityBatchInput) (*sqs.ChangeMessageVisibilityBatchOutput, error)
+ ChangeMessageVisibilityBatchWithContext(aws.Context, *sqs.ChangeMessageVisibilityBatchInput, ...request.Option) (*sqs.ChangeMessageVisibilityBatchOutput, error)
+ ChangeMessageVisibilityBatchRequest(*sqs.ChangeMessageVisibilityBatchInput) (*request.Request, *sqs.ChangeMessageVisibilityBatchOutput)
+
+ CreateQueue(*sqs.CreateQueueInput) (*sqs.CreateQueueOutput, error)
+ CreateQueueWithContext(aws.Context, *sqs.CreateQueueInput, ...request.Option) (*sqs.CreateQueueOutput, error)
+ CreateQueueRequest(*sqs.CreateQueueInput) (*request.Request, *sqs.CreateQueueOutput)
+
+ DeleteMessage(*sqs.DeleteMessageInput) (*sqs.DeleteMessageOutput, error)
+ DeleteMessageWithContext(aws.Context, *sqs.DeleteMessageInput, ...request.Option) (*sqs.DeleteMessageOutput, error)
+ DeleteMessageRequest(*sqs.DeleteMessageInput) (*request.Request, *sqs.DeleteMessageOutput)
+
+ DeleteMessageBatch(*sqs.DeleteMessageBatchInput) (*sqs.DeleteMessageBatchOutput, error)
+ DeleteMessageBatchWithContext(aws.Context, *sqs.DeleteMessageBatchInput, ...request.Option) (*sqs.DeleteMessageBatchOutput, error)
+ DeleteMessageBatchRequest(*sqs.DeleteMessageBatchInput) (*request.Request, *sqs.DeleteMessageBatchOutput)
+
+ DeleteQueue(*sqs.DeleteQueueInput) (*sqs.DeleteQueueOutput, error)
+ DeleteQueueWithContext(aws.Context, *sqs.DeleteQueueInput, ...request.Option) (*sqs.DeleteQueueOutput, error)
+ DeleteQueueRequest(*sqs.DeleteQueueInput) (*request.Request, *sqs.DeleteQueueOutput)
+
+ GetQueueAttributes(*sqs.GetQueueAttributesInput) (*sqs.GetQueueAttributesOutput, error)
+ GetQueueAttributesWithContext(aws.Context, *sqs.GetQueueAttributesInput, ...request.Option) (*sqs.GetQueueAttributesOutput, error)
+ GetQueueAttributesRequest(*sqs.GetQueueAttributesInput) (*request.Request, *sqs.GetQueueAttributesOutput)
+
+ GetQueueUrl(*sqs.GetQueueUrlInput) (*sqs.GetQueueUrlOutput, error)
+ GetQueueUrlWithContext(aws.Context, *sqs.GetQueueUrlInput, ...request.Option) (*sqs.GetQueueUrlOutput, error)
+ GetQueueUrlRequest(*sqs.GetQueueUrlInput) (*request.Request, *sqs.GetQueueUrlOutput)
+
+ ListDeadLetterSourceQueues(*sqs.ListDeadLetterSourceQueuesInput) (*sqs.ListDeadLetterSourceQueuesOutput, error)
+ ListDeadLetterSourceQueuesWithContext(aws.Context, *sqs.ListDeadLetterSourceQueuesInput, ...request.Option) (*sqs.ListDeadLetterSourceQueuesOutput, error)
+ ListDeadLetterSourceQueuesRequest(*sqs.ListDeadLetterSourceQueuesInput) (*request.Request, *sqs.ListDeadLetterSourceQueuesOutput)
+
+ ListQueueTags(*sqs.ListQueueTagsInput) (*sqs.ListQueueTagsOutput, error)
+ ListQueueTagsWithContext(aws.Context, *sqs.ListQueueTagsInput, ...request.Option) (*sqs.ListQueueTagsOutput, error)
+ ListQueueTagsRequest(*sqs.ListQueueTagsInput) (*request.Request, *sqs.ListQueueTagsOutput)
+
+ ListQueues(*sqs.ListQueuesInput) (*sqs.ListQueuesOutput, error)
+ ListQueuesWithContext(aws.Context, *sqs.ListQueuesInput, ...request.Option) (*sqs.ListQueuesOutput, error)
+ ListQueuesRequest(*sqs.ListQueuesInput) (*request.Request, *sqs.ListQueuesOutput)
+
+ PurgeQueue(*sqs.PurgeQueueInput) (*sqs.PurgeQueueOutput, error)
+ PurgeQueueWithContext(aws.Context, *sqs.PurgeQueueInput, ...request.Option) (*sqs.PurgeQueueOutput, error)
+ PurgeQueueRequest(*sqs.PurgeQueueInput) (*request.Request, *sqs.PurgeQueueOutput)
+
+ ReceiveMessage(*sqs.ReceiveMessageInput) (*sqs.ReceiveMessageOutput, error)
+ ReceiveMessageWithContext(aws.Context, *sqs.ReceiveMessageInput, ...request.Option) (*sqs.ReceiveMessageOutput, error)
+ ReceiveMessageRequest(*sqs.ReceiveMessageInput) (*request.Request, *sqs.ReceiveMessageOutput)
+
+ RemovePermission(*sqs.RemovePermissionInput) (*sqs.RemovePermissionOutput, error)
+ RemovePermissionWithContext(aws.Context, *sqs.RemovePermissionInput, ...request.Option) (*sqs.RemovePermissionOutput, error)
+ RemovePermissionRequest(*sqs.RemovePermissionInput) (*request.Request, *sqs.RemovePermissionOutput)
+
+ SendMessage(*sqs.SendMessageInput) (*sqs.SendMessageOutput, error)
+ SendMessageWithContext(aws.Context, *sqs.SendMessageInput, ...request.Option) (*sqs.SendMessageOutput, error)
+ SendMessageRequest(*sqs.SendMessageInput) (*request.Request, *sqs.SendMessageOutput)
+
+ SendMessageBatch(*sqs.SendMessageBatchInput) (*sqs.SendMessageBatchOutput, error)
+ SendMessageBatchWithContext(aws.Context, *sqs.SendMessageBatchInput, ...request.Option) (*sqs.SendMessageBatchOutput, error)
+ SendMessageBatchRequest(*sqs.SendMessageBatchInput) (*request.Request, *sqs.SendMessageBatchOutput)
+
+ SetQueueAttributes(*sqs.SetQueueAttributesInput) (*sqs.SetQueueAttributesOutput, error)
+ SetQueueAttributesWithContext(aws.Context, *sqs.SetQueueAttributesInput, ...request.Option) (*sqs.SetQueueAttributesOutput, error)
+ SetQueueAttributesRequest(*sqs.SetQueueAttributesInput) (*request.Request, *sqs.SetQueueAttributesOutput)
+
+ TagQueue(*sqs.TagQueueInput) (*sqs.TagQueueOutput, error)
+ TagQueueWithContext(aws.Context, *sqs.TagQueueInput, ...request.Option) (*sqs.TagQueueOutput, error)
+ TagQueueRequest(*sqs.TagQueueInput) (*request.Request, *sqs.TagQueueOutput)
+
+ UntagQueue(*sqs.UntagQueueInput) (*sqs.UntagQueueOutput, error)
+ UntagQueueWithContext(aws.Context, *sqs.UntagQueueInput, ...request.Option) (*sqs.UntagQueueOutput, error)
+ UntagQueueRequest(*sqs.UntagQueueInput) (*request.Request, *sqs.UntagQueueOutput)
+}
+
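+// Compile-time check that sqs.SQS implements the SQSAPI interface.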
+var _ SQSAPI = (*sqs.SQS)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
new file mode 100644
index 000000000..9c5ed4545
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -0,0 +1,2750 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAssumeRole = "AssumeRole"
+
+// AssumeRoleRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRole operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRole for more information on using the AssumeRole
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the AssumeRoleRequest method.
+// req, resp := client.AssumeRoleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
+ op := &request.Operation{
+ Name: opAssumeRole,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleInput{}
+ }
+
+ output = &AssumeRoleOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AssumeRole API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials that you can use to access
+// AWS resources that you might not normally have access to. These temporary
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use AssumeRole within your account or for cross-account
+// access. For a comparison of AssumeRole with other API operations that produce
+// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// You cannot use AWS account root user credentials to call AssumeRole. You
+// must use credentials for an IAM user or an IAM role to call AssumeRole.
+//
+// For cross-account access, imagine that you own multiple accounts and need
+// to access resources in each account. You could create long-term credentials
+// in each account to access those resources. However, managing all those credentials
+// and remembering which one can access which account can be time consuming.
+// Instead, you can create one set of long-term credentials in one account.
+// Then use temporary security credentials to access all the other accounts
+// by assuming roles in those accounts. For more information about roles, see
+// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html)
+// in the IAM User Guide.
+//
+// By default, the temporary security credentials created by AssumeRole last
+// for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. You can provide a value from 900
+// seconds (15 minutes) up to the maximum session duration setting for the role.
+// This setting can have a value from 1 hour to 12 hours. To learn how to view
+// the maximum value for your role, see View the Maximum Session Duration Setting
+// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any AWS service with the following exception: You cannot call
+// the AWS STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies shouldn't exceed 2048 characters. Passing policies
+// to this operation returns new temporary credentials. The resulting session's
+// permissions are the intersection of the role's identity-based policy and
+// the session policies. You can use the role's temporary credentials in subsequent
+// AWS API calls to access resources in the account that owns the role. You
+// cannot use session policies to grant more permissions than those allowed
+// by the identity-based policy of the role that is being assumed. For more
+// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide.
+//
+// To assume a role from a different account, your AWS account must be trusted
+// by the role. The trust relationship is defined in the role's trust policy
+// when the role is created. That trust policy states which accounts are allowed
+// to delegate that access to users in the account.
+//
+// A user who wants to access a role in a different account must also have permissions
+// that are delegated from the user account administrator. The administrator
+// must attach a policy that allows the user to call AssumeRole for the ARN
+// of the role in the other account. If the user is in the same account as the
+// role, then you can do either of the following:
+//
+// * Attach a policy to the user (identical to the previous user in a different
+// account).
+//
+// * Add the user as a principal directly in the role's trust policy.
+//
+// In this case, the trust policy acts as an IAM resource-based policy. Users
+// in the same account as the role do not need explicit permission to assume
+// the role. For more information about trust policies and resource-based policies,
+// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
+// in the IAM User Guide.
+//
+// Using MFA with AssumeRole
+//
+// (Optional) You can include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios to ensure
+// that the user that assumes the role has been authenticated with an AWS MFA
+// device. In that scenario, the trust policy of the role being assumed includes
+// a condition that tests for MFA authentication. If the caller does not include
+// valid MFA information, the request to assume the role is denied. The condition
+// in a trust policy that tests for MFA authentication might look like the following
+// example.
+//
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA device produces.
+//
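+// As a rough sketch, an MFA-protected AssumeRole call could look like the
+// following (the ARNs and token code are placeholder values):
+//
+//    out, err := svc.AssumeRole(&sts.AssumeRoleInput{
+//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
+//        RoleSessionName: aws.String("example-session"),
+//        SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"),
+//        TokenCode:       aws.String("123456"),
+//    })
+//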
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
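+// A minimal sketch of such an assertion, assuming the caller imports
+// "github.com/aws/aws-sdk-go/aws/awserr" and "err" came from a call above:
+//
+//    if aerr, ok := err.(awserr.Error); ok {
+//        fmt.Println(aerr.Code(), aerr.Message())
+//    }
+//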
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRole for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
+ req, out := c.AssumeRoleRequest(input)
+ return out, req.Send()
+}
+
+// AssumeRoleWithContext is the same as AssumeRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) {
+ req, out := c.AssumeRoleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
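+
+// A minimal sketch of bounding the call with a deadline, assuming the caller
+// imports "context" and "time" ("svc" and "input" are illustrative names):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    out, err := svc.AssumeRoleWithContext(ctx, input)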
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithSAML operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the AssumeRoleWithSAMLRequest method.
+// req, resp := client.AssumeRoleWithSAMLRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
+ op := &request.Operation{
+ Name: opAssumeRoleWithSAML,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleWithSAMLInput{}
+ }
+
+ output = &AssumeRoleWithSAMLOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// AssumeRoleWithSAML API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// via a SAML authentication response. This operation provides a mechanism for
+// tying an enterprise identity store or directory to role-based AWS access
+// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML
+// with the other API operations that produce temporary credentials, see Requesting
+// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of
+// an access key ID, a secret access key, and a security token. Applications
+// can use these temporary security credentials to sign calls to AWS services.
+//
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML authentication
+// response's SessionNotOnOrAfter value, whichever is shorter. You can provide
+// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
+// duration setting for the role. This setting can have a value from 1 hour
+// to 12 hours. To learn how to view the maximum value for your role, see View
+// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRoleWithSAML can be used
+// to make API calls to any AWS service with the following exception: you cannot
+// call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies shouldn't exceed 2048 characters. Passing policies
+// to this operation returns new temporary credentials. The resulting session's
+// permissions are the intersection of the role's identity-based policy and
+// the session policies. You can use the role's temporary credentials in subsequent
+// AWS API calls to access resources in the account that owns the role. You
+// cannot use session policies to grant more permissions than those allowed
+// by the identity-based policy of the role that is being assumed. For more
+// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide.
+//
+// Before your application can call AssumeRoleWithSAML, you must configure your
+// SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
+// you must use AWS Identity and Access Management (IAM) to create a SAML provider
+// entity in your AWS account that represents your identity provider. You must
+// also create an IAM role that specifies this SAML provider in its trust policy.
+//
+// Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
+// The identity of the caller is validated by using keys in the metadata document
+// that is uploaded for the SAML provider entity for your identity provider.
+//
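+// A minimal sketch of the call shape (the ARNs are placeholder values, and
+// "encodedAssertion" stands in for the base64-encoded SAML response):
+//
+//    out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
+//        PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/example"),
+//        RoleArn:       aws.String("arn:aws:iam::123456789012:role/example"),
+//        SAMLAssertion: aws.String(encodedAssertion),
+//    })
+//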
+// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail
+// logs. The entry includes the value in the NameID element of the SAML assertion.
+// We recommend that you use a NameIDType that is not associated with any personally
+// identifiable information (PII). For example, you could instead use the Persistent
+// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
+//
+// For more information, see the following resources:
+//
+// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
+// in the IAM User Guide.
+//
+// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
+// in the IAM User Guide.
+//
+// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
+// in the IAM User Guide.
+//
+// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRoleWithSAML for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
+// The identity provider (IdP) reported that authentication failed. This might
+// be because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it
+// can also mean that the claim has expired or has been explicitly revoked.
+//
+// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+// The web identity token that was passed could not be validated by AWS. Get
+// a new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
+ req, out := c.AssumeRoleWithSAMLRequest(input)
+ return out, req.Send()
+}
+
+// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithSAML for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) {
+ req, out := c.AssumeRoleWithSAMLRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
+
+// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
+// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
+ op := &request.Operation{
+ Name: opAssumeRoleWithWebIdentity,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleWithWebIdentityInput{}
+ }
+
+ output = &AssumeRoleWithWebIdentityOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider. Example providers
+// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID
+// Connect-compatible identity provider.
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can
+// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/)
+// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/)
+// to uniquely identify a user. You can also supply the user with a consistent
+// identity throughout the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the AWS SDK for iOS Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
+// credentials. Therefore, you can distribute an application (for example, on
+// mobile devices) that requests temporary security credentials without including
+// long-term AWS credentials in the application. You also don't need to deploy
+// server-based proxy services that use long-term AWS credentials. Instead,
+// the identity of the caller is validated by using a token from the web identity
+// provider. For a comparison of AssumeRoleWithWebIdentity with the other API
+// operations that produce temporary credentials, see Requesting Temporary Security
+// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to AWS service API operations.
+//
+// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. You can provide a value from 900
+// seconds (15 minutes) up to the maximum session duration setting for the role.
+// This setting can have a value from 1 hour to 12 hours. To learn how to view
+// the maximum value for your role, see View the Maximum Session Duration Setting
+// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity can
+// be used to make API calls to any AWS service with the following exception:
+// you cannot call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies shouldn't exceed 2048 characters. Passing policies
+// to this operation returns new temporary credentials. The resulting session's
+// permissions are the intersection of the role's identity-based policy and
+// the session policies. You can use the role's temporary credentials in subsequent
+// AWS API calls to access resources in the account that owns the role. You
+// cannot use session policies to grant more permissions than those allowed
+// by the identity-based policy of the role that is being assumed. For more
+// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide.
+//
+// Before your application can call AssumeRoleWithWebIdentity, you must have
+// an identity token from a supported identity provider and create a role that
+// the application can assume. The role that your application assumes must trust
+// the identity provider that is associated with the identity token. In other
+// words, the identity provider must be specified in the role's trust policy.
+//
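+// A minimal sketch of the call shape (the ARN and session name are placeholder
+// values, and "tokenFromProvider" stands in for the provider-issued token):
+//
+//    out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//        RoleArn:          aws.String("arn:aws:iam::123456789012:role/example"),
+//        RoleSessionName:  aws.String("example-session"),
+//        WebIdentityToken: aws.String(tokenFromProvider),
+//    })
+//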
+// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail
+// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
+// of the provided Web Identity Token. We recommend that you avoid using any
+// personally identifiable information (PII) in this field. For example, you
+// could instead use a GUID or a pairwise identifier, as suggested in the OIDC
+// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
+//
+// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
+// API, see the following resources:
+//
+// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
+// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
+// Walk through the process of authenticating through Login with Amazon,
+// Facebook, or Google, getting temporary security credentials, and then
+// using those credentials to make a request to AWS.
+//
+// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and
+// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/).
+// These toolkits contain sample apps that show how to invoke the identity
+// providers, and then how to use the information from these providers to
+// get and use temporary security credentials.
+//
+// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
+// This article discusses web identity federation and shows an example of
+// how to use web identity federation to get access to content in Amazon
+// S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRoleWithWebIdentity for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
+// The identity provider (IdP) reported that authentication failed. This might
+// be because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it
+// can also mean that the claim has expired or has been explicitly revoked.
+//
+// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
+// The request could not be fulfilled because the non-AWS identity provider
+// (IdP) that was asked to verify the incoming identity token could not be reached.
+// This is often a transient error caused by network conditions. Retry the request
+// a limited number of times so that you don't exceed the request rate. If the
+// error persists, the non-AWS identity provider might be down or not responding.
+//
+// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+// The web identity token that was passed could not be validated by AWS. Get
+// a new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
+ req, out := c.AssumeRoleWithWebIdentityRequest(input)
+ return out, req.Send()
+}
+
+// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithWebIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) {
+ req, out := c.AssumeRoleWithWebIdentityRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
+
+// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
+// client's request for the DecodeAuthorizationMessage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DecodeAuthorizationMessageRequest method.
+// req, resp := client.DecodeAuthorizationMessageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
+ op := &request.Operation{
+ Name: opDecodeAuthorizationMessage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DecodeAuthorizationMessageInput{}
+ }
+
+ output = &DecodeAuthorizationMessageOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DecodeAuthorizationMessage API operation for AWS Security Token Service.
+//
+// Decodes additional information about the authorization status of a request
+// from an encoded message returned in response to an AWS request.
+//
+// For example, if a user is not authorized to perform an operation that he
+// or she has requested, the request returns a Client.UnauthorizedOperation
+// response (an HTTP 403 response). Some AWS operations additionally return
+// an encoded message that can provide details about this authorization failure.
+//
+// Only certain AWS operations return an encoded authorization message. The
+// documentation for an individual operation indicates whether that operation
+// returns an encoded message in addition to returning an HTTP code.
+//
+// The message is encoded because the details of the authorization status can
+// constitute privileged information that the user who requested the operation
+// should not see. To decode an authorization status message, a user must be
+// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
+// (sts:DecodeAuthorizationMessage) action.
+//
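+// A minimal sketch of the call shape ("encodedMessage" is an illustrative
+// variable holding the encoded message returned by the denied request):
+//
+//    out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+//        EncodedMessage: aws.String(encodedMessage),
+//    })
+//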
+// The decoded message includes the following type of information:
+//
+// * Whether the request was denied due to an explicit deny or due to the
+// absence of an explicit allow. For more information, see Determining Whether
+// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+// in the IAM User Guide.
+//
+// * The principal who made the request.
+//
+// * The requested action.
+//
+// * The requested resource.
+//
+// * The values of condition keys in the context of the user's request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation DecodeAuthorizationMessage for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
+// This error is returned if the message passed to DecodeAuthorizationMessage
+// was invalid. This can happen if the token contains invalid characters, such
+// as linebreaks.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
+ req, out := c.DecodeAuthorizationMessageRequest(input)
+ return out, req.Send()
+}
+
+// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DecodeAuthorizationMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
+ req, out := c.DecodeAuthorizationMessageRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetAccessKeyInfo = "GetAccessKeyInfo"
+
+// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the
+// client's request for the GetAccessKeyInfo operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetAccessKeyInfoRequest method.
+// req, resp := client.GetAccessKeyInfoRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
+func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) {
+ op := &request.Operation{
+ Name: opGetAccessKeyInfo,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetAccessKeyInfoInput{}
+ }
+
+ output = &GetAccessKeyInfoOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetAccessKeyInfo API operation for AWS Security Token Service.
+//
+// Returns the account identifier for the specified access key ID.
+//
+// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE)
+// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY).
+// For more information about access keys, see Managing Access Keys for IAM
+// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
+// in the IAM User Guide.
+//
+// When you pass an access key ID to this operation, it returns the ID of the
+// AWS account to which the keys belong. Access key IDs beginning with AKIA
+// are long-term credentials for an IAM user or the AWS account root user. Access
+// key IDs beginning with ASIA are temporary credentials that are created using
+// STS operations. If the account in the response belongs to you, you can sign
+// in as the root user and review your root user access keys. Then, you can
+// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html)
+// to learn which IAM user owns the keys. To learn who requested the temporary
+// credentials for an ASIA access key, view the STS events in your CloudTrail
+// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html).
+//
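+// A minimal sketch of the call shape (the key ID below is the documentation
+// example value, not a real credential):
+//
+//    out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
+//        AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+//    })
+//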
+// This operation does not indicate the state of the access key. The key might
+// be active, inactive, or deleted. Active keys might not have permissions to
+// perform an operation. Providing a deleted access key might return an error
+// that the key doesn't exist.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetAccessKeyInfo for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
+func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) {
+ req, out := c.GetAccessKeyInfoRequest(input)
+ return out, req.Send()
+}
+
+// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetAccessKeyInfo for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) {
+ req, out := c.GetAccessKeyInfoRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetCallerIdentity = "GetCallerIdentity"
+
+// GetCallerIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the GetCallerIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetCallerIdentity for more information on using the GetCallerIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetCallerIdentityRequest method.
+// req, resp := client.GetCallerIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
+ op := &request.Operation{
+ Name: opGetCallerIdentity,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetCallerIdentityInput{}
+ }
+
+ output = &GetCallerIdentityOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetCallerIdentity API operation for AWS Security Token Service.
+//
+// Returns details about the IAM user or role whose credentials are used to
+// call the operation.
+//
+// No permissions are required to perform this operation. If an administrator
+// adds a policy to your IAM user or role that explicitly denies access to the
+// sts:GetCallerIdentity action, you can still perform this operation. Permissions
+// are not required because the same information is returned when an IAM user
+// or role is denied access. To view an example response, see I Am Not Authorized
+// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa).
+//
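+// A minimal sketch of the call; the input struct has no fields to set:
+//
+//    out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
+//    if err == nil {
+//        fmt.Println(*out.Account, *out.Arn, *out.UserId)
+//    }
+//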
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
+ req, out := c.GetCallerIdentityRequest(input)
+ return out, req.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCallerIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+ req, out := c.GetCallerIdentityRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetFederationToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetFederationToken for more information on using the GetFederationToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetFederationTokenRequest method.
+// req, resp := client.GetFederationTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+ op := &request.Operation{
+ Name: opGetFederationToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetFederationTokenInput{}
+ }
+
+ output = &GetFederationTokenOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetFederationToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. You must
+// call the GetFederationToken operation using the long-term security credentials
+// of an IAM user. As a result, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other API operations that
+// produce temporary credentials, see Requesting Temporary Security Credentials
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider. In this case, we recommend
+// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// You can also call GetFederationToken using the security credentials of an
+// AWS account root user, but we do not recommend it. Instead, we recommend
+// that you create an IAM user for the purpose of the proxy application. Then
+// attach a policy to the IAM user that limits federated users to only the actions
+// and resources that they need to access. For more information, see IAM Best
+// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the IAM User Guide.
+//
+// The temporary credentials are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
+// is 43,200 seconds (12 hours). Temporary credentials that are obtained by
+// using AWS account root user credentials have a maximum duration of 3,600
+// seconds (1 hour).
+//
+// The temporary security credentials created by GetFederationToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot use these credentials to call any IAM API operations.
+//
+// * You cannot call any STS API operations except GetCallerIdentity.
+//
+// Permissions
+//
+// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies shouldn't exceed 2048 characters.
+//
+// Though the session policy parameters are optional, if you do not pass a policy,
+// then the resulting federated user session has no permissions. The only exception
+// is when the credentials are used to access a resource that has a resource-based
+// policy that specifically references the federated user session in the Principal
+// element of the policy. When you pass session policies, the session permissions
+// are the intersection of the IAM user policies and the session policies that
+// you pass. This gives you a way to further restrict the permissions for a
+// federated user. You cannot use session policies to grant more permissions
+// than those that are defined in the permissions policy of the IAM user. For
+// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide. For information about using GetFederationToken to
+// create temporary security credentials, see GetFederationToken—Federation
+// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetFederationToken for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
+ req, out := c.GetFederationTokenRequest(input)
+ return out, req.Send()
+}
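+
+// Illustrative sketch (assumes an STS client created elsewhere and a
+// policyJSON string holding an inline session policy; the Name, Policy, and
+// DurationSeconds fields are as documented in the AWS API reference, and the
+// values below are placeholders):
+//
+//    out, err := client.GetFederationToken(&sts.GetFederationTokenInput{
+//        Name:            aws.String("app-user"), // federated user name
+//        Policy:          aws.String(policyJSON), // inline session policy
+//        DurationSeconds: aws.Int64(3600),        // 1 hour
+//    })
+//    if err == nil {
+//        fmt.Println(out.Credentials) // temporary credentials for the federated user
+//    }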
+
+// GetFederationTokenWithContext is the same as GetFederationToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetFederationToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
+ req, out := c.GetFederationTokenRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest generates an "aws/request.Request" representing the
+// client's request for the GetSessionToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetSessionToken for more information on using the GetSessionToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the GetSessionTokenRequest method.
+// req, resp := client.GetSessionTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+ op := &request.Operation{
+ Name: opGetSessionToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetSessionTokenInput{}
+ }
+
+ output = &GetSessionTokenOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetSessionToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary credentials for an AWS account or IAM user. The
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use GetSessionToken if you want to use MFA to protect
+// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances.
+// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA
+// code that is associated with their MFA device. Using the temporary security
+// credentials that are returned from the call, IAM users can then make programmatic
+// calls to API operations that require MFA authentication. If you do not supply
+// a correct MFA code, then the API returns an access denied error. For a comparison
+// of GetSessionToken with the other API operations that produce temporary credentials,
+// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The GetSessionToken operation must be called by using the long-term AWS security
+// credentials of the AWS account root user or an IAM user. Credentials that
+// are created by IAM users are valid for the duration that you specify. This
+// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600
+// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials
+// based on account credentials can range from 900 seconds (15 minutes) up to
+// 3,600 seconds (1 hour), with a default of 1 hour.
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot call any IAM API operations unless MFA authentication information
+// is included in the request.
+//
+// * You cannot call any STS API operations except AssumeRole or
+// GetCallerIdentity.
+//
+// We recommend that you do not call GetSessionToken with AWS account root user
+// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with AWS.
+//
+// The credentials that are returned by GetSessionToken are based on permissions
+// associated with the user whose credentials were used to call the operation.
+// If GetSessionToken is called using AWS account root user credentials, the
+// temporary credentials have root user permissions. Similarly, if GetSessionToken
+// is called using the credentials of an IAM user, the temporary credentials
+// have the same permissions as the IAM user.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetSessionToken for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
+ req, out := c.GetSessionTokenRequest(input)
+ return out, req.Send()
+}
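+
+// Illustrative sketch (assumes an STS client created elsewhere; the serial
+// number and token code below are placeholders, and the field names follow
+// the AWS API reference):
+//
+//    out, err := client.GetSessionToken(&sts.GetSessionTokenInput{
+//        DurationSeconds: aws.Int64(3600),
+//        SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // MFA device
+//        TokenCode:       aws.String("123456"),                             // current MFA code
+//    })
+//    if err == nil {
+//        fmt.Println(out.Credentials)
+//    }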
+
+// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSessionToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
+ req, out := c.GetSessionTokenRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+type AssumeRoleInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. The value can range from 900
+ // seconds (15 minutes) up to the maximum session duration setting for the role.
+ // This setting can have a value from 1 hour to 12 hours. If you specify a value
+ // higher than this setting, the operation fails. For example, if you specify
+ // a session duration of 12 hours, but your administrator set the maximum session
+ // duration to 6 hours, your operation fails. To learn how to view the maximum
+ // value for your role, see View the Maximum Session Duration Setting for a
+ // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request
+ // to the federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see Creating a URL that Enables Federated Users to Access the
+ // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // A unique identifier that might be required when you assume a role in another
+ // account. If the administrator of the account to which the role belongs provided
+ // you with an external ID, then provide that value in the ExternalId parameter.
+ // This value can be any string, such as a passphrase or account number. A cross-account
+ // role is usually set up to trust everyone in an account. Therefore, the administrator
+ // of the trusting account might send an external ID to the administrator of
+ // the trusted account. That way, only someone with the ID can assume the role,
+ // rather than everyone in the account. For more information about the external
+ // ID, see How to Use an External ID When Granting Access to Your AWS Resources
+ // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+ // in the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@:/-
+ ExternalId *string `min:"2" type:"string"`
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ //
+ // This parameter is optional. Passing policies to this operation returns new
+ // temporary credentials. The resulting session's permissions are the intersection
+ // of the role's identity-based policy and the session policies. You can use
+ // the role's temporary credentials in subsequent AWS API calls to access resources
+ // in the account that owns the role. You cannot use session policies to grant
+ // more permissions than those allowed by the identity-based policy of the role
+ // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ //
+ // The plain text that you use for both inline and managed session policies
+ // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+ // character from the space character to the end of the valid character list
+ // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ Policy *string `min:"1" type:"string"`
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+ // to use as managed session policies. The policies must exist in the same account
+ // as the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plain text that you use for both inline and managed session
+ // policies shouldn't exceed 2048 characters. For more information about ARNs,
+ // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the AWS General Reference.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's identity-based
+ // policy and the session policies. You can use the role's temporary credentials
+ // in subsequent AWS API calls to access resources in the account that owns
+ // the role. You cannot use session policies to grant more permissions than
+ // those allowed by the identity-based policy of the role that is being assumed.
+ // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ PolicyArns []*PolicyDescriptorType `type:"list"`
+
+ // The Amazon Resource Name (ARN) of the role to assume.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // An identifier for the assumed role session.
+ //
+ // Use the role session name to uniquely identify a session when the same role
+ // is assumed by different principals or for different reasons. In cross-account
+ // scenarios, the role session name is visible to, and can be logged by the
+ // account that owns the role. The role session name is also used in the ARN
+ // of the assumed role principal. This means that subsequent cross-account API
+ // requests that use the temporary security credentials will expose the role
+ // session name to the external account in their AWS CloudTrail logs.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ //
+ // RoleSessionName is a required field
+ RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy
+ // of the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as GAHT12345678)
+ // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ SerialNumber *string `min:"9" type:"string"`
+
+ // The value provided by the MFA device, if the trust policy of the role being
+ // assumed requires MFA (that is, if the policy includes a condition that tests
+ // for MFA). If the role being assumed requires MFA and if the TokenCode value
+ // is missing or expired, the AssumeRole call returns an "access denied" error.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
+ TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.ExternalId != nil && len(*s.ExternalId) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+ if s.RoleSessionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
+ }
+ if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
+ }
+ if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+ invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+ }
+ if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+ }
+ if s.PolicyArns != nil {
+ for i, v := range s.PolicyArns {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetExternalId sets the ExternalId field's value.
+func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput {
+ s.ExternalId = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput {
+ s.Policy = &v
+ return s
+}
+
+// SetPolicyArns sets the PolicyArns field's value.
+func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput {
+ s.PolicyArns = v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetRoleSessionName sets the RoleSessionName field's value.
+func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput {
+ s.RoleSessionName = &v
+ return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput {
+ s.SerialNumber = &v
+ return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput {
+ s.TokenCode = &v
+ return s
+}
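+
+// Illustrative sketch: the setters above return the receiver, so an
+// AssumeRoleInput can be built fluently and checked with Validate before the
+// request is sent. The ARN and session name are placeholders.
+//
+//    input := (&sts.AssumeRoleInput{}).
+//        SetRoleArn("arn:aws:iam::123456789012:role/example").
+//        SetRoleSessionName("example-session").
+//        SetDurationSeconds(3600)
+//    if err := input.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams listing each violated constraint
+//    }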
+
+// Contains the response to a successful AssumeRole request, including temporary
+// AWS credentials that can be used to make AWS requests.
+type AssumeRoleOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+ // that you can use to refer to the resulting temporary security credentials.
+ // For example, you can reference these credentials as a principal in a resource-based
+ // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+ // that you specified when you called AssumeRole.
+ AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed.
+ // We strongly recommend that you make no assumptions about the maximum size.
+ Credentials *Credentials `type:"structure"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AssumeRoleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleOutput) GoString() string {
+ return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput {
+ s.AssumedRoleUser = v
+ return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+type AssumeRoleWithSAMLInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. Your role session lasts for
+ // the duration that you specify for the DurationSeconds parameter, or until
+ // the time specified in the SAML authentication response's SessionNotOnOrAfter
+ // value, whichever is shorter. You can provide a DurationSeconds value from
+ // 900 seconds (15 minutes) up to the maximum session duration setting for the
+ // role. This setting can have a value from 1 hour to 12 hours. If you specify
+ // a value higher than this setting, the operation fails. For example, if you
+ // specify a session duration of 12 hours, but your administrator set the maximum
+ // session duration to 6 hours, your operation fails. To learn how to view the
+ // maximum value for your role, see View the Maximum Session Duration Setting
+ // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request
+ // to the federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see Creating a URL that Enables Federated Users to Access the
+ // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ //
+ // This parameter is optional. Passing policies to this operation returns new
+ // temporary credentials. The resulting session's permissions are the intersection
+ // of the role's identity-based policy and the session policies. You can use
+ // the role's temporary credentials in subsequent AWS API calls to access resources
+ // in the account that owns the role. You cannot use session policies to grant
+ // more permissions than those allowed by the identity-based policy of the role
+ // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ //
+ // The plain text that you use for both inline and managed session policies
+ // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+ // character from the space character to the end of the valid character list
+ // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ Policy *string `min:"1" type:"string"`
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+ // to use as managed session policies. The policies must exist in the same account
+ // as the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plain text that you use for both inline and managed session
+ // policies shouldn't exceed 2048 characters. For more information about ARNs,
+ // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the AWS General Reference.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's identity-based
+ // policy and the session policies. You can use the role's temporary credentials
+ // in subsequent AWS API calls to access resources in the account that owns
+ // the role. You cannot use session policies to grant more permissions than
+ // those allowed by the identity-based policy of the role that is being assumed.
+ // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ PolicyArns []*PolicyDescriptorType `type:"list"`
+
+ // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
+ // the IdP.
+ //
+ // PrincipalArn is a required field
+ PrincipalArn *string `min:"20" type:"string" required:"true"`
+
+ // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // The base-64 encoded SAML authentication response provided by the IdP.
+ //
+ // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+ // in the IAM User Guide.
+ //
+ // SAMLAssertion is a required field
+ SAMLAssertion *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithSAMLInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.PrincipalArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("PrincipalArn"))
+ }
+ if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+ if s.SAMLAssertion == nil {
+ invalidParams.Add(request.NewErrParamRequired("SAMLAssertion"))
+ }
+ if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
+ }
+ if s.PolicyArns != nil {
+ for i, v := range s.PolicyArns {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput {
+ s.Policy = &v
+ return s
+}
+
+// SetPolicyArns sets the PolicyArns field's value.
+func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput {
+ s.PolicyArns = v
+ return s
+}
+
+// SetPrincipalArn sets the PrincipalArn field's value.
+func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput {
+ s.PrincipalArn = &v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetSAMLAssertion sets the SAMLAssertion field's value.
+func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput {
+ s.SAMLAssertion = &v
+ return s
+}
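+
+// Illustrative sketch (assumes samlResponse holds the base-64 encoded SAML
+// authentication response obtained from the IdP; the ARNs are placeholders):
+//
+//    input := (&sts.AssumeRoleWithSAMLInput{}).
+//        SetRoleArn("arn:aws:iam::123456789012:role/saml-example").
+//        SetPrincipalArn("arn:aws:iam::123456789012:saml-provider/MySAMLIdP").
+//        SetSAMLAssertion(samlResponse)
+//    if err := input.Validate(); err != nil {
+//        // handle invalid parameters before sending the request
+//    }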
+
+// Contains the response to a successful AssumeRoleWithSAML request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type AssumeRoleWithSAMLOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifiers for the temporary security credentials that the operation
+ // returns.
+ AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+ // The value of the Recipient attribute of the SubjectConfirmationData element
+ // of the SAML assertion.
+ Audience *string `type:"string"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed.
+ // We strongly recommend that you make no assumptions about the maximum size.
+ Credentials *Credentials `type:"structure"`
+
+ // The value of the Issuer element of the SAML assertion.
+ Issuer *string `type:"string"`
+
+ // A hash value based on the concatenation of the Issuer response value, the
+ // AWS account ID, and the friendly name (the last part of the ARN) of the SAML
+ // provider in IAM. The combination of NameQualifier and Subject can be used
+ // to uniquely identify a federated user.
+ //
+ // The following pseudocode shows how the hash value is calculated:
+ //
+ // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP"
+ // ) )
+ NameQualifier *string `type:"string"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize *int64 `type:"integer"`
+
+ // The value of the NameID element in the Subject element of the SAML assertion.
+ Subject *string `type:"string"`
+
+ // The format of the name ID, as defined by the Format attribute in the NameID
+ // element of the SAML assertion. Typical examples of the format are transient
+ // or persistent.
+ //
+ // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format,
+ // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient
+ // is returned as transient. If the format includes any other prefix, the format
+ // is returned with no modifications.
+ SubjectType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLOutput) GoString() string {
+ return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput {
+ s.AssumedRoleUser = v
+ return s
+}
+
+// SetAudience sets the Audience field's value.
+func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput {
+ s.Audience = &v
+ return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetIssuer sets the Issuer field's value.
+func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput {
+ s.Issuer = &v
+ return s
+}
+
+// SetNameQualifier sets the NameQualifier field's value.
+func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput {
+ s.NameQualifier = &v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+// SetSubject sets the Subject field's value.
+func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput {
+ s.Subject = &v
+ return s
+}
+
+// SetSubjectType sets the SubjectType field's value.
+func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput {
+ s.SubjectType = &v
+ return s
+}
+
+type AssumeRoleWithWebIdentityInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. The value can range from 900
+ // seconds (15 minutes) up to the maximum session duration setting for the role.
+ // This setting can have a value from 1 hour to 12 hours. If you specify a value
+ // higher than this setting, the operation fails. For example, if you specify
+ // a session duration of 12 hours, but your administrator set the maximum session
+ // duration to 6 hours, your operation fails. To learn how to view the maximum
+ // value for your role, see View the Maximum Session Duration Setting for a
+ // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request
+ // to the federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see Creating a URL that Enables Federated Users to Access the
+ // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ //
+ // This parameter is optional. Passing policies to this operation returns new
+ // temporary credentials. The resulting session's permissions are the intersection
+ // of the role's identity-based policy and the session policies. You can use
+ // the role's temporary credentials in subsequent AWS API calls to access resources
+ // in the account that owns the role. You cannot use session policies to grant
+ // more permissions than those allowed by the identity-based policy of the role
+ // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ //
+ // The plain text that you use for both inline and managed session policies
+ // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+ // character from the space character to the end of the valid character list
+ // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ Policy *string `min:"1" type:"string"`
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+ // to use as managed session policies. The policies must exist in the same account
+ // as the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plain text that you use for both inline and managed session
+ // policies shouldn't exceed 2048 characters. For more information about ARNs,
+ // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the AWS General Reference.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's identity-based
+ // policy and the session policies. You can use the role's temporary credentials
+ // in subsequent AWS API calls to access resources in the account that owns
+ // the role. You cannot use session policies to grant more permissions than
+ // those allowed by the identity-based policy of the role that is being assumed.
+ // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ PolicyArns []*PolicyDescriptorType `type:"list"`
+
+ // The fully qualified host component of the domain name of the identity provider.
+ //
+ // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com
+ // and graph.facebook.com are the only supported identity providers for OAuth
+ // 2.0 access tokens. Do not include URL schemes and port numbers.
+ //
+ // Do not specify this value for OpenID Connect ID tokens.
+ ProviderId *string `min:"4" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // An identifier for the assumed role session. Typically, you pass the name
+ // or identifier that is associated with the user who is using your application.
+ // That way, the temporary security credentials that your application will use
+ // are associated with that user. This session name is included as part of the
+ // ARN and assumed role ID in the AssumedRoleUser response element.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ //
+ // RoleSessionName is a required field
+ RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+ // The OAuth 2.0 access token or OpenID Connect ID token that is provided by
+ // the identity provider. Your application must get this token by authenticating
+ // the user who is using your application with a web identity provider before
+ // the application makes an AssumeRoleWithWebIdentity call.
+ //
+ // WebIdentityToken is a required field
+ WebIdentityToken *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithWebIdentityInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithWebIdentityInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithWebIdentityInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.ProviderId != nil && len(*s.ProviderId) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+ if s.RoleSessionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
+ }
+ if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
+ }
+ if s.WebIdentityToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("WebIdentityToken"))
+ }
+ if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4))
+ }
+ if s.PolicyArns != nil {
+ for i, v := range s.PolicyArns {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput {
+ s.Policy = &v
+ return s
+}
+
+// SetPolicyArns sets the PolicyArns field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput {
+ s.PolicyArns = v
+ return s
+}
+
+// SetProviderId sets the ProviderId field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput {
+ s.ProviderId = &v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetRoleSessionName sets the RoleSessionName field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput {
+ s.RoleSessionName = &v
+ return s
+}
+
+// SetWebIdentityToken sets the WebIdentityToken field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput {
+ s.WebIdentityToken = &v
+ return s
+}
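+
+// Illustrative sketch (assumes idToken holds an OpenID Connect ID token
+// already obtained from the identity provider; ProviderId is omitted because
+// it applies only to OAuth 2.0 access tokens, and the ARN is a placeholder):
+//
+//    input := (&sts.AssumeRoleWithWebIdentityInput{}).
+//        SetRoleArn("arn:aws:iam::123456789012:role/web-example").
+//        SetRoleSessionName("app-user-42").
+//        SetWebIdentityToken(idToken)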
+
+// Contains the response to a successful AssumeRoleWithWebIdentity request,
+// including temporary AWS credentials that can be used to make AWS requests.
+type AssumeRoleWithWebIdentityOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+ // that you can use to refer to the resulting temporary security credentials.
+ // For example, you can reference these credentials as a principal in a resource-based
+ // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+ // that you specified when you called AssumeRole.
+ AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+ // The intended audience (also known as client ID) of the web identity token.
+ // This is traditionally the client identifier issued to the application that
+ // requested the web identity token.
+ Audience *string `type:"string"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security token.
+ //
+ // The size of the security token that STS API operations return is not fixed.
+ // We strongly recommend that you make no assumptions about the maximum size.
+ Credentials *Credentials `type:"structure"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize *int64 `type:"integer"`
+
+ // The issuing authority of the web identity token presented. For OpenID Connect
+ // ID tokens, this contains the value of the iss field. For OAuth 2.0 access
+ // tokens, this contains the value of the ProviderId parameter that was passed
+ // in the AssumeRoleWithWebIdentity request.
+ Provider *string `type:"string"`
+
+ // The unique user identifier that is returned by the identity provider. This
+ // identifier is associated with the WebIdentityToken that was submitted with
+ // the AssumeRoleWithWebIdentity call. The identifier is typically unique to
+ // the user and the application that acquired the WebIdentityToken (pairwise
+ // identifier). For OpenID Connect ID tokens, this field contains the value
+ // returned by the identity provider as the token's sub (Subject) claim.
+ SubjectFromWebIdentityToken *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithWebIdentityOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithWebIdentityOutput) GoString() string {
+ return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput {
+ s.AssumedRoleUser = v
+ return s
+}
+
+// SetAudience sets the Audience field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput {
+ s.Audience = &v
+ return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+// SetProvider sets the Provider field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput {
+ s.Provider = &v
+ return s
+}
+
+// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput {
+ s.SubjectFromWebIdentityToken = &v
+ return s
+}
+
+// The identifiers for the temporary security credentials that the operation
+// returns.
+type AssumedRoleUser struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the temporary security credentials that are returned from the
+ // AssumeRole action. For more information about ARNs and how to use them in
+ // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+ // in Using IAM.
+ //
+ // Arn is a required field
+ Arn *string `min:"20" type:"string" required:"true"`
+
+ // A unique identifier that contains the role ID and the role session name of
+ // the role that is being assumed. The role ID is generated by AWS when the
+ // role is created.
+ //
+ // AssumedRoleId is a required field
+ AssumedRoleId *string `min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumedRoleUser) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumedRoleUser) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser {
+ s.Arn = &v
+ return s
+}
+
+// SetAssumedRoleId sets the AssumedRoleId field's value.
+func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser {
+ s.AssumedRoleId = &v
+ return s
+}
+
+// AWS credentials for API authentication.
+type Credentials struct {
+ _ struct{} `type:"structure"`
+
+ // The access key ID that identifies the temporary security credentials.
+ //
+ // AccessKeyId is a required field
+ AccessKeyId *string `min:"16" type:"string" required:"true"`
+
+ // The date on which the current credentials expire.
+ //
+ // Expiration is a required field
+ Expiration *time.Time `type:"timestamp" required:"true"`
+
+ // The secret access key that can be used to sign requests.
+ //
+ // SecretAccessKey is a required field
+ SecretAccessKey *string `type:"string" required:"true"`
+
+ // The token that users must pass to the service API to use the temporary credentials.
+ //
+ // SessionToken is a required field
+ SessionToken *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Credentials) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Credentials) GoString() string {
+ return s.String()
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *Credentials) SetAccessKeyId(v string) *Credentials {
+ s.AccessKeyId = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *Credentials) SetExpiration(v time.Time) *Credentials {
+ s.Expiration = &v
+ return s
+}
+
+// SetSecretAccessKey sets the SecretAccessKey field's value.
+func (s *Credentials) SetSecretAccessKey(v string) *Credentials {
+ s.SecretAccessKey = &v
+ return s
+}
+
+// SetSessionToken sets the SessionToken field's value.
+func (s *Credentials) SetSessionToken(v string) *Credentials {
+ s.SessionToken = &v
+ return s
+}
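+
+// Illustrative sketch: temporary Credentials returned by the Assume* and
+// Get* operations can seed a static provider for follow-on service clients.
+// This assumes the caller imports the aws and aws/credentials packages and
+// that out is a response from one of the operations above.
+//
+//    c := out.Credentials
+//    provider := credentials.NewStaticCredentials(
+//        aws.StringValue(c.AccessKeyId),
+//        aws.StringValue(c.SecretAccessKey),
+//        aws.StringValue(c.SessionToken),
+//    )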
+
+type DecodeAuthorizationMessageInput struct {
+ _ struct{} `type:"structure"`
+
+ // The encoded message that was returned with the response.
+ //
+ // EncodedMessage is a required field
+ EncodedMessage *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DecodeAuthorizationMessageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecodeAuthorizationMessageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DecodeAuthorizationMessageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"}
+ if s.EncodedMessage == nil {
+ invalidParams.Add(request.NewErrParamRequired("EncodedMessage"))
+ }
+ if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEncodedMessage sets the EncodedMessage field's value.
+func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput {
+ s.EncodedMessage = &v
+ return s
+}
+
+// A document that contains additional information about the authorization status
+// of a request from an encoded message that is returned in response to an AWS
+// request.
+type DecodeAuthorizationMessageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An XML document that contains the decoded message.
+ DecodedMessage *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DecodeAuthorizationMessageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecodeAuthorizationMessageOutput) GoString() string {
+ return s.String()
+}
+
+// SetDecodedMessage sets the DecodedMessage field's value.
+func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput {
+ s.DecodedMessage = &v
+ return s
+}
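+
+// Illustrative sketch (assumes encoded holds the encoded authorization
+// failure message returned with a denied request, and uses the
+// DecodeAuthorizationMessage operation defined earlier in this file):
+//
+//    out, err := client.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+//        EncodedMessage: aws.String(encoded),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.DecodedMessage)) // details of the denial
+//    }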
+
+// Identifiers for the federated user that is associated with the credentials.
+type FederatedUser struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN that specifies the federated user that is associated with the credentials.
+ // For more information about ARNs and how to use them in policies, see IAM
+ // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+ // in Using IAM.
+ //
+ // Arn is a required field
+ Arn *string `min:"20" type:"string" required:"true"`
+
+ // The string that identifies the federated user associated with the credentials,
+ // similar to the unique ID of an IAM user.
+ //
+ // FederatedUserId is a required field
+ FederatedUserId *string `min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s FederatedUser) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FederatedUser) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *FederatedUser) SetArn(v string) *FederatedUser {
+ s.Arn = &v
+ return s
+}
+
+// SetFederatedUserId sets the FederatedUserId field's value.
+func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser {
+ s.FederatedUserId = &v
+ return s
+}
+
+type GetAccessKeyInfoInput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifier of an access key.
+ //
+ // This parameter allows (through its regex pattern) a string of characters
+ // that can consist of any upper- or lowercase letter or digit.
+ //
+ // AccessKeyId is a required field
+ AccessKeyId *string `min:"16" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetAccessKeyInfoInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAccessKeyInfoInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetAccessKeyInfoInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"}
+ if s.AccessKeyId == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccessKeyId"))
+ }
+ if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 {
+ invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput {
+ s.AccessKeyId = &v
+ return s
+}
+
+type GetAccessKeyInfoOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The number used to identify the AWS account.
+ Account *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetAccessKeyInfoOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAccessKeyInfoOutput) GoString() string {
+ return s.String()
+}
+
+// SetAccount sets the Account field's value.
+func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput {
+ s.Account = &v
+ return s
+}
+
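+// An assumed usage sketch: GetAccessKeyInfo maps an access key ID to the
+// account that owns it. svc is a previously constructed *sts.STS, and the
+// key below is AWS's documented example value:
+//
+// out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
+//     AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+// })
+// if err == nil {
+//     fmt.Println("owning account:", aws.StringValue(out.Account))
+// }
+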
+type GetCallerIdentityInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetCallerIdentityInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCallerIdentityInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful GetCallerIdentity request, including
+// information about the entity making the request.
+type GetCallerIdentityOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The AWS account ID number of the account that owns or contains the calling
+ // entity.
+ Account *string `type:"string"`
+
+ // The AWS ARN associated with the calling entity.
+ Arn *string `min:"20" type:"string"`
+
+ // The unique identifier of the calling entity. The exact value depends on the
+ // type of entity that is making the call. The values returned are those listed
+ // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
+ // found on the Policy Variables reference page in the IAM User Guide.
+ UserId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetCallerIdentityOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCallerIdentityOutput) GoString() string {
+ return s.String()
+}
+
+// SetAccount sets the Account field's value.
+func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput {
+ s.Account = &v
+ return s
+}
+
+// SetArn sets the Arn field's value.
+func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput {
+ s.Arn = &v
+ return s
+}
+
+// SetUserId sets the UserId field's value.
+func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput {
+ s.UserId = &v
+ return s
+}
+
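+// A minimal sketch of the common "who am I" call, assuming an existing
+// session sess:
+//
+// svc := sts.New(sess)
+// id, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
+// if err == nil {
+//     fmt.Printf("account=%s arn=%s\n", aws.StringValue(id.Account), aws.StringValue(id.Arn))
+// }
+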
+type GetFederationTokenInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, that the session should last. Acceptable durations
+ // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds
+ // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained
+ // using AWS account root user credentials are restricted to a maximum of 3,600
+ // seconds (one hour). If the specified duration is longer than one hour, the
+ // session obtained by using root user credentials defaults to one hour.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // The name of the federated user. The name is used as an identifier for the
+ // temporary security credentials (such as Bob). For example, you can reference
+ // the federated user name in a resource-based policy, such as in an Amazon
+ // S3 bucket policy.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ //
+ // Name is a required field
+ Name *string `min:"2" type:"string" required:"true"`
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ //
+ // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // to this operation. You can pass a single JSON policy document to use as an
+ // inline session policy. You can also specify up to 10 managed policies to
+ // use as managed session policies.
+ //
+ // This parameter is optional. However, if you do not pass any session policies,
+ // then the resulting federated user session has no permissions. The only exception
+ // is when the credentials are used to access a resource that has a resource-based
+ // policy that specifically references the federated user session in the Principal
+ // element of the policy.
+ //
+ // When you pass session policies, the session permissions are the intersection
+ // of the IAM user policies and the session policies that you pass. This gives
+ // you a way to further restrict the permissions for a federated user. You cannot
+ // use session policies to grant more permissions than those that are defined
+ // in the permissions policy of the IAM user. For more information, see Session
+ // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ //
+ // The plain text that you use for both inline and managed session policies
+ // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+ // character from the space character to the end of the valid character list
+ // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ Policy *string `min:"1" type:"string"`
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+ // to use as a managed session policy. The policies must exist in the same account
+ // as the IAM user that is requesting federated access.
+ //
+ // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // to this operation. You can pass a single JSON policy document to use as an
+ // inline session policy. You can also specify up to 10 managed policies to
+ // use as managed session policies. The plain text that you use for both inline
+ // and managed session policies shouldn't exceed 2048 characters. You can provide
+ // up to 10 managed policy ARNs. For more information about ARNs, see Amazon
+ // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the AWS General Reference.
+ //
+ // This parameter is optional. However, if you do not pass any session policies,
+ // then the resulting federated user session has no permissions. The only exception
+ // is when the credentials are used to access a resource that has a resource-based
+ // policy that specifically references the federated user session in the Principal
+ // element of the policy.
+ //
+ // When you pass session policies, the session permissions are the intersection
+ // of the IAM user policies and the session policies that you pass. This gives
+ // you a way to further restrict the permissions for a federated user. You cannot
+ // use session policies to grant more permissions than those that are defined
+ // in the permissions policy of the IAM user. For more information, see Session
+ // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ PolicyArns []*PolicyDescriptorType `type:"list"`
+}
+
+// String returns the string representation
+func (s GetFederationTokenInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFederationTokenInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetFederationTokenInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 2))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.PolicyArns != nil {
+ for i, v := range s.PolicyArns {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput {
+ s.Name = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput {
+ s.Policy = &v
+ return s
+}
+
+// SetPolicyArns sets the PolicyArns field's value.
+func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput {
+ s.PolicyArns = v
+ return s
+}
+
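+// A hedged sketch tying the session-policy notes above together: the
+// federated session's permissions are the intersection of the calling IAM
+// user's policies and the session policies passed here. svc and policyJSON
+// are assumptions, not defined in this file:
+//
+// in := (&sts.GetFederationTokenInput{}).
+//     SetName("Bob").
+//     SetDurationSeconds(3600).
+//     SetPolicy(policyJSON) // an inline session policy document
+// if err := in.Validate(); err != nil {
+//     return err
+// }
+// out, err := svc.GetFederationToken(in)
+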
+// Contains the response to a successful GetFederationToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type GetFederationTokenOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed.
+ // We strongly recommend that you make no assumptions about the maximum size.
+ Credentials *Credentials `type:"structure"`
+
+ // Identifiers for the federated user associated with the credentials (such
+ // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You
+ // can use the federated user's ARN in your resource-based policies, such as
+ // an Amazon S3 bucket policy.
+ FederatedUser *FederatedUser `type:"structure"`
+
+ // A percentage value indicating the size of the policy in packed form. The
+ // service rejects policies for which the packed size is greater than 100 percent
+ // of the allowed value.
+ PackedPolicySize *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s GetFederationTokenOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFederationTokenOutput) GoString() string {
+ return s.String()
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetFederatedUser sets the FederatedUser field's value.
+func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput {
+ s.FederatedUser = v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+type GetSessionTokenInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, that the credentials should remain valid. Acceptable
+ // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600
+ // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions
+ // for AWS account owners are restricted to a maximum of 3,600 seconds (one
+ // hour). If the duration is longer than one hour, the session for AWS account
+ // owners defaults to one hour.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // The identification number of the MFA device that is associated with the IAM
+ // user who is making the GetSessionToken call. Specify this value if the IAM
+ // user has a policy that requires MFA authentication. The value is either the
+ // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource
+ // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ // You can find the device for an IAM user by going to the AWS Management Console
+ // and viewing the user's security credentials.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@:/-
+ SerialNumber *string `min:"9" type:"string"`
+
+ // The value provided by the MFA device, if MFA is required. If any policy requires
+ // the IAM user to submit an MFA code, specify this value. If MFA authentication
+ // is required, the user must provide a code when requesting a set of temporary
+ // security credentials. A user who fails to provide the code receives an "access
+ // denied" response when requesting resources that require MFA authentication.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
+ TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetSessionTokenInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+ invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+ }
+ if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput {
+ s.SerialNumber = &v
+ return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
+ s.TokenCode = &v
+ return s
+}
+
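+// A usage sketch for the MFA fields described above; the serial number and
+// token code are illustrative values only:
+//
+// out, err := svc.GetSessionToken((&sts.GetSessionTokenInput{}).
+//     SetDurationSeconds(3600).
+//     SetSerialNumber("arn:aws:iam::123456789012:mfa/user").
+//     SetTokenCode("123456"))
+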
+// Contains the response to a successful GetSessionToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type GetSessionTokenOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed.
+ // We strongly recommend that you make no assumptions about the maximum size.
+ Credentials *Credentials `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenOutput) GoString() string {
+ return s.String()
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput {
+ s.Credentials = v
+ return s
+}
+
+// A reference to the IAM managed policy that is passed as a session policy
+// for a role session or a federated user session.
+type PolicyDescriptorType struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session
+ // policy for the role. For more information about ARNs, see Amazon Resource
+ // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the AWS General Reference.
+ Arn *string `locationName:"arn" min:"20" type:"string"`
+}
+
+// String returns the string representation
+func (s PolicyDescriptorType) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PolicyDescriptorType) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PolicyDescriptorType) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"}
+ if s.Arn != nil && len(*s.Arn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("Arn", 20))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetArn sets the Arn field's value.
+func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType {
+ s.Arn = &v
+ return s
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
new file mode 100644
index 000000000..d5307fcaa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
@@ -0,0 +1,11 @@
+package sts
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+func init() {
+ initRequest = customizeRequest
+}
+
+func customizeRequest(r *request.Request) {
+ r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
new file mode 100644
index 000000000..fcb720dca
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
@@ -0,0 +1,108 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sts provides the client and types for making API
+// requests to AWS Security Token Service.
+//
+// The AWS Security Token Service (STS) is a web service that enables you to
+// request temporary, limited-privilege credentials for AWS Identity and Access
+// Management (IAM) users or for users that you authenticate (federated users).
+// This guide provides descriptions of the STS API. For more detailed information
+// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+//
+// For information about setting up signatures and authorization through the
+// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
+// in the AWS General Reference. For general information about the Query API,
+// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in Using IAM. For information about using security tokens with other AWS
+// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
+// in the IAM User Guide.
+//
+// If you're new to AWS and need additional technical information about a specific
+// AWS product, you can find the product's technical documentation at
+// http://aws.amazon.com/documentation/.
+//
+// Endpoints
+//
+// By default, AWS Security Token Service (STS) is available as a global service,
+// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com.
+// Global requests map to the US East (N. Virginia) region. AWS recommends using
+// Regional AWS STS endpoints instead of the global endpoint to reduce latency,
+// build in redundancy, and increase session token validity. For more information,
+// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// Most AWS Regions are enabled for operations in all AWS services by default.
+// Those Regions are automatically activated for use with AWS STS. Some Regions,
+// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more
+// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html)
+// in the AWS General Reference. When you enable these AWS Regions, they are
+// automatically activated for use with AWS STS. You cannot activate the STS
+// endpoint for a Region that is disabled. Tokens that are valid in all AWS
+// Regions are longer than tokens that are valid in Regions that are enabled
+// by default. Changing this setting might affect existing systems where you
+// temporarily store tokens. For more information, see Managing Global Endpoint
+// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens)
+// in the IAM User Guide.
+//
+// After you activate a Region for use with AWS STS, you can direct AWS STS
+// API calls to that Region. AWS STS recommends that you provide both the Region
+// and endpoint when you make calls to a Regional endpoint. You can provide
+// the Region alone for manually enabled Regions, such as Asia Pacific (Hong
+// Kong). In this case, the calls are directed to the STS Regional endpoint.
+// However, if you provide the Region alone for Regions enabled by default,
+// the calls are directed to the global endpoint of https://sts.amazonaws.com.
+//
+// To view the list of AWS STS endpoints and whether they are active by default,
+// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code)
+// in the IAM User Guide.
+//
+// Recording API requests
+//
+// STS supports AWS CloudTrail, which is a service that records AWS calls for
+// your AWS account and delivers log files to an Amazon S3 bucket. By using
+// information collected by CloudTrail, you can determine what requests were
+// successfully made to STS, who made the request, when it was made, and so
+// on.
+//
+// If you activate AWS STS endpoints in Regions other than the default global
+// endpoint, then you must also turn on CloudTrail logging in those Regions.
+// This is necessary to record any AWS STS API calls that are made in those
+// Regions. For more information, see Turning On CloudTrail in Additional Regions
+// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html)
+// in the AWS CloudTrail User Guide.
+//
+// AWS Security Token Service (STS) is a global service with a single endpoint
+// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls
+// to a global service. However, because this endpoint is physically located
+// in the US East (N. Virginia) Region, your logs list us-east-1 as the event
+// Region. CloudTrail does not write these logs to the US East (Ohio) Region
+// unless you choose to include global service logs in that Region. CloudTrail
+// writes calls to all Regional endpoints to their respective Regions. For example,
+// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio)
+// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU
+// (Frankfurt) Region.
+//
+// To learn more about CloudTrail, including how to turn it on and find your
+// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
+//
+// See sts package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
+//
+// Using the Client
+//
+// To contact AWS Security Token Service with the SDK, use the New function to
+// create a new service client. With that client you can make API requests to
+// the service. These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Security Token Service client STS for more
+// information on creating client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
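+//
+// As a minimal sketch (session setup elided, names assumed):
+//
+// sess := session.Must(session.NewSession())
+// svc := sts.New(sess, aws.NewConfig().WithRegion("us-east-1"))
+// out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})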
+package sts
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
new file mode 100644
index 000000000..a3e378eda
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
@@ -0,0 +1,73 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+const (
+
+ // ErrCodeExpiredTokenException for service response error code
+ // "ExpiredTokenException".
+ //
+ // The web identity token that was passed is expired or is not valid. Get a
+ // new identity token from the identity provider and then retry the request.
+ ErrCodeExpiredTokenException = "ExpiredTokenException"
+
+ // ErrCodeIDPCommunicationErrorException for service response error code
+ // "IDPCommunicationError".
+ //
+ // The request could not be fulfilled because the non-AWS identity provider
+ // (IDP) that was asked to verify the incoming identity token could not be reached.
+ // This is often a transient error caused by network conditions. Retry the request
+ // a limited number of times so that you don't exceed the request rate. If the
+ // error persists, the non-AWS identity provider might be down or not responding.
+ ErrCodeIDPCommunicationErrorException = "IDPCommunicationError"
+
+ // ErrCodeIDPRejectedClaimException for service response error code
+ // "IDPRejectedClaim".
+ //
+ // The identity provider (IdP) reported that authentication failed. This might
+ // be because the claim is invalid.
+ //
+ // If this error is returned for the AssumeRoleWithWebIdentity operation, it
+ // can also mean that the claim has expired or has been explicitly revoked.
+ ErrCodeIDPRejectedClaimException = "IDPRejectedClaim"
+
+ // ErrCodeInvalidAuthorizationMessageException for service response error code
+ // "InvalidAuthorizationMessageException".
+ //
+ // This error is returned if the message passed to DecodeAuthorizationMessage
+ // was invalid. This can happen if the token contains invalid characters, such
+ // as linebreaks.
+ ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException"
+
+ // ErrCodeInvalidIdentityTokenException for service response error code
+ // "InvalidIdentityToken".
+ //
+ // The web identity token that was passed could not be validated by AWS. Get
+ // a new identity token from the identity provider and then retry the request.
+ ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken"
+
+ // ErrCodeMalformedPolicyDocumentException for service response error code
+ // "MalformedPolicyDocument".
+ //
+ // The request was rejected because the policy document was malformed. The error
+ // message describes the specific error.
+ ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument"
+
+ // ErrCodePackedPolicyTooLargeException for service response error code
+ // "PackedPolicyTooLarge".
+ //
+ // The request was rejected because the policy document was too large. The error
+ // message describes how big the policy document is, in packed form, as a percentage
+ // of what the API allows.
+ ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"
+
+ // ErrCodeRegionDisabledException for service response error code
+ // "RegionDisabledException".
+ //
+ // STS is not activated in the requested region for the account that is being
+ // asked to generate credentials. The account administrator must use the IAM
+ // console to activate STS in that region. For more information, see Activating
+ // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+ // in the IAM User Guide.
+ ErrCodeRegionDisabledException = "RegionDisabledException"
+)
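+
+// A hedged sketch of how these codes are typically matched: errors returned
+// by the client can be type-asserted to awserr.Error and compared against
+// the constants above (aerr is an assumed local name).
+//
+// if aerr, ok := err.(awserr.Error); ok {
+//     switch aerr.Code() {
+//     case sts.ErrCodeRegionDisabledException:
+//         // STS is not activated in the requested region.
+//     case sts.ErrCodeExpiredTokenException:
+//         // Fetch a new identity token and retry.
+//     }
+// }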
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 000000000..2c3c3d2c1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,96 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type STS struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "sts" // Name of service.
+ EndpointsID = ServiceName // ID to lookup a service endpoint with.
+ ServiceID = "STS" // ServiceID is a unique identifer of a specific service.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance, use the
+// optional aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a STS client from just a session.
+// svc := sts.New(mySession)
+//
+// // Create a STS client with additional configuration
+// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS {
+ svc := &STS{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ PartitionID: partitionID,
+ Endpoint: endpoint,
+ APIVersion: "2011-06-15",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
new file mode 100644
index 000000000..e2e1d6efe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
@@ -0,0 +1,96 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package stsiface provides an interface to enable mocking the AWS Security
+// Token Service client for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package stsiface
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/service/sts"
+)
+
+// STSAPI provides an interface to enable mocking the
+// sts.STS service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client easier.
+//
+// The best way to use this interface is to stub out the SDK's service
+// client's calls for unit testing your code, without needing to inject
+// custom request handlers into the SDK's request pipeline.
+//
+// // myFunc uses an SDK service client to make a request to
+// // AWS Security Token Service.
+// func myFunc(svc stsiface.STSAPI) bool {
+// // Make svc.AssumeRole request
+// }
+//
+// func main() {
+// sess := session.New()
+// svc := sts.New(sess)
+//
+// myFunc(svc)
+// }
+//
+// In your _test.go file:
+//
+// // Define a mock struct to be used in your unit tests of myFunc.
+// type mockSTSClient struct {
+// stsiface.STSAPI
+// }
+// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
+// // mock response/functionality
+// }
+//
+// func TestMyFunc(t *testing.T) {
+// // Setup Test
+// mockSvc := &mockSTSClient{}
+//
+// myFunc(mockSvc)
+//
+// // Verify myFunc's functionality
+// }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks that satisfy the interfaces.
+type STSAPI interface {
+ AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+ AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
+ AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)
+
+ AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)
+ AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error)
+ AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)
+
+ AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)
+ AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error)
+ AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)
+
+ DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
+ DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error)
+ DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
+
+ GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error)
+ GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error)
+ GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput)
+
+ GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error)
+ GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error)
+ GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput)
+
+ GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error)
+ GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error)
+ GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput)
+
+ GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error)
+ GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error)
+ GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput)
+}
+
+var _ STSAPI = (*sts.STS)(nil)
diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE
new file mode 100644
index 000000000..37ec93a14
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go
new file mode 100644
index 000000000..eac1c7664
--- /dev/null
+++ b/vendor/github.com/golang/groupcache/lru/lru.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package lru implements an LRU cache.
+package lru
+
+import "container/list"
+
+// Cache is an LRU cache. It is not safe for concurrent access.
+type Cache struct {
+ // MaxEntries is the maximum number of cache entries before
+ // an item is evicted. Zero means no limit.
+ MaxEntries int
+
+ // OnEvicted optionally specifies a callback function to be
+ // executed when an entry is purged from the cache.
+ OnEvicted func(key Key, value interface{})
+
+ ll *list.List
+ cache map[interface{}]*list.Element
+}
+
+// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
+type Key interface{}
+
+type entry struct {
+ key Key
+ value interface{}
+}
+
+// New creates a new Cache.
+// If maxEntries is zero, the cache has no limit and it's assumed
+// that eviction is done by the caller.
+func New(maxEntries int) *Cache {
+ return &Cache{
+ MaxEntries: maxEntries,
+ ll: list.New(),
+ cache: make(map[interface{}]*list.Element),
+ }
+}
+
+// Add adds a value to the cache.
+func (c *Cache) Add(key Key, value interface{}) {
+ if c.cache == nil {
+ c.cache = make(map[interface{}]*list.Element)
+ c.ll = list.New()
+ }
+ if ee, ok := c.cache[key]; ok {
+ c.ll.MoveToFront(ee)
+ ee.Value.(*entry).value = value
+ return
+ }
+ ele := c.ll.PushFront(&entry{key, value})
+ c.cache[key] = ele
+ if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
+ c.RemoveOldest()
+ }
+}
+
+// Get looks up a key's value from the cache.
+func (c *Cache) Get(key Key) (value interface{}, ok bool) {
+ if c.cache == nil {
+ return
+ }
+ if ele, hit := c.cache[key]; hit {
+ c.ll.MoveToFront(ele)
+ return ele.Value.(*entry).value, true
+ }
+ return
+}
+
+// Remove removes the provided key from the cache.
+func (c *Cache) Remove(key Key) {
+ if c.cache == nil {
+ return
+ }
+ if ele, hit := c.cache[key]; hit {
+ c.removeElement(ele)
+ }
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *Cache) RemoveOldest() {
+ if c.cache == nil {
+ return
+ }
+ ele := c.ll.Back()
+ if ele != nil {
+ c.removeElement(ele)
+ }
+}
+
+func (c *Cache) removeElement(e *list.Element) {
+ c.ll.Remove(e)
+ kv := e.Value.(*entry)
+ delete(c.cache, kv.key)
+ if c.OnEvicted != nil {
+ c.OnEvicted(kv.key, kv.value)
+ }
+}
+
+// Len returns the number of items in the cache.
+func (c *Cache) Len() int {
+ if c.cache == nil {
+ return 0
+ }
+ return c.ll.Len()
+}
+
+// Clear purges all stored items from the cache.
+func (c *Cache) Clear() {
+ if c.OnEvicted != nil {
+ for _, e := range c.cache {
+ kv := e.Value.(*entry)
+ c.OnEvicted(kv.key, kv.value)
+ }
+ }
+ c.ll = nil
+ c.cache = nil
+}
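+
+// A short usage sketch (not part of the original file): keys must be
+// comparable, and callers must serialize access themselves, since Cache is
+// not safe for concurrent use.
+//
+// c := lru.New(2)
+// c.Add("a", 1)
+// c.Add("b", 2)
+// c.Add("c", 3) // evicts "a", the least recently used entry
+// if v, ok := c.Get("b"); ok {
+//     fmt.Println(v) // 2
+// }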
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
new file mode 100644
index 000000000..63dc05785
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -0,0 +1,200 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
+
+package descriptor
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/descriptor.proto.
+
+type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type
+
+const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE
+const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT
+const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64
+const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64
+const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32
+const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64
+const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32
+const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL
+const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING
+const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP
+const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE
+const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES
+const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32
+const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM
+const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32
+const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64
+const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32
+const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64
+
+var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name
+var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value
+
+type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label
+
+const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL
+const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED
+const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED
+
+var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name
+var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value
+
+type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode
+
+const FileOptions_SPEED = descriptorpb.FileOptions_SPEED
+const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE
+const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME
+
+var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name
+var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value
+
+type FieldOptions_CType = descriptorpb.FieldOptions_CType
+
+const FieldOptions_STRING = descriptorpb.FieldOptions_STRING
+const FieldOptions_CORD = descriptorpb.FieldOptions_CORD
+const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE
+
+var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name
+var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value
+
+type FieldOptions_JSType = descriptorpb.FieldOptions_JSType
+
+const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL
+const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING
+const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER
+
+var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name
+var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value
+
+type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel
+
+const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN
+const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS
+const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT
+
+var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name
+var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value
+
+type FileDescriptorSet = descriptorpb.FileDescriptorSet
+type FileDescriptorProto = descriptorpb.FileDescriptorProto
+type DescriptorProto = descriptorpb.DescriptorProto
+type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions
+type FieldDescriptorProto = descriptorpb.FieldDescriptorProto
+type OneofDescriptorProto = descriptorpb.OneofDescriptorProto
+type EnumDescriptorProto = descriptorpb.EnumDescriptorProto
+type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto
+type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto
+type MethodDescriptorProto = descriptorpb.MethodDescriptorProto
+
+const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming
+const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming
+
+type FileOptions = descriptorpb.FileOptions
+
+const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles
+const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8
+const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor
+const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices
+const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices
+const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices
+const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices
+const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated
+const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas
+
+type MessageOptions = descriptorpb.MessageOptions
+
+const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat
+const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor
+const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated
+
+type FieldOptions = descriptorpb.FieldOptions
+
+const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype
+const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype
+const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy
+const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated
+const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak
+
+type OneofOptions = descriptorpb.OneofOptions
+type EnumOptions = descriptorpb.EnumOptions
+
+const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated
+
+type EnumValueOptions = descriptorpb.EnumValueOptions
+
+const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated
+
+type ServiceOptions = descriptorpb.ServiceOptions
+
+const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated
+
+type MethodOptions = descriptorpb.MethodOptions
+
+const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated
+const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel
+
+type UninterpretedOption = descriptorpb.UninterpretedOption
+type SourceCodeInfo = descriptorpb.SourceCodeInfo
+type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo
+type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange
+type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange
+type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange
+type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart
+type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location
+type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation
+
+var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{
+ 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65,
+ 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x32,
+}
+
+var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() }
+func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() {
+ if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File
+ file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil
+ file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil
+ file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
new file mode 100644
index 000000000..e729dcff1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -0,0 +1,165 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ptypes
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ anypb "github.com/golang/protobuf/ptypes/any"
+)
+
+const urlPrefix = "type.googleapis.com/"
+
+// AnyMessageName returns the message name contained in an anypb.Any message.
+// Most type assertions should use the Is function instead.
+func AnyMessageName(any *anypb.Any) (string, error) {
+ name, err := anyMessageName(any)
+ return string(name), err
+}
+func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
+ if any == nil {
+ return "", fmt.Errorf("message is nil")
+ }
+ name := protoreflect.FullName(any.TypeUrl)
+ if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
+ name = name[i+len("/"):]
+ }
+ if !name.IsValid() {
+ return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
+ }
+ return name, nil
+}
+
+// MarshalAny marshals the given message m into an anypb.Any message.
+func MarshalAny(m proto.Message) (*anypb.Any, error) {
+ switch dm := m.(type) {
+ case DynamicAny:
+ m = dm.Message
+ case *DynamicAny:
+ if dm == nil {
+ return nil, proto.ErrNil
+ }
+ m = dm.Message
+ }
+ b, err := proto.Marshal(m)
+ if err != nil {
+ return nil, err
+ }
+ return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
+}
+
+// Empty returns a new message of the type specified in an anypb.Any message.
+// It returns protoregistry.NotFound if the corresponding message type could not
+// be resolved in the global registry.
+func Empty(any *anypb.Any) (proto.Message, error) {
+ name, err := anyMessageName(any)
+ if err != nil {
+ return nil, err
+ }
+ mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
+ if err != nil {
+ return nil, err
+ }
+ return proto.MessageV1(mt.New().Interface()), nil
+}
+
+// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
+// into the provided message m. It returns an error if the target message
+// does not match the type in the Any message or if an unmarshal error occurs.
+//
+// The target message m may be a *DynamicAny message. If the underlying message
+// type could not be resolved, then this returns protoregistry.NotFound.
+func UnmarshalAny(any *anypb.Any, m proto.Message) error {
+ if dm, ok := m.(*DynamicAny); ok {
+ if dm.Message == nil {
+ var err error
+ dm.Message, err = Empty(any)
+ if err != nil {
+ return err
+ }
+ }
+ m = dm.Message
+ }
+
+ anyName, err := AnyMessageName(any)
+ if err != nil {
+ return err
+ }
+ msgName := proto.MessageName(m)
+ if anyName != msgName {
+ return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
+ }
+ return proto.Unmarshal(any.Value, m)
+}
+
+// Is reports whether the Any message contains a message of the specified type.
+func Is(any *anypb.Any, m proto.Message) bool {
+ if any == nil || m == nil {
+ return false
+ }
+ name := proto.MessageName(m)
+ if !strings.HasSuffix(any.TypeUrl, name) {
+ return false
+ }
+ return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in an anypb.Any message.
+// The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+// var x ptypes.DynamicAny
+// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+// fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct{ proto.Message }
+
+func (m DynamicAny) String() string {
+ if m.Message == nil {
+ return ""
+ }
+ return m.Message.String()
+}
+func (m DynamicAny) Reset() {
+ if m.Message == nil {
+ return
+ }
+ m.Message.Reset()
+}
+func (m DynamicAny) ProtoMessage() {}
+func (m DynamicAny) ProtoReflect() protoreflect.Message {
+ if m.Message == nil {
+ return nil
+ }
+ return dynamicAny{proto.MessageReflect(m.Message)}
+}
+
+type dynamicAny struct{ protoreflect.Message }
+
+func (m dynamicAny) Type() protoreflect.MessageType {
+ return dynamicAnyType{m.Message.Type()}
+}
+func (m dynamicAny) New() protoreflect.Message {
+ return dynamicAnyType{m.Message.Type()}.New()
+}
+func (m dynamicAny) Interface() protoreflect.ProtoMessage {
+ return DynamicAny{proto.MessageV1(m.Message.Interface())}
+}
+
+type dynamicAnyType struct{ protoreflect.MessageType }
+
+func (t dynamicAnyType) New() protoreflect.Message {
+ return dynamicAny{t.MessageType.New()}
+}
+func (t dynamicAnyType) Zero() protoreflect.Message {
+ return dynamicAny{t.MessageType.Zero()}
+}
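
Taken together, the helpers above round-trip a message through an Any. A minimal sketch, using the vendored duration type as the concrete message (any generated proto.Message works the same way):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// MarshalAny packs the message; the TypeUrl becomes
	// "type.googleapis.com/google.protobuf.Duration".
	src := &durpb.Duration{Seconds: 3}
	a, err := ptypes.MarshalAny(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(ptypes.Is(a, &durpb.Duration{})) // true

	// DynamicAny resolves the concrete type from the global registry,
	// allocates it, and UnmarshalAny fills it in.
	var dst ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(a, &dst); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", dst.Message) // the concrete *Duration type
}
```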
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
new file mode 100644
index 000000000..0ef27d33d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -0,0 +1,62 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/any/any.proto
+
+package any
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/any.proto.
+
+type Any = anypb.Any
+
+var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
+ 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
+ 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
+func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
+ file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 000000000..fb9edd5c6
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ptypes provides functionality for interacting with well-known types.
+package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
new file mode 100644
index 000000000..6110ae8a4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -0,0 +1,72 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ptypes
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ durationpb "github.com/golang/protobuf/ptypes/duration"
+)
+
+// Range of google.protobuf.Duration as specified in duration.proto.
+// This is about 10,000 years in seconds.
+const (
+ maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+ minSeconds = -maxSeconds
+)
+
+// Duration converts a durationpb.Duration to a time.Duration.
+// Duration returns an error if dur is invalid or overflows a time.Duration.
+func Duration(dur *durationpb.Duration) (time.Duration, error) {
+ if err := validateDuration(dur); err != nil {
+ return 0, err
+ }
+ d := time.Duration(dur.Seconds) * time.Second
+ if int64(d/time.Second) != dur.Seconds {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
+ }
+ if dur.Nanos != 0 {
+ d += time.Duration(dur.Nanos) * time.Nanosecond
+ if (d < 0) != (dur.Nanos < 0) {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
+ }
+ }
+ return d, nil
+}
+
+// DurationProto converts a time.Duration to a durationpb.Duration.
+func DurationProto(d time.Duration) *durationpb.Duration {
+ nanos := d.Nanoseconds()
+ secs := nanos / 1e9
+ nanos -= secs * 1e9
+ return &durationpb.Duration{
+ Seconds: int64(secs),
+ Nanos: int32(nanos),
+ }
+}
+
+// validateDuration determines whether the durationpb.Duration is valid
+// according to the definition in google/protobuf/duration.proto.
+// A valid durationpb.Duration may still be too large to fit into a time.Duration.
+// Note that the range of durationpb.Duration is about 10,000 years,
+// while the range of time.Duration is about 290 years.
+func validateDuration(dur *durationpb.Duration) error {
+ if dur == nil {
+ return errors.New("duration: nil Duration")
+ }
+ if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
+ return fmt.Errorf("duration: %v: seconds out of range", dur)
+ }
+ if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
+ return fmt.Errorf("duration: %v: nanos out of range", dur)
+ }
+ // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+ if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
+ return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
+ }
+ return nil
+}
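
A quick round trip through the two conversions above (values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> proto: Nanoseconds() is split into seconds and nanos.
	d := 90*time.Second + 500*time.Millisecond
	pb := ptypes.DurationProto(d)
	fmt.Println(pb.Seconds, pb.Nanos) // 90 500000000

	// proto -> time.Duration, applying the range and sign checks above.
	back, err := ptypes.Duration(pb)
	if err != nil {
		panic(err)
	}
	fmt.Println(back == d) // true
}
```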
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
new file mode 100644
index 000000000..d0079ee3e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -0,0 +1,63 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/duration/duration.proto
+
+package duration
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/duration.proto.
+
+type Duration = durationpb.Duration
+
+var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
+ 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
+func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
+ file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
new file mode 100644
index 000000000..16686a655
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
@@ -0,0 +1,62 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/empty/empty.proto
+
+package empty
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/empty.proto.
+
+type Empty = emptypb.Empty
+
+var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{
+ 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d,
+ 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() }
+func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File
+ file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 000000000..026d0d491
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,103 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ptypes
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ timestamppb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+// Range of google.protobuf.Timestamp as specified in timestamp.proto.
+const (
+ // Seconds field of the earliest valid Timestamp.
+ // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ minValidSeconds = -62135596800
+ // Seconds field just after the latest valid Timestamp.
+ // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ maxValidSeconds = 253402300800
+)
+
+// Timestamp converts a timestamppb.Timestamp to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return
+// value is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+ // timestamp. Instead return whatever time.Unix gives us.
+ var t time.Time
+ if ts == nil {
+ t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+ } else {
+ t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+ }
+ return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *timestamppb.Timestamp {
+ ts, err := TimestampProto(time.Now())
+ if err != nil {
+ panic("ptypes: time.Now() out of Timestamp range")
+ }
+ return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
+	ts := &timestamppb.Timestamp{
+ Seconds: t.Unix(),
+ Nanos: int32(t.Nanosecond()),
+ }
+ if err := validateTimestamp(ts); err != nil {
+ return nil, err
+ }
+ return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps.
+// For invalid Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *timestamppb.Timestamp) string {
+ t, err := Timestamp(ts)
+ if err != nil {
+ return fmt.Sprintf("(%v)", err)
+ }
+ return t.Format(time.RFC3339Nano)
+}
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
+// and has a Nanos field in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes the problem.
+//
+// Every valid Timestamp can be represented by a time.Time,
+// but the converse is not true.
+func validateTimestamp(ts *timestamppb.Timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
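
And the corresponding round trip for timestamps (values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> proto; fails for times outside [0001-01-01, 10000-01-01).
	t := time.Date(2020, 1, 2, 3, 4, 5, 6, time.UTC)
	ts, err := ptypes.TimestampProto(t)
	if err != nil {
		panic(err)
	}

	// proto -> time.Time; the result is always in the UTC location.
	back, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.Equal(t))              // true
	fmt.Println(ptypes.TimestampString(ts)) // 2020-01-02T03:04:05.000000006Z
}
```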
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 000000000..a76f80760
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,64 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+
+package timestamp
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/timestamp.proto.
+
+type Timestamp = timestamppb.Timestamp
+
+var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
+ 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
+ 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
+func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/gomodule/redigo/LICENSE b/vendor/github.com/gomodule/redigo/LICENSE
new file mode 100644
index 000000000..67db85882
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/LICENSE
@@ -0,0 +1,175 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/gomodule/redigo/internal/commandinfo.go b/vendor/github.com/gomodule/redigo/internal/commandinfo.go
new file mode 100644
index 000000000..b763efbdd
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/internal/commandinfo.go
@@ -0,0 +1,54 @@
+// Copyright 2014 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package internal // import "github.com/gomodule/redigo/internal"
+
+import (
+ "strings"
+)
+
+const (
+ WatchState = 1 << iota
+ MultiState
+ SubscribeState
+ MonitorState
+)
+
+type CommandInfo struct {
+ Set, Clear int
+}
+
+var commandInfos = map[string]CommandInfo{
+ "WATCH": {Set: WatchState},
+ "UNWATCH": {Clear: WatchState},
+ "MULTI": {Set: MultiState},
+ "EXEC": {Clear: WatchState | MultiState},
+ "DISCARD": {Clear: WatchState | MultiState},
+ "PSUBSCRIBE": {Set: SubscribeState},
+ "SUBSCRIBE": {Set: SubscribeState},
+ "MONITOR": {Set: MonitorState},
+}
+
+func init() {
+ for n, ci := range commandInfos {
+ commandInfos[strings.ToLower(n)] = ci
+ }
+}
+
+func LookupCommandInfo(commandName string) CommandInfo {
+ if ci, ok := commandInfos[commandName]; ok {
+ return ci
+ }
+ return commandInfos[strings.ToUpper(commandName)]
+}
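
A sketch of how these Set/Clear masks compose, as it would be used from inside the redigo module (the internal package cannot be imported by external programs, so this is purely illustrative of the logic):

```go
package main

import (
	"fmt"

	"github.com/gomodule/redigo/internal"
)

func main() {
	// A pool folds each command's Set/Clear masks into a state word to
	// decide whether a connection is still in WATCH/MULTI/subscribe state.
	state := 0
	for _, cmd := range []string{"WATCH", "MULTI", "EXEC"} {
		ci := internal.LookupCommandInfo(cmd)
		state = (state | ci.Set) &^ ci.Clear
	}
	fmt.Println(state == 0) // true: EXEC cleared WatchState and MultiState
}
```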
diff --git a/vendor/github.com/gomodule/redigo/redis/conn.go b/vendor/github.com/gomodule/redigo/redis/conn.go
new file mode 100644
index 000000000..5aa0f32f2
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/conn.go
@@ -0,0 +1,673 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/url"
+ "regexp"
+ "strconv"
+ "sync"
+ "time"
+)
+
+var (
+ _ ConnWithTimeout = (*conn)(nil)
+)
+
+// conn is the low-level implementation of Conn
+type conn struct {
+ // Shared
+ mu sync.Mutex
+ pending int
+ err error
+ conn net.Conn
+
+ // Read
+ readTimeout time.Duration
+ br *bufio.Reader
+
+ // Write
+ writeTimeout time.Duration
+ bw *bufio.Writer
+
+ // Scratch space for formatting argument length.
+ // '*' or '$', length, "\r\n"
+ lenScratch [32]byte
+
+ // Scratch space for formatting integers and floats.
+ numScratch [40]byte
+}
+
+// DialTimeout acts like Dial but takes timeouts for establishing the
+// connection to the server, writing a command and reading a reply.
+//
+// Deprecated: Use Dial with options instead.
+func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) {
+ return Dial(network, address,
+ DialConnectTimeout(connectTimeout),
+ DialReadTimeout(readTimeout),
+ DialWriteTimeout(writeTimeout))
+}
+
+// DialOption specifies an option for dialing a Redis server.
+type DialOption struct {
+ f func(*dialOptions)
+}
+
+type dialOptions struct {
+ readTimeout time.Duration
+ writeTimeout time.Duration
+ dialer *net.Dialer
+ dial func(network, addr string) (net.Conn, error)
+ db int
+ password string
+ useTLS bool
+ skipVerify bool
+ tlsConfig *tls.Config
+}
+
+// DialReadTimeout specifies the timeout for reading a single command reply.
+func DialReadTimeout(d time.Duration) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.readTimeout = d
+ }}
+}
+
+// DialWriteTimeout specifies the timeout for writing a single command.
+func DialWriteTimeout(d time.Duration) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.writeTimeout = d
+ }}
+}
+
+// DialConnectTimeout specifies the timeout for connecting to the Redis server when
+// no DialNetDial option is specified.
+func DialConnectTimeout(d time.Duration) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.dialer.Timeout = d
+ }}
+}
+
+// DialKeepAlive specifies the keep-alive period for TCP connections to the Redis server
+// when no DialNetDial option is specified.
+// If zero, keep-alives are not enabled. If no DialKeepAlive option is specified then
+// the default of 5 minutes is used to ensure that half-closed TCP sessions are detected.
+func DialKeepAlive(d time.Duration) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.dialer.KeepAlive = d
+ }}
+}
+
+// DialNetDial specifies a custom dial function for creating TCP
+// connections; otherwise a net.Dialer customized via the other options is used.
+// DialNetDial overrides DialConnectTimeout and DialKeepAlive.
+func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.dial = dial
+ }}
+}
+
+// DialDatabase specifies the database to select when dialing a connection.
+func DialDatabase(db int) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.db = db
+ }}
+}
+
+// DialPassword specifies the password to use when connecting to
+// the Redis server.
+func DialPassword(password string) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.password = password
+ }}
+}
+
+// DialTLSConfig specifies the config to use when a TLS connection is dialed.
+// Has no effect when not dialing a TLS connection.
+func DialTLSConfig(c *tls.Config) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.tlsConfig = c
+ }}
+}
+
+// DialTLSSkipVerify disables server name verification when connecting over
+// TLS. Has no effect when not dialing a TLS connection.
+func DialTLSSkipVerify(skip bool) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.skipVerify = skip
+ }}
+}
+
+// DialUseTLS specifies whether TLS should be used when connecting to the
+// server. This option is ignored by DialURL.
+func DialUseTLS(useTLS bool) DialOption {
+ return DialOption{func(do *dialOptions) {
+ do.useTLS = useTLS
+ }}
+}
+
+// Dial connects to the Redis server at the given network and
+// address using the specified options.
+func Dial(network, address string, options ...DialOption) (Conn, error) {
+ do := dialOptions{
+ dialer: &net.Dialer{
+ KeepAlive: time.Minute * 5,
+ },
+ }
+ for _, option := range options {
+ option.f(&do)
+ }
+ if do.dial == nil {
+ do.dial = do.dialer.Dial
+ }
+
+ netConn, err := do.dial(network, address)
+ if err != nil {
+ return nil, err
+ }
+
+ if do.useTLS {
+ var tlsConfig *tls.Config
+ if do.tlsConfig == nil {
+ tlsConfig = &tls.Config{InsecureSkipVerify: do.skipVerify}
+ } else {
+ tlsConfig = cloneTLSConfig(do.tlsConfig)
+ }
+ if tlsConfig.ServerName == "" {
+ host, _, err := net.SplitHostPort(address)
+ if err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ tlsConfig.ServerName = host
+ }
+
+ tlsConn := tls.Client(netConn, tlsConfig)
+ if err := tlsConn.Handshake(); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ netConn = tlsConn
+ }
+
+ c := &conn{
+ conn: netConn,
+ bw: bufio.NewWriter(netConn),
+ br: bufio.NewReader(netConn),
+ readTimeout: do.readTimeout,
+ writeTimeout: do.writeTimeout,
+ }
+
+ if do.password != "" {
+ if _, err := c.Do("AUTH", do.password); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ }
+
+ if do.db != 0 {
+ if _, err := c.Do("SELECT", do.db); err != nil {
+ netConn.Close()
+ return nil, err
+ }
+ }
+
+ return c, nil
+}
+
+var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`)
+
+// DialURL connects to a Redis server at the given URL using the Redis
+// URI scheme. URLs should follow the draft IANA specification for the
+// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis).
+func DialURL(rawurl string, options ...DialOption) (Conn, error) {
+ u, err := url.Parse(rawurl)
+ if err != nil {
+ return nil, err
+ }
+
+ if u.Scheme != "redis" && u.Scheme != "rediss" {
+ return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme)
+ }
+
+ // As per the IANA draft spec, the host defaults to localhost and
+ // the port defaults to 6379.
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ // assume port is missing
+ host = u.Host
+ port = "6379"
+ }
+ if host == "" {
+ host = "localhost"
+ }
+ address := net.JoinHostPort(host, port)
+
+ if u.User != nil {
+ password, isSet := u.User.Password()
+ if isSet {
+ options = append(options, DialPassword(password))
+ }
+ }
+
+ match := pathDBRegexp.FindStringSubmatch(u.Path)
+ if len(match) == 2 {
+ db := 0
+ if len(match[1]) > 0 {
+ db, err = strconv.Atoi(match[1])
+ if err != nil {
+ return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
+ }
+ }
+ if db != 0 {
+ options = append(options, DialDatabase(db))
+ }
+ } else if u.Path != "" {
+ return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
+ }
+
+ options = append(options, DialUseTLS(u.Scheme == "rediss"))
+
+ return Dial("tcp", address, options...)
+}
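+
+// Usage sketch (illustrative; assumes a Redis server on localhost):
+//
+//	c, err := redis.Dial("tcp", "localhost:6379",
+//		redis.DialConnectTimeout(5*time.Second),
+//		redis.DialPassword("secret"),
+//		redis.DialDatabase(1))
+//
+//	// The same settings via the URI scheme; "rediss" would enable TLS.
+//	c, err = redis.DialURL("redis://:secret@localhost:6379/1")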
+
+// NewConn returns a new Redigo connection for the given net connection.
+func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn {
+ return &conn{
+ conn: netConn,
+ bw: bufio.NewWriter(netConn),
+ br: bufio.NewReader(netConn),
+ readTimeout: readTimeout,
+ writeTimeout: writeTimeout,
+ }
+}
+
+func (c *conn) Close() error {
+ c.mu.Lock()
+ err := c.err
+ if c.err == nil {
+ c.err = errors.New("redigo: closed")
+ err = c.conn.Close()
+ }
+ c.mu.Unlock()
+ return err
+}
+
+func (c *conn) fatal(err error) error {
+ c.mu.Lock()
+ if c.err == nil {
+ c.err = err
+ // Close connection to force errors on subsequent calls and to unblock
+ // other reader or writer.
+ c.conn.Close()
+ }
+ c.mu.Unlock()
+ return err
+}
+
+func (c *conn) Err() error {
+ c.mu.Lock()
+ err := c.err
+ c.mu.Unlock()
+ return err
+}
+
+func (c *conn) writeLen(prefix byte, n int) error {
+ c.lenScratch[len(c.lenScratch)-1] = '\n'
+ c.lenScratch[len(c.lenScratch)-2] = '\r'
+ i := len(c.lenScratch) - 3
+ for {
+ c.lenScratch[i] = byte('0' + n%10)
+ i -= 1
+ n = n / 10
+ if n == 0 {
+ break
+ }
+ }
+ c.lenScratch[i] = prefix
+ _, err := c.bw.Write(c.lenScratch[i:])
+ return err
+}
+
+func (c *conn) writeString(s string) error {
+ c.writeLen('$', len(s))
+ c.bw.WriteString(s)
+ _, err := c.bw.WriteString("\r\n")
+ return err
+}
+
+func (c *conn) writeBytes(p []byte) error {
+ c.writeLen('$', len(p))
+ c.bw.Write(p)
+ _, err := c.bw.WriteString("\r\n")
+ return err
+}
+
+func (c *conn) writeInt64(n int64) error {
+ return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10))
+}
+
+func (c *conn) writeFloat64(n float64) error {
+ return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64))
+}
+
+func (c *conn) writeCommand(cmd string, args []interface{}) error {
+ c.writeLen('*', 1+len(args))
+ if err := c.writeString(cmd); err != nil {
+ return err
+ }
+ for _, arg := range args {
+ if err := c.writeArg(arg, true); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *conn) writeArg(arg interface{}, argumentTypeOK bool) (err error) {
+ switch arg := arg.(type) {
+ case string:
+ return c.writeString(arg)
+ case []byte:
+ return c.writeBytes(arg)
+ case int:
+ return c.writeInt64(int64(arg))
+ case int64:
+ return c.writeInt64(arg)
+ case float64:
+ return c.writeFloat64(arg)
+ case bool:
+ if arg {
+ return c.writeString("1")
+ } else {
+ return c.writeString("0")
+ }
+ case nil:
+ return c.writeString("")
+ case Argument:
+ if argumentTypeOK {
+ return c.writeArg(arg.RedisArg(), false)
+ }
+ // See comment in default clause below.
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, arg)
+ return c.writeBytes(buf.Bytes())
+ default:
+ // This default clause is intended to handle builtin numeric types.
+ // The function should return an error for other types, but this is not
+ // done for compatibility with previous versions of the package.
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, arg)
+ return c.writeBytes(buf.Bytes())
+ }
+}
+
+type protocolError string
+
+func (pe protocolError) Error() string {
+ return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe))
+}
+
+func (c *conn) readLine() ([]byte, error) {
+ p, err := c.br.ReadSlice('\n')
+ if err == bufio.ErrBufferFull {
+ return nil, protocolError("long response line")
+ }
+ if err != nil {
+ return nil, err
+ }
+ i := len(p) - 2
+ if i < 0 || p[i] != '\r' {
+ return nil, protocolError("bad response line terminator")
+ }
+ return p[:i], nil
+}
+
+// parseLen parses bulk string and array lengths.
+func parseLen(p []byte) (int, error) {
+ if len(p) == 0 {
+ return -1, protocolError("malformed length")
+ }
+
+ if p[0] == '-' && len(p) == 2 && p[1] == '1' {
+		// handle $-1 and *-1 null replies.
+ return -1, nil
+ }
+
+ var n int
+ for _, b := range p {
+ n *= 10
+ if b < '0' || b > '9' {
+ return -1, protocolError("illegal bytes in length")
+ }
+ n += int(b - '0')
+ }
+
+ return n, nil
+}
+
+// parseInt parses an integer reply.
+func parseInt(p []byte) (interface{}, error) {
+ if len(p) == 0 {
+ return 0, protocolError("malformed integer")
+ }
+
+ var negate bool
+ if p[0] == '-' {
+ negate = true
+ p = p[1:]
+ if len(p) == 0 {
+ return 0, protocolError("malformed integer")
+ }
+ }
+
+ var n int64
+ for _, b := range p {
+ n *= 10
+ if b < '0' || b > '9' {
+			return 0, protocolError("illegal bytes in integer")
+ }
+ n += int64(b - '0')
+ }
+
+ if negate {
+ n = -n
+ }
+ return n, nil
+}
+
+var (
+ okReply interface{} = "OK"
+ pongReply interface{} = "PONG"
+)
+
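+// readReply reads a single RESP reply, dispatching on the type byte:
+// '+' simple string, '-' error, ':' integer, '$' bulk string, '*' array.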
+func (c *conn) readReply() (interface{}, error) {
+ line, err := c.readLine()
+ if err != nil {
+ return nil, err
+ }
+ if len(line) == 0 {
+ return nil, protocolError("short response line")
+ }
+ switch line[0] {
+ case '+':
+ switch {
+ case len(line) == 3 && line[1] == 'O' && line[2] == 'K':
+ // Avoid allocation for frequent "+OK" response.
+ return okReply, nil
+ case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G':
+ // Avoid allocation in PING command benchmarks :)
+ return pongReply, nil
+ default:
+ return string(line[1:]), nil
+ }
+ case '-':
+ return Error(string(line[1:])), nil
+ case ':':
+ return parseInt(line[1:])
+ case '$':
+ n, err := parseLen(line[1:])
+ if n < 0 || err != nil {
+ return nil, err
+ }
+ p := make([]byte, n)
+ _, err = io.ReadFull(c.br, p)
+ if err != nil {
+ return nil, err
+ }
+ if line, err := c.readLine(); err != nil {
+ return nil, err
+ } else if len(line) != 0 {
+ return nil, protocolError("bad bulk string format")
+ }
+ return p, nil
+ case '*':
+ n, err := parseLen(line[1:])
+ if n < 0 || err != nil {
+ return nil, err
+ }
+ r := make([]interface{}, n)
+ for i := range r {
+ r[i], err = c.readReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return r, nil
+ }
+ return nil, protocolError("unexpected response line")
+}
+
+func (c *conn) Send(cmd string, args ...interface{}) error {
+ c.mu.Lock()
+	c.pending++
+ c.mu.Unlock()
+ if c.writeTimeout != 0 {
+ c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+ }
+ if err := c.writeCommand(cmd, args); err != nil {
+ return c.fatal(err)
+ }
+ return nil
+}
+
+func (c *conn) Flush() error {
+ if c.writeTimeout != 0 {
+ c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+ }
+ if err := c.bw.Flush(); err != nil {
+ return c.fatal(err)
+ }
+ return nil
+}
+
+func (c *conn) Receive() (interface{}, error) {
+ return c.ReceiveWithTimeout(c.readTimeout)
+}
+
+func (c *conn) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) {
+ var deadline time.Time
+ if timeout != 0 {
+ deadline = time.Now().Add(timeout)
+ }
+ c.conn.SetReadDeadline(deadline)
+
+ if reply, err = c.readReply(); err != nil {
+ return nil, c.fatal(err)
+ }
+ // When using pub/sub, the number of receives can be greater than the
+ // number of sends. To enable normal use of the connection after
+ // unsubscribing from all channels, we do not decrement pending to a
+ // negative value.
+ //
+ // The pending field is decremented after the reply is read to handle the
+ // case where Receive is called before Send.
+ c.mu.Lock()
+ if c.pending > 0 {
+		c.pending--
+ }
+ c.mu.Unlock()
+ if err, ok := reply.(Error); ok {
+ return nil, err
+ }
+ return
+}
+
+func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) {
+ return c.DoWithTimeout(c.readTimeout, cmd, args...)
+}
+
+func (c *conn) DoWithTimeout(readTimeout time.Duration, cmd string, args ...interface{}) (interface{}, error) {
+ c.mu.Lock()
+ pending := c.pending
+ c.pending = 0
+ c.mu.Unlock()
+
+ if cmd == "" && pending == 0 {
+ return nil, nil
+ }
+
+ if c.writeTimeout != 0 {
+ c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
+ }
+
+ if cmd != "" {
+ if err := c.writeCommand(cmd, args); err != nil {
+ return nil, c.fatal(err)
+ }
+ }
+
+ if err := c.bw.Flush(); err != nil {
+ return nil, c.fatal(err)
+ }
+
+ var deadline time.Time
+ if readTimeout != 0 {
+ deadline = time.Now().Add(readTimeout)
+ }
+ c.conn.SetReadDeadline(deadline)
+
+ if cmd == "" {
+ reply := make([]interface{}, pending)
+ for i := range reply {
+ r, e := c.readReply()
+ if e != nil {
+ return nil, c.fatal(e)
+ }
+ reply[i] = r
+ }
+ return reply, nil
+ }
+
+ var err error
+ var reply interface{}
+ for i := 0; i <= pending; i++ {
+ var e error
+ if reply, e = c.readReply(); e != nil {
+ return nil, c.fatal(e)
+ }
+ if e, ok := reply.(Error); ok && err == nil {
+ err = e
+ }
+ }
+ return reply, err
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/doc.go b/vendor/github.com/gomodule/redigo/redis/doc.go
new file mode 100644
index 000000000..70ec1ea69
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/doc.go
@@ -0,0 +1,177 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package redis is a client for the Redis database.
+//
+// The Redigo FAQ (https://github.com/gomodule/redigo/wiki/FAQ) contains more
+// documentation about this package.
+//
+// Connections
+//
+// The Conn interface is the primary interface for working with Redis.
+// Applications create connections by calling the Dial, DialWithTimeout or
+// NewConn functions. In the future, functions will be added for creating
+// sharded and other types of connections.
+//
+// The application must call the connection Close method when the application
+// is done with the connection.
+//
+// Executing Commands
+//
+// The Conn interface has a generic method for executing Redis commands:
+//
+// Do(commandName string, args ...interface{}) (reply interface{}, err error)
+//
+// The Redis command reference (http://redis.io/commands) lists the available
+// commands. An example of using the Redis APPEND command is:
+//
+// n, err := conn.Do("APPEND", "key", "value")
+//
+// The Do method converts command arguments to bulk strings for transmission
+// to the server as follows:
+//
+// Go Type Conversion
+// []byte Sent as is
+// string Sent as is
+// int, int64 strconv.FormatInt(v)
+// float64 strconv.FormatFloat(v, 'g', -1, 64)
+// bool true -> "1", false -> "0"
+// nil ""
+// all other types fmt.Fprint(w, v)
+//
+// Redis command reply types are represented using the following Go types:
+//
+// Redis type Go type
+// error redis.Error
+// integer int64
+// simple string string
+// bulk string []byte or nil if value not present.
+// array []interface{} or nil if value not present.
+//
+// Use type assertions or the reply helper functions to convert from
+// interface{} to the specific Go type for the command result.
+//
+// Pipelining
+//
+// Connections support pipelining using the Send, Flush and Receive methods.
+//
+// Send(commandName string, args ...interface{}) error
+// Flush() error
+// Receive() (reply interface{}, err error)
+//
+// Send writes the command to the connection's output buffer. Flush flushes the
+// connection's output buffer to the server. Receive reads a single reply from
+// the server. The following example shows a simple pipeline.
+//
+// c.Send("SET", "foo", "bar")
+// c.Send("GET", "foo")
+// c.Flush()
+// c.Receive() // reply from SET
+// v, err = c.Receive() // reply from GET
+//
+// The Do method combines the functionality of the Send, Flush and Receive
+// methods. The Do method starts by writing the command and flushing the output
+// buffer. Next, the Do method receives all pending replies including the reply
+// for the command just sent by Do. If any of the received replies is an error,
+// then Do returns the error. If there are no errors, then Do returns the last
+// reply. If the command argument to the Do method is "", then the Do method
+// will flush the output buffer and receive pending replies without sending a
+// command.
+//
+// Use the Send and Do methods to implement pipelined transactions.
+//
+// c.Send("MULTI")
+// c.Send("INCR", "foo")
+// c.Send("INCR", "bar")
+// r, err := c.Do("EXEC")
+// fmt.Println(r) // prints [1, 1]
+//
+// Concurrency
+//
+// Connections support one concurrent caller to the Receive method and one
+// concurrent caller to the Send and Flush methods. No other concurrency is
+// supported, including concurrent calls to the Do method.
+//
+// For full concurrent access to Redis, use the thread-safe Pool to get, use
+// and release a connection from within a goroutine. Connections returned from
+// a Pool have the concurrency restrictions described in the previous
+// paragraph.
+//
+// Publish and Subscribe
+//
+// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers.
+//
+// c.Send("SUBSCRIBE", "example")
+// c.Flush()
+// for {
+// reply, err := c.Receive()
+// if err != nil {
+// return err
+// }
+// // process pushed message
+// }
+//
+// The PubSubConn type wraps a Conn with convenience methods for implementing
+// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods
+// send and flush a subscription management command. The Receive method
+// converts a pushed message to convenient types for use in a type switch.
+//
+// psc := redis.PubSubConn{Conn: c}
+// psc.Subscribe("example")
+// for {
+// switch v := psc.Receive().(type) {
+// case redis.Message:
+// fmt.Printf("%s: message: %s\n", v.Channel, v.Data)
+// case redis.Subscription:
+// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
+// case error:
+// return v
+// }
+// }
+//
+// Reply Helpers
+//
+// The Bool, Int, Bytes, String, Strings and Values functions convert a reply
+// to a value of a specific type. To allow convenient wrapping of calls to the
+// connection Do and Receive methods, the functions take a second argument of
+// type error. If the error is non-nil, then the helper function returns the
+// error. If the error is nil, the function converts the reply to the specified
+// type:
+//
+// exists, err := redis.Bool(c.Do("EXISTS", "foo"))
+// if err != nil {
+// // handle error return from c.Do or type conversion error.
+// }
+//
+// The Scan function converts elements of an array reply to Go types:
+//
+// var value1 int
+// var value2 string
+// reply, err := redis.Values(c.Do("MGET", "key1", "key2"))
+// if err != nil {
+// // handle error
+// }
+// if _, err := redis.Scan(reply, &value1, &value2); err != nil {
+// // handle error
+// }
+//
+// Errors
+//
+// Connection methods return error replies from the server as type redis.Error.
+//
+// Call the connection Err() method to determine if the connection encountered
+// a non-recoverable error, such as a network error or protocol parsing error.
+// If Err() returns a non-nil value, then the connection is not usable and
+// should be closed.
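+//
+// A minimal sketch of that check, for any value c of type Conn:
+//
+//	if err := c.Err(); err != nil {
+//		c.Close() // the connection is broken; dial or fetch a new one
+//	}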
+package redis // import "github.com/gomodule/redigo/redis"
diff --git a/vendor/github.com/gomodule/redigo/redis/go16.go b/vendor/github.com/gomodule/redigo/redis/go16.go
new file mode 100644
index 000000000..f6b1a7ccd
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/go16.go
@@ -0,0 +1,27 @@
+// +build !go1.7
+
+package redis
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: cfg.Rand,
+ Time: cfg.Time,
+ Certificates: cfg.Certificates,
+ NameToCertificate: cfg.NameToCertificate,
+ GetCertificate: cfg.GetCertificate,
+ RootCAs: cfg.RootCAs,
+ NextProtos: cfg.NextProtos,
+ ServerName: cfg.ServerName,
+ ClientAuth: cfg.ClientAuth,
+ ClientCAs: cfg.ClientCAs,
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ CipherSuites: cfg.CipherSuites,
+ PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+ ClientSessionCache: cfg.ClientSessionCache,
+ MinVersion: cfg.MinVersion,
+ MaxVersion: cfg.MaxVersion,
+ CurvePreferences: cfg.CurvePreferences,
+ }
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/go17.go b/vendor/github.com/gomodule/redigo/redis/go17.go
new file mode 100644
index 000000000..5f3637911
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/go17.go
@@ -0,0 +1,29 @@
+// +build go1.7,!go1.8
+
+package redis
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: cfg.Rand,
+ Time: cfg.Time,
+ Certificates: cfg.Certificates,
+ NameToCertificate: cfg.NameToCertificate,
+ GetCertificate: cfg.GetCertificate,
+ RootCAs: cfg.RootCAs,
+ NextProtos: cfg.NextProtos,
+ ServerName: cfg.ServerName,
+ ClientAuth: cfg.ClientAuth,
+ ClientCAs: cfg.ClientCAs,
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ CipherSuites: cfg.CipherSuites,
+ PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+ ClientSessionCache: cfg.ClientSessionCache,
+ MinVersion: cfg.MinVersion,
+ MaxVersion: cfg.MaxVersion,
+ CurvePreferences: cfg.CurvePreferences,
+ DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled,
+ Renegotiation: cfg.Renegotiation,
+ }
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/go18.go b/vendor/github.com/gomodule/redigo/redis/go18.go
new file mode 100644
index 000000000..558363be3
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/go18.go
@@ -0,0 +1,9 @@
+// +build go1.8
+
+package redis
+
+import "crypto/tls"
+
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+ return cfg.Clone()
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/log.go b/vendor/github.com/gomodule/redigo/redis/log.go
new file mode 100644
index 000000000..b2996611c
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/log.go
@@ -0,0 +1,134 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "time"
+)
+
+var (
+ _ ConnWithTimeout = (*loggingConn)(nil)
+)
+
+// NewLoggingConn returns a logging wrapper around a connection.
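+//
+// A usage sketch, wrapping an existing connection c (the logger destination
+// and prefix are illustrative):
+//
+//	c = redis.NewLoggingConn(c, log.New(os.Stderr, "", log.LstdFlags), "redis")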
+func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn {
+ if prefix != "" {
+ prefix = prefix + "."
+ }
+ return &loggingConn{conn, logger, prefix}
+}
+
+type loggingConn struct {
+ Conn
+ logger *log.Logger
+ prefix string
+}
+
+func (c *loggingConn) Close() error {
+ err := c.Conn.Close()
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err)
+ c.logger.Output(2, buf.String())
+ return err
+}
+
+func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) {
+ const chop = 32
+ switch v := v.(type) {
+ case []byte:
+ if len(v) > chop {
+ fmt.Fprintf(buf, "%q...", v[:chop])
+ } else {
+ fmt.Fprintf(buf, "%q", v)
+ }
+ case string:
+ if len(v) > chop {
+ fmt.Fprintf(buf, "%q...", v[:chop])
+ } else {
+ fmt.Fprintf(buf, "%q", v)
+ }
+ case []interface{}:
+ if len(v) == 0 {
+ buf.WriteString("[]")
+ } else {
+ sep := "["
+ fin := "]"
+ if len(v) > chop {
+ v = v[:chop]
+ fin = "...]"
+ }
+ for _, vv := range v {
+ buf.WriteString(sep)
+ c.printValue(buf, vv)
+ sep = ", "
+ }
+ buf.WriteString(fin)
+ }
+ default:
+ fmt.Fprint(buf, v)
+ }
+}
+
+func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "%s%s(", c.prefix, method)
+ if method != "Receive" {
+ buf.WriteString(commandName)
+ for _, arg := range args {
+ buf.WriteString(", ")
+ c.printValue(&buf, arg)
+ }
+ }
+ buf.WriteString(") -> (")
+ if method != "Send" {
+ c.printValue(&buf, reply)
+ buf.WriteString(", ")
+ }
+ fmt.Fprintf(&buf, "%v)", err)
+ c.logger.Output(3, buf.String())
+}
+
+func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) {
+ reply, err := c.Conn.Do(commandName, args...)
+ c.print("Do", commandName, args, reply, err)
+ return reply, err
+}
+
+func (c *loggingConn) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (interface{}, error) {
+ reply, err := DoWithTimeout(c.Conn, timeout, commandName, args...)
+ c.print("DoWithTimeout", commandName, args, reply, err)
+ return reply, err
+}
+
+func (c *loggingConn) Send(commandName string, args ...interface{}) error {
+ err := c.Conn.Send(commandName, args...)
+ c.print("Send", commandName, args, nil, err)
+ return err
+}
+
+func (c *loggingConn) Receive() (interface{}, error) {
+ reply, err := c.Conn.Receive()
+ c.print("Receive", "", nil, reply, err)
+ return reply, err
+}
+
+func (c *loggingConn) ReceiveWithTimeout(timeout time.Duration) (interface{}, error) {
+ reply, err := ReceiveWithTimeout(c.Conn, timeout)
+ c.print("ReceiveWithTimeout", "", nil, reply, err)
+ return reply, err
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/pool.go b/vendor/github.com/gomodule/redigo/redis/pool.go
new file mode 100644
index 000000000..d77da3254
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/pool.go
@@ -0,0 +1,562 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/sha1"
+ "errors"
+ "io"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/gomodule/redigo/internal"
+)
+
+var (
+ _ ConnWithTimeout = (*activeConn)(nil)
+ _ ConnWithTimeout = (*errorConn)(nil)
+)
+
+var nowFunc = time.Now // for testing
+
+// ErrPoolExhausted is returned from a pool connection method (Do, Send,
+// Receive, Flush, Err) when the maximum number of database connections in the
+// pool has been reached.
+var ErrPoolExhausted = errors.New("redigo: connection pool exhausted")
+
+var (
+ errPoolClosed = errors.New("redigo: connection pool closed")
+ errConnClosed = errors.New("redigo: connection closed")
+)
+
+// Pool maintains a pool of connections. The application calls the Get method
+// to get a connection from the pool and the connection's Close method to
+// return the connection's resources to the pool.
+//
+// The following example shows how to use a pool in a web application. The
+// application creates a pool at application startup and makes it available to
+// request handlers using a package level variable. The pool configuration used
+// here is an example, not a recommendation.
+//
+// func newPool(addr string) *redis.Pool {
+// return &redis.Pool{
+// MaxIdle: 3,
+// IdleTimeout: 240 * time.Second,
+// Dial: func () (redis.Conn, error) { return redis.Dial("tcp", addr) },
+// }
+// }
+//
+// var (
+// pool *redis.Pool
+// redisServer = flag.String("redisServer", ":6379", "")
+// )
+//
+// func main() {
+// flag.Parse()
+// pool = newPool(*redisServer)
+// ...
+// }
+//
+// A request handler gets a connection from the pool and closes the connection
+// when the handler is done:
+//
+// func serveHome(w http.ResponseWriter, r *http.Request) {
+// conn := pool.Get()
+// defer conn.Close()
+// ...
+// }
+//
+// Use the Dial function to authenticate connections with the AUTH command or
+// select a database with the SELECT command:
+//
+// pool := &redis.Pool{
+// // Other pool configuration not shown in this example.
+// Dial: func () (redis.Conn, error) {
+// c, err := redis.Dial("tcp", server)
+// if err != nil {
+// return nil, err
+// }
+// if _, err := c.Do("AUTH", password); err != nil {
+// c.Close()
+// return nil, err
+// }
+// if _, err := c.Do("SELECT", db); err != nil {
+// c.Close()
+// return nil, err
+// }
+// return c, nil
+// },
+// }
+//
+// Use the TestOnBorrow function to check the health of an idle connection
+// before the connection is returned to the application. This example PINGs
+// connections that have been idle more than a minute:
+//
+// pool := &redis.Pool{
+// // Other pool configuration not shown in this example.
+// TestOnBorrow: func(c redis.Conn, t time.Time) error {
+// if time.Since(t) < time.Minute {
+// return nil
+// }
+// _, err := c.Do("PING")
+// return err
+// },
+// }
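+//
+// Use the MaxActive and Wait fields to bound the number of connections. The
+// values below are illustrative, not a recommendation; Get blocks when all
+// 10 connections are in use:
+//
+//	pool := &redis.Pool{
+//		// Other pool configuration not shown in this example.
+//		MaxActive: 10,
+//		Wait:      true,
+//	}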
+//
+type Pool struct {
+ // Dial is an application supplied function for creating and configuring a
+ // connection.
+ //
+ // The connection returned from Dial must not be in a special state
+ // (subscribed to pubsub channel, transaction started, ...).
+ Dial func() (Conn, error)
+
+ // TestOnBorrow is an optional application supplied function for checking
+ // the health of an idle connection before the connection is used again by
+ // the application. Argument t is the time that the connection was returned
+ // to the pool. If the function returns an error, then the connection is
+ // closed.
+ TestOnBorrow func(c Conn, t time.Time) error
+
+ // Maximum number of idle connections in the pool.
+ MaxIdle int
+
+ // Maximum number of connections allocated by the pool at a given time.
+ // When zero, there is no limit on the number of connections in the pool.
+ MaxActive int
+
+ // Close connections after remaining idle for this duration. If the value
+ // is zero, then idle connections are not closed. Applications should set
+ // the timeout to a value less than the server's timeout.
+ IdleTimeout time.Duration
+
+ // If Wait is true and the pool is at the MaxActive limit, then Get() waits
+ // for a connection to be returned to the pool before returning.
+ Wait bool
+
+ // Close connections older than this duration. If the value is zero, then
+ // the pool does not close connections based on age.
+ MaxConnLifetime time.Duration
+
+ chInitialized uint32 // set to 1 when field ch is initialized
+
+ mu sync.Mutex // mu protects the following fields
+ closed bool // set to true when the pool is closed.
+ active int // the number of open connections in the pool
+ ch chan struct{} // limits open connections when p.Wait is true
+ idle idleList // idle connections
+}
+
+// NewPool creates a new pool.
+//
+// Deprecated: Initialize the Pool directly as shown in the example.
+func NewPool(newFn func() (Conn, error), maxIdle int) *Pool {
+ return &Pool{Dial: newFn, MaxIdle: maxIdle}
+}
+
+// Get gets a connection. The application must close the returned connection.
+// This method always returns a valid connection so that applications can defer
+// error handling to the first use of the connection. If there is an error
+// getting an underlying connection, then the connection Err, Do, Send, Flush
+// and Receive methods return that error.
+func (p *Pool) Get() Conn {
+ pc, err := p.get(nil)
+ if err != nil {
+ return errorConn{err}
+ }
+ return &activeConn{p: p, pc: pc}
+}
+
+// PoolStats contains pool statistics.
+type PoolStats struct {
+ // ActiveCount is the number of connections in the pool. The count includes
+ // idle connections and connections in use.
+ ActiveCount int
+ // IdleCount is the number of idle connections in the pool.
+ IdleCount int
+}
+
+// Stats returns the pool's statistics.
+func (p *Pool) Stats() PoolStats {
+ p.mu.Lock()
+ stats := PoolStats{
+ ActiveCount: p.active,
+ IdleCount: p.idle.count,
+ }
+ p.mu.Unlock()
+
+ return stats
+}
+
+// ActiveCount returns the number of connections in the pool. The count
+// includes idle connections and connections in use.
+func (p *Pool) ActiveCount() int {
+ p.mu.Lock()
+ active := p.active
+ p.mu.Unlock()
+ return active
+}
+
+// IdleCount returns the number of idle connections in the pool.
+func (p *Pool) IdleCount() int {
+ p.mu.Lock()
+ idle := p.idle.count
+ p.mu.Unlock()
+ return idle
+}
+
+// Close releases the resources used by the pool.
+func (p *Pool) Close() error {
+ p.mu.Lock()
+ if p.closed {
+ p.mu.Unlock()
+ return nil
+ }
+ p.closed = true
+ p.active -= p.idle.count
+ pc := p.idle.front
+ p.idle.count = 0
+ p.idle.front, p.idle.back = nil, nil
+ if p.ch != nil {
+ close(p.ch)
+ }
+ p.mu.Unlock()
+ for ; pc != nil; pc = pc.next {
+ pc.c.Close()
+ }
+ return nil
+}
+
+func (p *Pool) lazyInit() {
+ // Fast path.
+ if atomic.LoadUint32(&p.chInitialized) == 1 {
+ return
+ }
+ // Slow path.
+ p.mu.Lock()
+ if p.chInitialized == 0 {
+ p.ch = make(chan struct{}, p.MaxActive)
+ if p.closed {
+ close(p.ch)
+ } else {
+ for i := 0; i < p.MaxActive; i++ {
+ p.ch <- struct{}{}
+ }
+ }
+ atomic.StoreUint32(&p.chInitialized, 1)
+ }
+ p.mu.Unlock()
+}
+
+// get prunes stale connections and returns a connection from the idle list or
+// creates a new connection.
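+//
+// The ctx argument is declared as an anonymous interface rather than
+// context.Context so that this file avoids importing the context package,
+// which is unavailable before Go 1.7; GetContext in pool17.go (built only
+// for go1.7 and later) passes a real context.Context here.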
+func (p *Pool) get(ctx interface {
+ Done() <-chan struct{}
+ Err() error
+}) (*poolConn, error) {
+
+ // Handle limit for p.Wait == true.
+ if p.Wait && p.MaxActive > 0 {
+ p.lazyInit()
+ if ctx == nil {
+ <-p.ch
+ } else {
+ select {
+ case <-p.ch:
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ }
+ }
+
+ p.mu.Lock()
+
+ // Prune stale connections at the back of the idle list.
+ if p.IdleTimeout > 0 {
+ n := p.idle.count
+ for i := 0; i < n && p.idle.back != nil && p.idle.back.t.Add(p.IdleTimeout).Before(nowFunc()); i++ {
+ pc := p.idle.back
+ p.idle.popBack()
+ p.mu.Unlock()
+ pc.c.Close()
+ p.mu.Lock()
+ p.active--
+ }
+ }
+
+ // Get idle connection from the front of idle list.
+ for p.idle.front != nil {
+ pc := p.idle.front
+ p.idle.popFront()
+ p.mu.Unlock()
+ if (p.TestOnBorrow == nil || p.TestOnBorrow(pc.c, pc.t) == nil) &&
+ (p.MaxConnLifetime == 0 || nowFunc().Sub(pc.created) < p.MaxConnLifetime) {
+ return pc, nil
+ }
+ pc.c.Close()
+ p.mu.Lock()
+ p.active--
+ }
+
+ // Check for pool closed before dialing a new connection.
+ if p.closed {
+ p.mu.Unlock()
+ return nil, errors.New("redigo: get on closed pool")
+ }
+
+ // Handle limit for p.Wait == false.
+ if !p.Wait && p.MaxActive > 0 && p.active >= p.MaxActive {
+ p.mu.Unlock()
+ return nil, ErrPoolExhausted
+ }
+
+ p.active++
+ p.mu.Unlock()
+ c, err := p.Dial()
+ if err != nil {
+ c = nil
+ p.mu.Lock()
+ p.active--
+ if p.ch != nil && !p.closed {
+ p.ch <- struct{}{}
+ }
+ p.mu.Unlock()
+ }
+ return &poolConn{c: c, created: nowFunc()}, err
+}
+
+func (p *Pool) put(pc *poolConn, forceClose bool) error {
+ p.mu.Lock()
+ if !p.closed && !forceClose {
+ pc.t = nowFunc()
+ p.idle.pushFront(pc)
+ if p.idle.count > p.MaxIdle {
+ pc = p.idle.back
+ p.idle.popBack()
+ } else {
+ pc = nil
+ }
+ }
+
+ if pc != nil {
+ p.mu.Unlock()
+ pc.c.Close()
+ p.mu.Lock()
+ p.active--
+ }
+
+ if p.ch != nil && !p.closed {
+ p.ch <- struct{}{}
+ }
+ p.mu.Unlock()
+ return nil
+}
+
+type activeConn struct {
+ p *Pool
+ pc *poolConn
+ state int
+}
+
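+// sentinel is a random value used by activeConn.Close to find the end of a
+// subscribed connection's pushed message stream: the value is sent with
+// ECHO and replies are drained until it echoes back.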
+var (
+ sentinel []byte
+ sentinelOnce sync.Once
+)
+
+func initSentinel() {
+ p := make([]byte, 64)
+ if _, err := rand.Read(p); err == nil {
+ sentinel = p
+ } else {
+ h := sha1.New()
+ io.WriteString(h, "Oops, rand failed. Use time instead.")
+ io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10))
+ sentinel = h.Sum(nil)
+ }
+}
+
+func (ac *activeConn) Close() error {
+ pc := ac.pc
+ if pc == nil {
+ return nil
+ }
+ ac.pc = nil
+
+ if ac.state&internal.MultiState != 0 {
+ pc.c.Send("DISCARD")
+ ac.state &^= (internal.MultiState | internal.WatchState)
+ } else if ac.state&internal.WatchState != 0 {
+ pc.c.Send("UNWATCH")
+ ac.state &^= internal.WatchState
+ }
+ if ac.state&internal.SubscribeState != 0 {
+ pc.c.Send("UNSUBSCRIBE")
+ pc.c.Send("PUNSUBSCRIBE")
+ // To detect the end of the message stream, ask the server to echo
+ // a sentinel value and read until we see that value.
+ sentinelOnce.Do(initSentinel)
+ pc.c.Send("ECHO", sentinel)
+ pc.c.Flush()
+ for {
+ p, err := pc.c.Receive()
+ if err != nil {
+ break
+ }
+ if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) {
+ ac.state &^= internal.SubscribeState
+ break
+ }
+ }
+ }
+ pc.c.Do("")
+ ac.p.put(pc, ac.state != 0 || pc.c.Err() != nil)
+ return nil
+}
+
+func (ac *activeConn) Err() error {
+ pc := ac.pc
+ if pc == nil {
+ return errConnClosed
+ }
+ return pc.c.Err()
+}
+
+func (ac *activeConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) {
+ pc := ac.pc
+ if pc == nil {
+ return nil, errConnClosed
+ }
+ ci := internal.LookupCommandInfo(commandName)
+ ac.state = (ac.state | ci.Set) &^ ci.Clear
+ return pc.c.Do(commandName, args...)
+}
+
+func (ac *activeConn) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error) {
+ pc := ac.pc
+ if pc == nil {
+ return nil, errConnClosed
+ }
+ cwt, ok := pc.c.(ConnWithTimeout)
+ if !ok {
+ return nil, errTimeoutNotSupported
+ }
+ ci := internal.LookupCommandInfo(commandName)
+ ac.state = (ac.state | ci.Set) &^ ci.Clear
+ return cwt.DoWithTimeout(timeout, commandName, args...)
+}
+
+func (ac *activeConn) Send(commandName string, args ...interface{}) error {
+ pc := ac.pc
+ if pc == nil {
+ return errConnClosed
+ }
+ ci := internal.LookupCommandInfo(commandName)
+ ac.state = (ac.state | ci.Set) &^ ci.Clear
+ return pc.c.Send(commandName, args...)
+}
+
+func (ac *activeConn) Flush() error {
+ pc := ac.pc
+ if pc == nil {
+ return errConnClosed
+ }
+ return pc.c.Flush()
+}
+
+func (ac *activeConn) Receive() (reply interface{}, err error) {
+ pc := ac.pc
+ if pc == nil {
+ return nil, errConnClosed
+ }
+ return pc.c.Receive()
+}
+
+func (ac *activeConn) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) {
+ pc := ac.pc
+ if pc == nil {
+ return nil, errConnClosed
+ }
+ cwt, ok := pc.c.(ConnWithTimeout)
+ if !ok {
+ return nil, errTimeoutNotSupported
+ }
+ return cwt.ReceiveWithTimeout(timeout)
+}
+
+type errorConn struct{ err error }
+
+func (ec errorConn) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err }
+func (ec errorConn) DoWithTimeout(time.Duration, string, ...interface{}) (interface{}, error) {
+ return nil, ec.err
+}
+func (ec errorConn) Send(string, ...interface{}) error { return ec.err }
+func (ec errorConn) Err() error { return ec.err }
+func (ec errorConn) Close() error { return nil }
+func (ec errorConn) Flush() error { return ec.err }
+func (ec errorConn) Receive() (interface{}, error) { return nil, ec.err }
+func (ec errorConn) ReceiveWithTimeout(time.Duration) (interface{}, error) { return nil, ec.err }
+
+type idleList struct {
+ count int
+ front, back *poolConn
+}
+
+type poolConn struct {
+ c Conn
+ t time.Time
+ created time.Time
+ next, prev *poolConn
+}
+
+func (l *idleList) pushFront(pc *poolConn) {
+ pc.next = l.front
+ pc.prev = nil
+ if l.count == 0 {
+ l.back = pc
+ } else {
+ l.front.prev = pc
+ }
+ l.front = pc
+ l.count++
+}
+
+func (l *idleList) popFront() {
+ pc := l.front
+ l.count--
+ if l.count == 0 {
+ l.front, l.back = nil, nil
+ } else {
+ pc.next.prev = nil
+ l.front = pc.next
+ }
+ pc.next, pc.prev = nil, nil
+}
+
+func (l *idleList) popBack() {
+ pc := l.back
+ l.count--
+ if l.count == 0 {
+ l.front, l.back = nil, nil
+ } else {
+ pc.prev.next = nil
+ l.back = pc.prev
+ }
+ pc.next, pc.prev = nil, nil
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/pool17.go b/vendor/github.com/gomodule/redigo/redis/pool17.go
new file mode 100644
index 000000000..c1ea18ee3
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/pool17.go
@@ -0,0 +1,35 @@
+// Copyright 2018 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// +build go1.7
+
+package redis
+
+import "context"
+
+// GetContext gets a connection using the provided context.
+//
+// The provided Context must be non-nil. If the context expires before the
+// connection is complete, an error is returned. Any expiration on the context
+// will not affect the returned connection.
+//
+// If the function completes without error, then the application must close the
+// returned connection.
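+//
+// A usage sketch, assuming an initialized pool:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+//	defer cancel()
+//	conn, err := pool.GetContext(ctx)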
+func (p *Pool) GetContext(ctx context.Context) (Conn, error) {
+ pc, err := p.get(ctx)
+ if err != nil {
+ return errorConn{err}, err
+ }
+ return &activeConn{p: p, pc: pc}, nil
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/pubsub.go b/vendor/github.com/gomodule/redigo/redis/pubsub.go
new file mode 100644
index 000000000..2da60211d
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/pubsub.go
@@ -0,0 +1,148 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "errors"
+ "time"
+)
+
+// Subscription represents a subscribe or unsubscribe notification.
+type Subscription struct {
+	// Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
+ Kind string
+
+ // The channel that was changed.
+ Channel string
+
+	// The current number of subscriptions for the connection.
+ Count int
+}
+
+// Message represents a message notification.
+type Message struct {
+ // The originating channel.
+ Channel string
+
+	// The matched pattern, if any.
+ Pattern string
+
+ // The message data.
+ Data []byte
+}
+
+// Pong represents a pubsub pong notification.
+type Pong struct {
+ Data string
+}
+
+// PubSubConn wraps a Conn with convenience methods for subscribers.
+type PubSubConn struct {
+ Conn Conn
+}
+
+// Close closes the connection.
+func (c PubSubConn) Close() error {
+ return c.Conn.Close()
+}
+
+// Subscribe subscribes the connection to the specified channels.
+func (c PubSubConn) Subscribe(channel ...interface{}) error {
+ c.Conn.Send("SUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// PSubscribe subscribes the connection to the given patterns.
+func (c PubSubConn) PSubscribe(channel ...interface{}) error {
+ c.Conn.Send("PSUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// Unsubscribe unsubscribes the connection from the given channels, or from all
+// of them if none is given.
+func (c PubSubConn) Unsubscribe(channel ...interface{}) error {
+ c.Conn.Send("UNSUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// PUnsubscribe unsubscribes the connection from the given patterns, or from all
+// of them if none is given.
+func (c PubSubConn) PUnsubscribe(channel ...interface{}) error {
+ c.Conn.Send("PUNSUBSCRIBE", channel...)
+ return c.Conn.Flush()
+}
+
+// Ping sends a PING to the server with the specified data.
+//
+// The connection must be subscribed to at least one channel or pattern when
+// calling this method.
+func (c PubSubConn) Ping(data string) error {
+ c.Conn.Send("PING", data)
+ return c.Conn.Flush()
+}
+
+// Receive returns a pushed message as a Subscription, Message, Pong or error.
+// The return value is intended to be used directly in a type switch as
+// illustrated in the PubSubConn example.
+func (c PubSubConn) Receive() interface{} {
+ return c.receiveInternal(c.Conn.Receive())
+}
+
+// ReceiveWithTimeout is like Receive, but it allows the application to
+// override the connection's default timeout.
+func (c PubSubConn) ReceiveWithTimeout(timeout time.Duration) interface{} {
+ return c.receiveInternal(ReceiveWithTimeout(c.Conn, timeout))
+}
+
+func (c PubSubConn) receiveInternal(replyArg interface{}, errArg error) interface{} {
+ reply, err := Values(replyArg, errArg)
+ if err != nil {
+ return err
+ }
+
+ var kind string
+ reply, err = Scan(reply, &kind)
+ if err != nil {
+ return err
+ }
+
+ switch kind {
+ case "message":
+ var m Message
+ if _, err := Scan(reply, &m.Channel, &m.Data); err != nil {
+ return err
+ }
+ return m
+ case "pmessage":
+ var m Message
+ if _, err := Scan(reply, &m.Pattern, &m.Channel, &m.Data); err != nil {
+ return err
+ }
+ return m
+ case "subscribe", "psubscribe", "unsubscribe", "punsubscribe":
+ s := Subscription{Kind: kind}
+ if _, err := Scan(reply, &s.Channel, &s.Count); err != nil {
+ return err
+ }
+ return s
+ case "pong":
+ var p Pong
+ if _, err := Scan(reply, &p.Data); err != nil {
+ return err
+ }
+ return p
+ }
+ return errors.New("redigo: unknown pubsub notification")
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/redis.go b/vendor/github.com/gomodule/redigo/redis/redis.go
new file mode 100644
index 000000000..141fa4a91
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/redis.go
@@ -0,0 +1,117 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "errors"
+ "time"
+)
+
+// Error represents an error returned in a command reply.
+type Error string
+
+func (err Error) Error() string { return string(err) }
+
+// Conn represents a connection to a Redis server.
+type Conn interface {
+ // Close closes the connection.
+ Close() error
+
+ // Err returns a non-nil value when the connection is not usable.
+ Err() error
+
+ // Do sends a command to the server and returns the received reply.
+ Do(commandName string, args ...interface{}) (reply interface{}, err error)
+
+ // Send writes the command to the client's output buffer.
+ Send(commandName string, args ...interface{}) error
+
+ // Flush flushes the output buffer to the Redis server.
+ Flush() error
+
+	// Receive receives a single reply from the Redis server.
+ Receive() (reply interface{}, err error)
+}
+
+// Argument is the interface implemented by an object which wants to control how
+// the object is converted to Redis bulk strings.
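+//
+// For example (a sketch; the UserID type is hypothetical):
+//
+//	type UserID int
+//
+//	func (id UserID) RedisArg() interface{} {
+//		return strconv.FormatInt(int64(id), 10)
+//	}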
+type Argument interface {
+ // RedisArg returns a value to be encoded as a bulk string per the
+ // conversions listed in the section 'Executing Commands'.
+ // Implementations should typically return a []byte or string.
+ RedisArg() interface{}
+}
+
+// Scanner is implemented by an object which wants to control how its value is
+// interpreted when read from Redis.
+type Scanner interface {
+ // RedisScan assigns a value from a Redis value. The argument src is one of
+ // the reply types listed in the section `Executing Commands`.
+ //
+ // An error should be returned if the value cannot be stored without
+ // loss of information.
+ RedisScan(src interface{}) error
+}
+
+// ConnWithTimeout is an optional interface that allows the caller to override
+// a connection's default read timeout. This interface is useful for executing
+// the BLPOP, BRPOP, BRPOPLPUSH, XREAD and other commands that block at the
+// server.
+//
+// A connection's default read timeout is set with the DialReadTimeout dial
+// option. Applications should rely on the default timeout for commands that do
+// not block at the server.
+//
+// All of the Conn implementations in this package satisfy the ConnWithTimeout
+// interface.
+//
+// Use the DoWithTimeout and ReceiveWithTimeout helper functions to simplify
+// use of this interface.
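+//
+// For example, a BLPOP that blocks at the server for up to five seconds can
+// be paired with a slightly larger client-side read timeout (a sketch; the
+// "jobs" key is illustrative):
+//
+//	reply, err := redis.DoWithTimeout(c, 6*time.Second, "BLPOP", "jobs", 5)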
+type ConnWithTimeout interface {
+ Conn
+
+ // Do sends a command to the server and returns the received reply.
+ // The timeout overrides the read timeout set when dialing the
+ // connection.
+ DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error)
+
+ // Receive receives a single reply from the Redis server. The timeout
+ // overrides the read timeout set when dialing the connection.
+ ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error)
+}
+
+var errTimeoutNotSupported = errors.New("redigo: connection does not support ConnWithTimeout")
+
+// DoWithTimeout executes a Redis command with the specified read timeout. If
+// the connection does not satisfy the ConnWithTimeout interface, then an error
+// is returned.
+func DoWithTimeout(c Conn, timeout time.Duration, cmd string, args ...interface{}) (interface{}, error) {
+ cwt, ok := c.(ConnWithTimeout)
+ if !ok {
+ return nil, errTimeoutNotSupported
+ }
+ return cwt.DoWithTimeout(timeout, cmd, args...)
+}
+
+// ReceiveWithTimeout receives a reply with the specified read timeout. If the
+// connection does not satisfy the ConnWithTimeout interface, then an error is
+// returned.
+func ReceiveWithTimeout(c Conn, timeout time.Duration) (interface{}, error) {
+ cwt, ok := c.(ConnWithTimeout)
+ if !ok {
+ return nil, errTimeoutNotSupported
+ }
+ return cwt.ReceiveWithTimeout(timeout)
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/reply.go b/vendor/github.com/gomodule/redigo/redis/reply.go
new file mode 100644
index 000000000..c2b3b2b6e
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/reply.go
@@ -0,0 +1,479 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+)
+
+// ErrNil indicates that a reply value is nil.
+var ErrNil = errors.New("redigo: nil returned")
+
+// Int is a helper that converts a command reply to an integer. If err is not
+// equal to nil, then Int returns 0, err. Otherwise, Int converts the
+// reply to an int as follows:
+//
+// Reply type Result
+// integer int(reply), nil
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Int(reply interface{}, err error) (int, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ x := int(reply)
+ if int64(x) != reply {
+ return 0, strconv.ErrRange
+ }
+ return x, nil
+ case []byte:
+ n, err := strconv.ParseInt(string(reply), 10, 0)
+ return int(n), err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply)
+}
+
+// Int64 is a helper that converts a command reply to a 64 bit integer. If err
+// is not equal to nil, then Int64 returns 0, err. Otherwise, Int64 converts
+// the reply to an int64 as follows:
+//
+// Reply type Result
+// integer reply, nil
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Int64(reply interface{}, err error) (int64, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ return reply, nil
+ case []byte:
+ n, err := strconv.ParseInt(string(reply), 10, 64)
+ return n, err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply)
+}
+
+var errNegativeInt = errors.New("redigo: unexpected value for Uint64")
+
+// Uint64 is a helper that converts a command reply to an unsigned 64 bit
+// integer. If err is not equal to nil, then Uint64 returns 0, err. Otherwise,
+// Uint64 converts the reply to a uint64 as follows:
+//
+// Reply type Result
+// integer reply, nil
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Uint64(reply interface{}, err error) (uint64, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ if reply < 0 {
+ return 0, errNegativeInt
+ }
+ return uint64(reply), nil
+ case []byte:
+ n, err := strconv.ParseUint(string(reply), 10, 64)
+ return n, err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply)
+}
+
+// Float64 is a helper that converts a command reply to a 64 bit float. If err
+// is not equal to nil, then Float64 returns 0, err. Otherwise, Float64
+// converts the reply to a float64 as follows:
+//
+// Reply type Result
+// bulk string parsed reply, nil
+// nil 0, ErrNil
+// other 0, error
+func Float64(reply interface{}, err error) (float64, error) {
+ if err != nil {
+ return 0, err
+ }
+ switch reply := reply.(type) {
+ case []byte:
+ n, err := strconv.ParseFloat(string(reply), 64)
+ return n, err
+ case nil:
+ return 0, ErrNil
+ case Error:
+ return 0, reply
+ }
+ return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply)
+}
+
+// String is a helper that converts a command reply to a string. If err is not
+// equal to nil, then String returns "", err. Otherwise String converts the
+// reply to a string as follows:
+//
+// Reply type Result
+// bulk string string(reply), nil
+// simple string reply, nil
+// nil "", ErrNil
+// other "", error
+func String(reply interface{}, err error) (string, error) {
+ if err != nil {
+ return "", err
+ }
+ switch reply := reply.(type) {
+ case []byte:
+ return string(reply), nil
+ case string:
+ return reply, nil
+ case nil:
+ return "", ErrNil
+ case Error:
+ return "", reply
+ }
+ return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply)
+}
+
+// Bytes is a helper that converts a command reply to a slice of bytes. If err
+// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts
+// the reply to a slice of bytes as follows:
+//
+// Reply type Result
+// bulk string reply, nil
+// simple string []byte(reply), nil
+// nil nil, ErrNil
+// other nil, error
+func Bytes(reply interface{}, err error) ([]byte, error) {
+ if err != nil {
+ return nil, err
+ }
+ switch reply := reply.(type) {
+ case []byte:
+ return reply, nil
+ case string:
+ return []byte(reply), nil
+ case nil:
+ return nil, ErrNil
+ case Error:
+ return nil, reply
+ }
+ return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply)
+}
+
+// Bool is a helper that converts a command reply to a boolean. If err is not
+// equal to nil, then Bool returns false, err. Otherwise Bool converts the
+// reply to a boolean as follows:
+//
+// Reply type Result
+// integer value != 0, nil
+// bulk string strconv.ParseBool(reply)
+// nil false, ErrNil
+// other false, error
+func Bool(reply interface{}, err error) (bool, error) {
+ if err != nil {
+ return false, err
+ }
+ switch reply := reply.(type) {
+ case int64:
+ return reply != 0, nil
+ case []byte:
+ return strconv.ParseBool(string(reply))
+ case nil:
+ return false, ErrNil
+ case Error:
+ return false, reply
+ }
+ return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply)
+}
+
+// MultiBulk is a helper that converts an array command reply to a []interface{}.
+//
+// Deprecated: Use Values instead.
+func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) }
+
+// Values is a helper that converts an array command reply to a []interface{}.
+// If err is not equal to nil, then Values returns nil, err. Otherwise, Values
+// converts the reply as follows:
+//
+// Reply type Result
+// array reply, nil
+// nil nil, ErrNil
+// other nil, error
+func Values(reply interface{}, err error) ([]interface{}, error) {
+ if err != nil {
+ return nil, err
+ }
+ switch reply := reply.(type) {
+ case []interface{}:
+ return reply, nil
+ case nil:
+ return nil, ErrNil
+ case Error:
+ return nil, reply
+ }
+ return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply)
+}
+
+func sliceHelper(reply interface{}, err error, name string, makeSlice func(int), assign func(int, interface{}) error) error {
+ if err != nil {
+ return err
+ }
+ switch reply := reply.(type) {
+ case []interface{}:
+ makeSlice(len(reply))
+ for i := range reply {
+ if reply[i] == nil {
+ continue
+ }
+ if err := assign(i, reply[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+ case nil:
+ return ErrNil
+ case Error:
+ return reply
+ }
+ return fmt.Errorf("redigo: unexpected type for %s, got type %T", name, reply)
+}
+
+// Float64s is a helper that converts an array command reply to a []float64.
+// If err is not equal to nil, then Float64s returns nil, err. Nil array items
+// are converted to 0 in the output slice. Float64s returns an error if an
+// array item is not a bulk string or nil.
+func Float64s(reply interface{}, err error) ([]float64, error) {
+ var result []float64
+ err = sliceHelper(reply, err, "Float64s", func(n int) { result = make([]float64, n) }, func(i int, v interface{}) error {
+ p, ok := v.([]byte)
+ if !ok {
+			return fmt.Errorf("redigo: unexpected element type for Float64s, got type %T", v)
+ }
+ f, err := strconv.ParseFloat(string(p), 64)
+ result[i] = f
+ return err
+ })
+ return result, err
+}
+
+// Strings is a helper that converts an array command reply to a []string. If
+// err is not equal to nil, then Strings returns nil, err. Nil array items are
+// converted to "" in the output slice. Strings returns an error if an array
+// item is not a bulk string or nil.
+func Strings(reply interface{}, err error) ([]string, error) {
+ var result []string
+ err = sliceHelper(reply, err, "Strings", func(n int) { result = make([]string, n) }, func(i int, v interface{}) error {
+ switch v := v.(type) {
+ case string:
+ result[i] = v
+ return nil
+ case []byte:
+ result[i] = string(v)
+ return nil
+ default:
+ return fmt.Errorf("redigo: unexpected element type for Strings, got type %T", v)
+ }
+ })
+ return result, err
+}
+
+// ByteSlices is a helper that converts an array command reply to a [][]byte.
+// If err is not equal to nil, then ByteSlices returns nil, err. Nil array
+// items stay nil. ByteSlices returns an error if an array item is not a
+// bulk string or nil.
+func ByteSlices(reply interface{}, err error) ([][]byte, error) {
+ var result [][]byte
+ err = sliceHelper(reply, err, "ByteSlices", func(n int) { result = make([][]byte, n) }, func(i int, v interface{}) error {
+ p, ok := v.([]byte)
+ if !ok {
+ return fmt.Errorf("redigo: unexpected element type for ByteSlices, got type %T", v)
+ }
+ result[i] = p
+ return nil
+ })
+ return result, err
+}
+
+// Int64s is a helper that converts an array command reply to a []int64.
+// If err is not equal to nil, then Int64s returns nil, err. Nil array
+// items are converted to 0 in the output slice. Int64s returns an error if
+// an array item is not a bulk string, integer or nil.
+func Int64s(reply interface{}, err error) ([]int64, error) {
+ var result []int64
+ err = sliceHelper(reply, err, "Int64s", func(n int) { result = make([]int64, n) }, func(i int, v interface{}) error {
+ switch v := v.(type) {
+ case int64:
+ result[i] = v
+ return nil
+ case []byte:
+ n, err := strconv.ParseInt(string(v), 10, 64)
+ result[i] = n
+ return err
+ default:
+ return fmt.Errorf("redigo: unexpected element type for Int64s, got type %T", v)
+ }
+ })
+ return result, err
+}
+
+// Ints is a helper that converts an array command reply to a []int.
+// If err is not equal to nil, then Ints returns nil, err. Nil array
+// items are converted to 0 in the output slice. Ints returns an error if
+// an array item is not a bulk string, integer or nil.
+func Ints(reply interface{}, err error) ([]int, error) {
+ var result []int
+ err = sliceHelper(reply, err, "Ints", func(n int) { result = make([]int, n) }, func(i int, v interface{}) error {
+ switch v := v.(type) {
+ case int64:
+ n := int(v)
+ if int64(n) != v {
+ return strconv.ErrRange
+ }
+ result[i] = n
+ return nil
+ case []byte:
+ n, err := strconv.Atoi(string(v))
+ result[i] = n
+ return err
+ default:
+ return fmt.Errorf("redigo: unexpected element type for Ints, got type %T", v)
+ }
+ })
+ return result, err
+}
+
+// StringMap is a helper that converts an array of strings (alternating key, value)
+// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format.
+// Requires an even number of values in result.
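+//
+// A usage sketch (the hash key is illustrative):
+//
+//	fields, err := redis.StringMap(c.Do("HGETALL", "user:1"))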
+func StringMap(result interface{}, err error) (map[string]string, error) {
+ values, err := Values(result, err)
+ if err != nil {
+ return nil, err
+ }
+ if len(values)%2 != 0 {
+ return nil, errors.New("redigo: StringMap expects even number of values result")
+ }
+ m := make(map[string]string, len(values)/2)
+ for i := 0; i < len(values); i += 2 {
+ key, okKey := values[i].([]byte)
+ value, okValue := values[i+1].([]byte)
+ if !okKey || !okValue {
+ return nil, errors.New("redigo: StringMap key not a bulk string value")
+ }
+ m[string(key)] = string(value)
+ }
+ return m, nil
+}
+
+// IntMap is a helper that converts an array of strings (alternating key, value)
+// into a map[string]int. The HGETALL command returns replies in this format.
+// Requires an even number of values in result.
+func IntMap(result interface{}, err error) (map[string]int, error) {
+ values, err := Values(result, err)
+ if err != nil {
+ return nil, err
+ }
+ if len(values)%2 != 0 {
+ return nil, errors.New("redigo: IntMap expects even number of values result")
+ }
+ m := make(map[string]int, len(values)/2)
+ for i := 0; i < len(values); i += 2 {
+ key, ok := values[i].([]byte)
+ if !ok {
+ return nil, errors.New("redigo: IntMap key not a bulk string value")
+ }
+ value, err := Int(values[i+1], nil)
+ if err != nil {
+ return nil, err
+ }
+ m[string(key)] = value
+ }
+ return m, nil
+}
+
+// Int64Map is a helper that converts an array of strings (alternating key, value)
+// into a map[string]int64. The HGETALL command returns replies in this format.
+// Requires an even number of values in result.
+func Int64Map(result interface{}, err error) (map[string]int64, error) {
+ values, err := Values(result, err)
+ if err != nil {
+ return nil, err
+ }
+ if len(values)%2 != 0 {
+ return nil, errors.New("redigo: Int64Map expects even number of values result")
+ }
+ m := make(map[string]int64, len(values)/2)
+ for i := 0; i < len(values); i += 2 {
+ key, ok := values[i].([]byte)
+ if !ok {
+ return nil, errors.New("redigo: Int64Map key not a bulk string value")
+ }
+ value, err := Int64(values[i+1], nil)
+ if err != nil {
+ return nil, err
+ }
+ m[string(key)] = value
+ }
+ return m, nil
+}
+
+// Positions is a helper that converts an array of positions (longitude,
+// latitude) into a [][2]float64. The GEOPOS command returns replies in this
+// format.
+func Positions(result interface{}, err error) ([]*[2]float64, error) {
+ values, err := Values(result, err)
+ if err != nil {
+ return nil, err
+ }
+ positions := make([]*[2]float64, len(values))
+ for i := range values {
+ if values[i] == nil {
+ continue
+ }
+ p, ok := values[i].([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("redigo: unexpected element type for interface slice, got type %T", values[i])
+ }
+ if len(p) != 2 {
+ return nil, fmt.Errorf("redigo: unexpected number of values for a member position, got %d", len(p))
+ }
+		longitude, err := Float64(p[0], nil)
+		if err != nil {
+			return nil, err
+		}
+		latitude, err := Float64(p[1], nil)
+		if err != nil {
+			return nil, err
+		}
+		positions[i] = &[2]float64{longitude, latitude}
+ }
+ return positions, nil
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/scan.go b/vendor/github.com/gomodule/redigo/redis/scan.go
new file mode 100644
index 000000000..ef9551bd4
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/scan.go
@@ -0,0 +1,585 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+func ensureLen(d reflect.Value, n int) {
+ if n > d.Cap() {
+ d.Set(reflect.MakeSlice(d.Type(), n, n))
+ } else {
+ d.SetLen(n)
+ }
+}
+
+func cannotConvert(d reflect.Value, s interface{}) error {
+ var sname string
+ switch s.(type) {
+ case string:
+ sname = "Redis simple string"
+ case Error:
+ sname = "Redis error"
+ case int64:
+ sname = "Redis integer"
+ case []byte:
+ sname = "Redis bulk string"
+ case []interface{}:
+ sname = "Redis array"
+ default:
+ sname = reflect.TypeOf(s).String()
+ }
+ return fmt.Errorf("cannot convert from %s to %s", sname, d.Type())
+}
+
+func convertAssignBulkString(d reflect.Value, s []byte) (err error) {
+ switch d.Type().Kind() {
+ case reflect.Float32, reflect.Float64:
+ var x float64
+ x, err = strconv.ParseFloat(string(s), d.Type().Bits())
+ d.SetFloat(x)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ var x int64
+ x, err = strconv.ParseInt(string(s), 10, d.Type().Bits())
+ d.SetInt(x)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ var x uint64
+ x, err = strconv.ParseUint(string(s), 10, d.Type().Bits())
+ d.SetUint(x)
+ case reflect.Bool:
+ var x bool
+ x, err = strconv.ParseBool(string(s))
+ d.SetBool(x)
+ case reflect.String:
+ d.SetString(string(s))
+ case reflect.Slice:
+ if d.Type().Elem().Kind() != reflect.Uint8 {
+ err = cannotConvert(d, s)
+ } else {
+ d.SetBytes(s)
+ }
+ default:
+ err = cannotConvert(d, s)
+ }
+ return
+}
+
+func convertAssignInt(d reflect.Value, s int64) (err error) {
+ switch d.Type().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ d.SetInt(s)
+ if d.Int() != s {
+ err = strconv.ErrRange
+ d.SetInt(0)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ if s < 0 {
+ err = strconv.ErrRange
+ } else {
+ x := uint64(s)
+ d.SetUint(x)
+ if d.Uint() != x {
+ err = strconv.ErrRange
+ d.SetUint(0)
+ }
+ }
+ case reflect.Bool:
+ d.SetBool(s != 0)
+ default:
+ err = cannotConvert(d, s)
+ }
+ return
+}
+
+func convertAssignValue(d reflect.Value, s interface{}) (err error) {
+ if d.Kind() != reflect.Ptr {
+ if d.CanAddr() {
+ d2 := d.Addr()
+ if d2.CanInterface() {
+ if scanner, ok := d2.Interface().(Scanner); ok {
+ return scanner.RedisScan(s)
+ }
+ }
+ }
+ } else if d.CanInterface() {
+ // Already a reflect.Ptr
+ if d.IsNil() {
+ d.Set(reflect.New(d.Type().Elem()))
+ }
+ if scanner, ok := d.Interface().(Scanner); ok {
+ return scanner.RedisScan(s)
+ }
+ }
+
+ switch s := s.(type) {
+ case []byte:
+ err = convertAssignBulkString(d, s)
+ case int64:
+ err = convertAssignInt(d, s)
+ default:
+ err = cannotConvert(d, s)
+ }
+ return err
+}
+
+func convertAssignArray(d reflect.Value, s []interface{}) error {
+ if d.Type().Kind() != reflect.Slice {
+ return cannotConvert(d, s)
+ }
+ ensureLen(d, len(s))
+ for i := 0; i < len(s); i++ {
+ if err := convertAssignValue(d.Index(i), s[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func convertAssign(d interface{}, s interface{}) (err error) {
+ if scanner, ok := d.(Scanner); ok {
+ return scanner.RedisScan(s)
+ }
+
+ // Handle the most common destination types using type switches and
+ // fall back to reflection for all other types.
+ switch s := s.(type) {
+ case nil:
+ // ignore
+ case []byte:
+ switch d := d.(type) {
+ case *string:
+ *d = string(s)
+ case *int:
+ *d, err = strconv.Atoi(string(s))
+ case *bool:
+ *d, err = strconv.ParseBool(string(s))
+ case *[]byte:
+ *d = s
+ case *interface{}:
+ *d = s
+ case nil:
+ // skip value
+ default:
+ if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+ err = cannotConvert(d, s)
+ } else {
+ err = convertAssignBulkString(d.Elem(), s)
+ }
+ }
+ case int64:
+ switch d := d.(type) {
+ case *int:
+ x := int(s)
+ if int64(x) != s {
+ err = strconv.ErrRange
+ x = 0
+ }
+ *d = x
+ case *bool:
+ *d = s != 0
+ case *interface{}:
+ *d = s
+ case nil:
+ // skip value
+ default:
+ if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+ err = cannotConvert(d, s)
+ } else {
+ err = convertAssignInt(d.Elem(), s)
+ }
+ }
+ case string:
+ switch d := d.(type) {
+ case *string:
+ *d = s
+ case *interface{}:
+ *d = s
+ case nil:
+ // skip value
+ default:
+ err = cannotConvert(reflect.ValueOf(d), s)
+ }
+ case []interface{}:
+ switch d := d.(type) {
+ case *[]interface{}:
+ *d = s
+ case *interface{}:
+ *d = s
+ case nil:
+ // skip value
+ default:
+ if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
+ err = cannotConvert(d, s)
+ } else {
+ err = convertAssignArray(d.Elem(), s)
+ }
+ }
+ case Error:
+ err = s
+ default:
+ err = cannotConvert(reflect.ValueOf(d), s)
+ }
+ return
+}
+
+// Scan copies from src to the values pointed at by dest.
+//
+// Scan uses RedisScan if available, otherwise:
+//
+// The values pointed at by dest must be an integer, float, boolean, string,
+// []byte, interface{} or slices of these types. Scan uses the standard strconv
+// package to convert bulk strings to numeric and boolean types.
+//
+// If a dest value is nil, then the corresponding src value is skipped.
+//
+// If a src element is nil, then the corresponding dest value is not modified.
+//
+// To enable easy use of Scan in a loop, Scan returns the slice of src
+// following the copied values.
+func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) {
+ if len(src) < len(dest) {
+ return nil, errors.New("redigo.Scan: array short")
+ }
+ var err error
+ for i, d := range dest {
+ err = convertAssign(d, src[i])
+ if err != nil {
+ err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err)
+ break
+ }
+ }
+ return src[len(dest):], err
+}
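+
+// A sketch of the loop pattern enabled by the returned tail (c and the keys
+// are hypothetical):
+//
+//	values, err := redis.Values(c.Do("MGET", "k1", "k2", "k3"))
+//	for err == nil && len(values) > 0 {
+//		var s string
+//		values, err = redis.Scan(values, &s)
+//		// use s
+//	}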
+
+type fieldSpec struct {
+ name string
+ index []int
+ omitEmpty bool
+}
+
+type structSpec struct {
+ m map[string]*fieldSpec
+ l []*fieldSpec
+}
+
+func (ss *structSpec) fieldSpec(name []byte) *fieldSpec {
+ return ss.m[string(name)]
+}
+
+func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) {
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ switch {
+ case f.PkgPath != "" && !f.Anonymous:
+ // Ignore unexported fields.
+ case f.Anonymous:
+ // TODO: Handle pointers. Requires change to decoder and
+ // protection against infinite recursion.
+ if f.Type.Kind() == reflect.Struct {
+ compileStructSpec(f.Type, depth, append(index, i), ss)
+ }
+ default:
+ fs := &fieldSpec{name: f.Name}
+ tag := f.Tag.Get("redis")
+ p := strings.Split(tag, ",")
+ if len(p) > 0 {
+ if p[0] == "-" {
+ continue
+ }
+ if len(p[0]) > 0 {
+ fs.name = p[0]
+ }
+ for _, s := range p[1:] {
+ switch s {
+ case "omitempty":
+ fs.omitEmpty = true
+ default:
+ panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name()))
+ }
+ }
+ }
+ d, found := depth[fs.name]
+ if !found {
+ d = 1 << 30
+ }
+ switch {
+ case len(index) == d:
+ // At same depth, remove from result.
+ delete(ss.m, fs.name)
+ j := 0
+ for i := 0; i < len(ss.l); i++ {
+ if fs.name != ss.l[i].name {
+ ss.l[j] = ss.l[i]
+ j++
+ }
+ }
+ ss.l = ss.l[:j]
+ case len(index) < d:
+ fs.index = make([]int, len(index)+1)
+ copy(fs.index, index)
+ fs.index[len(index)] = i
+ depth[fs.name] = len(index)
+ ss.m[fs.name] = fs
+ ss.l = append(ss.l, fs)
+ }
+ }
+ }
+}
+
+var (
+ structSpecMutex sync.RWMutex
+ structSpecCache = make(map[reflect.Type]*structSpec)
+ defaultFieldSpec = &fieldSpec{}
+)
+
+func structSpecForType(t reflect.Type) *structSpec {
+
+ structSpecMutex.RLock()
+ ss, found := structSpecCache[t]
+ structSpecMutex.RUnlock()
+ if found {
+ return ss
+ }
+
+ structSpecMutex.Lock()
+ defer structSpecMutex.Unlock()
+ ss, found = structSpecCache[t]
+ if found {
+ return ss
+ }
+
+ ss = &structSpec{m: make(map[string]*fieldSpec)}
+ compileStructSpec(t, make(map[string]int), nil, ss)
+ structSpecCache[t] = ss
+ return ss
+}
+
+var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct")
+
+// ScanStruct scans alternating names and values from src to a struct. The
+// HGETALL and CONFIG GET commands return replies in this format.
+//
+// ScanStruct uses exported field names to match values in the response. Use
+// 'redis' field tag to override the name:
+//
+// Field int `redis:"myName"`
+//
+// Fields with the tag redis:"-" are ignored.
+//
+// Each field uses RedisScan if available, otherwise:
+// Integer, float, boolean, string and []byte fields are supported. Scan uses the
+// standard strconv package to convert bulk string values to numeric and
+// boolean types.
+//
+// If a src element is nil, then the corresponding field is not modified.
+func ScanStruct(src []interface{}, dest interface{}) error {
+ d := reflect.ValueOf(dest)
+ if d.Kind() != reflect.Ptr || d.IsNil() {
+ return errScanStructValue
+ }
+ d = d.Elem()
+ if d.Kind() != reflect.Struct {
+ return errScanStructValue
+ }
+ ss := structSpecForType(d.Type())
+
+ if len(src)%2 != 0 {
+ return errors.New("redigo.ScanStruct: number of values not a multiple of 2")
+ }
+
+ for i := 0; i < len(src); i += 2 {
+ s := src[i+1]
+ if s == nil {
+ continue
+ }
+ name, ok := src[i].([]byte)
+ if !ok {
+ return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i)
+ }
+ fs := ss.fieldSpec(name)
+ if fs == nil {
+ continue
+ }
+ if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
+ return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err)
+ }
+ }
+ return nil
+}
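+
+// A minimal sketch (the struct and key are hypothetical):
+//
+//	type user struct {
+//		Name string `redis:"name"`
+//		Age  int    `redis:"age"`
+//	}
+//
+//	v, err := redis.Values(c.Do("HGETALL", "user:1"))
+//	if err == nil {
+//		var u user
+//		err = redis.ScanStruct(v, &u)
+//	}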
+
+var (
+ errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a slice")
+)
+
+// ScanSlice scans src to the slice pointed to by dest. The elements of the
+// dest slice must be integer, float, boolean, string, struct or pointer to
+// struct values.
+//
+// Struct fields must be integer, float, boolean or string values. All struct
+// fields are used unless a subset is specified using fieldNames.
+func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error {
+ d := reflect.ValueOf(dest)
+ if d.Kind() != reflect.Ptr || d.IsNil() {
+ return errScanSliceValue
+ }
+ d = d.Elem()
+ if d.Kind() != reflect.Slice {
+ return errScanSliceValue
+ }
+
+ isPtr := false
+ t := d.Type().Elem()
+ if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
+ isPtr = true
+ t = t.Elem()
+ }
+
+ if t.Kind() != reflect.Struct {
+ ensureLen(d, len(src))
+ for i, s := range src {
+ if s == nil {
+ continue
+ }
+ if err := convertAssignValue(d.Index(i), s); err != nil {
+ return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err)
+ }
+ }
+ return nil
+ }
+
+ ss := structSpecForType(t)
+ fss := ss.l
+ if len(fieldNames) > 0 {
+ fss = make([]*fieldSpec, len(fieldNames))
+ for i, name := range fieldNames {
+ fss[i] = ss.m[name]
+ if fss[i] == nil {
+ return fmt.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name)
+ }
+ }
+ }
+
+ if len(fss) == 0 {
+ return errors.New("redigo.ScanSlice: no struct fields")
+ }
+
+ n := len(src) / len(fss)
+ if n*len(fss) != len(src) {
+ return errors.New("redigo.ScanSlice: length not a multiple of struct field count")
+ }
+
+ ensureLen(d, n)
+ for i := 0; i < n; i++ {
+ d := d.Index(i)
+ if isPtr {
+ if d.IsNil() {
+ d.Set(reflect.New(t))
+ }
+ d = d.Elem()
+ }
+ for j, fs := range fss {
+ s := src[i*len(fss)+j]
+ if s == nil {
+ continue
+ }
+ if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
+ return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err)
+ }
+ }
+ }
+ return nil
+}
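+
+// A minimal sketch: a flat reply of alternating name/age pairs scanned into
+// a struct slice (the SORT key, GET patterns and element type are
+// hypothetical):
+//
+//	v, err := redis.Values(c.Do("SORT", "ids", "GET", "u:*->name", "GET", "u:*->age"))
+//	if err == nil {
+//		var users []struct {
+//			Name string
+//			Age  int
+//		}
+//		err = redis.ScanSlice(v, &users)
+//	}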
+
+// Args is a helper for constructing command arguments from structured values.
+type Args []interface{}
+
+// Add returns the result of appending value to args.
+func (args Args) Add(value ...interface{}) Args {
+ return append(args, value...)
+}
+
+// AddFlat returns the result of appending the flattened value of v to args.
+//
+// Maps are flattened by appending the alternating keys and map values to args.
+//
+// Slices are flattened by appending the slice elements to args.
+//
+// Structs are flattened by appending the alternating names and values of
+// exported fields to args. If v is a nil struct pointer, then nothing is
+// appended. The 'redis' field tag overrides struct field names. See ScanStruct
+// for more information on the use of the 'redis' field tag.
+//
+// Other types are appended to args as is.
+func (args Args) AddFlat(v interface{}) Args {
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.Struct:
+ args = flattenStruct(args, rv)
+ case reflect.Slice:
+ for i := 0; i < rv.Len(); i++ {
+ args = append(args, rv.Index(i).Interface())
+ }
+ case reflect.Map:
+ for _, k := range rv.MapKeys() {
+ args = append(args, k.Interface(), rv.MapIndex(k).Interface())
+ }
+ case reflect.Ptr:
+ if rv.Type().Elem().Kind() == reflect.Struct {
+ if !rv.IsNil() {
+ args = flattenStruct(args, rv.Elem())
+ }
+ } else {
+ args = append(args, v)
+ }
+ default:
+ args = append(args, v)
+ }
+ return args
+}
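+
+// A minimal sketch of the usual write-side pairing with ScanStruct (c, the
+// key and the user struct are hypothetical):
+//
+//	u := user{Name: "ann", Age: 40}
+//	_, err := c.Do("HMSET", redis.Args{}.Add("user:1").AddFlat(&u)...)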
+
+func flattenStruct(args Args, v reflect.Value) Args {
+ ss := structSpecForType(v.Type())
+ for _, fs := range ss.l {
+ fv := v.FieldByIndex(fs.index)
+ if fs.omitEmpty {
+ var empty = false
+ switch fv.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ empty = fv.Len() == 0
+ case reflect.Bool:
+ empty = !fv.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ empty = fv.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ empty = fv.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ empty = fv.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ empty = fv.IsNil()
+ }
+ if empty {
+ continue
+ }
+ }
+ args = append(args, fs.name, fv.Interface())
+ }
+ return args
+}
diff --git a/vendor/github.com/gomodule/redigo/redis/script.go b/vendor/github.com/gomodule/redigo/redis/script.go
new file mode 100644
index 000000000..0ef1c821f
--- /dev/null
+++ b/vendor/github.com/gomodule/redigo/redis/script.go
@@ -0,0 +1,91 @@
+// Copyright 2012 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package redis
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "io"
+ "strings"
+)
+
+// Script encapsulates the source, hash and key count for a Lua script. See
+// http://redis.io/commands/eval for information on scripts in Redis.
+type Script struct {
+ keyCount int
+ src string
+ hash string
+}
+
+// NewScript returns a new script object. If keyCount is greater than or equal
+// to zero, then the count is automatically inserted in the EVAL command
+// argument list. If keyCount is less than zero, then the application supplies
+// the count as the first value in the keysAndArgs argument to the Do, Send and
+// SendHash methods.
+func NewScript(keyCount int, src string) *Script {
+ h := sha1.New()
+ io.WriteString(h, src)
+ return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))}
+}
+
+func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} {
+ var args []interface{}
+ if s.keyCount < 0 {
+ args = make([]interface{}, 1+len(keysAndArgs))
+ args[0] = spec
+ copy(args[1:], keysAndArgs)
+ } else {
+ args = make([]interface{}, 2+len(keysAndArgs))
+ args[0] = spec
+ args[1] = s.keyCount
+ copy(args[2:], keysAndArgs)
+ }
+ return args
+}
+
+// Hash returns the script hash.
+func (s *Script) Hash() string {
+ return s.hash
+}
+
+// Do evaluates the script. Under the covers, Do optimistically evaluates the
+// script using the EVALSHA command. If the command fails because the script is
+// not loaded, then Do evaluates the script using the EVAL command (thus
+// causing the script to load).
+func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) {
+ v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...)
+ if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") {
+ v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...)
+ }
+ return v, err
+}
+
+// SendHash evaluates the script without waiting for the reply. The script is
+// evaluated with the EVALSHA command. The application must ensure that the
+// script is loaded by a previous call to Send, Do or Load methods.
+func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error {
+ return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...)
+}
+
+// Send evaluates the script without waiting for the reply.
+func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error {
+ return c.Send("EVAL", s.args(s.src, keysAndArgs)...)
+}
+
+// Load loads the script without evaluating it.
+func (s *Script) Load(c Conn) error {
+ _, err := c.Do("SCRIPT", "LOAD", s.src)
+ return err
+}
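+
+// A minimal usage sketch (the script body and Conn c are hypothetical);
+// keyCount 1 means KEYS[1] is "counter" and ARGV[1] is the increment:
+//
+//	var incrBy = redis.NewScript(1, `return redis.call("INCRBY", KEYS[1], ARGV[1])`)
+//	n, err := redis.Int64(incrBy.Do(c, "counter", 5))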
diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml
new file mode 100644
index 000000000..d8156a60b
--- /dev/null
+++ b/vendor/github.com/google/uuid/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+go:
+ - 1.4.3
+ - 1.5.3
+ - tip
+
+script:
+ - go test -v ./...
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
new file mode 100644
index 000000000..04fdf09f1
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -0,0 +1,10 @@
+# How to contribute
+
+We definitely welcome patches and contribution to this project!
+
+### Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+You may have already signed it for other Google projects.
diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS
new file mode 100644
index 000000000..b4bb97f6b
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTORS
@@ -0,0 +1,9 @@
+Paul Borman
+bmatsuo
+shawnps
+theory
+jboverfelt
+dsymonds
+cd1
+wallclockbuilder
+dansouza
diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE
new file mode 100644
index 000000000..5dc68268d
--- /dev/null
+++ b/vendor/github.com/google/uuid/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
new file mode 100644
index 000000000..9d92c11f1
--- /dev/null
+++ b/vendor/github.com/google/uuid/README.md
@@ -0,0 +1,19 @@
+# uuid 
+The uuid package generates and inspects UUIDs based on
+[RFC 4122](http://tools.ietf.org/html/rfc4122)
+and DCE 1.1: Authentication and Security Services.
+
+This package is based on the github.com/pborman/uuid package (previously named
+code.google.com/p/go-uuid). It differs from these earlier packages in that
+a UUID is a 16 byte array rather than a byte slice. One loss due to this
+change is the ability to represent an invalid UUID (vs a NIL UUID).
+
+###### Install
+`go get github.com/google/uuid`
+
+###### Documentation
+[GoDoc](http://godoc.org/github.com/google/uuid)
+
+Full `go doc` style documentation for the package can be viewed online without
+installing this package by using the GoDoc site here:
+http://godoc.org/github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go
new file mode 100644
index 000000000..fa820b9d3
--- /dev/null
+++ b/vendor/github.com/google/uuid/dce.go
@@ -0,0 +1,80 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "fmt"
+ "os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+ Person = Domain(0)
+ Group = Domain(1)
+ Org = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
+ uuid, err := NewUUID()
+ if err == nil {
+ uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+ uuid[9] = byte(domain)
+ binary.BigEndian.PutUint32(uuid[0:], id)
+ }
+ return uuid, err
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+// NewDCESecurity(Person, uint32(os.Getuid()))
+func NewDCEPerson() (UUID, error) {
+ return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+// NewDCESecurity(Group, uint32(os.Getgid()))
+func NewDCEGroup() (UUID, error) {
+ return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID. Domains are only defined
+// for Version 2 UUIDs.
+func (uuid UUID) Domain() Domain {
+ return Domain(uuid[9])
+}
+
+// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
+// UUIDs.
+func (uuid UUID) ID() uint32 {
+ return binary.BigEndian.Uint32(uuid[0:4])
+}
+
+func (d Domain) String() string {
+ switch d {
+ case Person:
+ return "Person"
+ case Group:
+ return "Group"
+ case Org:
+ return "Org"
+ }
+ return fmt.Sprintf("Domain%d", int(d))
+}
diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go
new file mode 100644
index 000000000..5b8a4b9af
--- /dev/null
+++ b/vendor/github.com/google/uuid/doc.go
@@ -0,0 +1,12 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uuid generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
+// Services.
+//
+// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
+// maps or compared directly.
+package uuid
diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod
new file mode 100644
index 000000000..fc84cd79d
--- /dev/null
+++ b/vendor/github.com/google/uuid/go.mod
@@ -0,0 +1 @@
+module github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
new file mode 100644
index 000000000..b17461631
--- /dev/null
+++ b/vendor/github.com/google/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "hash"
+)
+
+// Well known namespace IDs and UUIDs
+var (
+ NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+ NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+ NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+ NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+ Nil UUID // empty UUID, all zeros
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+ h.Reset()
+ h.Write(space[:])
+ h.Write(data)
+ s := h.Sum(nil)
+ var uuid UUID
+ copy(uuid[:], s)
+ uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+ return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data. It is the same as calling:
+//
+// NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+ return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data. It is the same as calling:
+//
+// NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+ return NewHash(sha1.New(), space, data, 5)
+}
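+
+// A minimal sketch: name-based UUIDs are deterministic, so the same
+// namespace and name always produce the same UUID:
+//
+//	a := NewSHA1(NameSpaceDNS, []byte("example.com"))
+//	b := NewSHA1(NameSpaceDNS, []byte("example.com"))
+//	// a == b, and a.Version() == 5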
diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go
new file mode 100644
index 000000000..7f9e0c6c0
--- /dev/null
+++ b/vendor/github.com/google/uuid/marshal.go
@@ -0,0 +1,37 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "fmt"
+
+// MarshalText implements encoding.TextMarshaler.
+func (uuid UUID) MarshalText() ([]byte, error) {
+ var js [36]byte
+ encodeHex(js[:], uuid)
+ return js[:], nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (uuid *UUID) UnmarshalText(data []byte) error {
+ id, err := ParseBytes(data)
+ if err == nil {
+ *uuid = id
+ }
+ return err
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (uuid UUID) MarshalBinary() ([]byte, error) {
+ return uuid[:], nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (uuid *UUID) UnmarshalBinary(data []byte) error {
+ if len(data) != 16 {
+ return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+ }
+ copy(uuid[:], data)
+ return nil
+}
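+
+// Because UUID implements encoding.TextMarshaler and TextUnmarshaler,
+// encoding/json handles it with no extra code. A minimal sketch (the record
+// type is hypothetical):
+//
+//	type record struct {
+//		ID UUID `json:"id"`
+//	}
+//
+//	b, err := json.Marshal(record{ID: New()})
+//	// b looks like {"id":"xxxxxxxx-xxxx-4xxx-xxxx-xxxxxxxxxxxx"}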
diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go
new file mode 100644
index 000000000..d651a2b06
--- /dev/null
+++ b/vendor/github.com/google/uuid/node.go
@@ -0,0 +1,90 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "sync"
+)
+
+var (
+ nodeMu sync.Mutex
+ ifname string // name of interface being used
+ nodeID [6]byte // hardware for version 1 UUIDs
+ zeroID [6]byte // nodeID with only 0's
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived. The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated. If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ return setNodeInterface(name)
+}
+
+func setNodeInterface(name string) bool {
+ iname, addr := getHardwareInterface(name) // null implementation for js
+ if iname != "" && addr != nil {
+ ifname = iname
+ copy(nodeID[:], addr)
+ return true
+ }
+
+ // We found no interfaces with a valid hardware address. If name
+ // does not specify a specific interface generate a random Node ID
+ // (section 4.1.6)
+ if name == "" {
+ ifname = "random"
+ randomBits(nodeID[:])
+ return true
+ }
+ return false
+}
+
+// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
+ nid := nodeID
+ return nid[:]
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
+// of id are used. If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+ if len(id) < 6 {
+ return false
+ }
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ copy(nodeID[:], id)
+ ifname = "user"
+ return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
+// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+ var node [6]byte
+ copy(node[:], uuid[10:])
+ return node[:]
+}
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
new file mode 100644
index 000000000..24b78edc9
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -0,0 +1,12 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build js
+
+package uuid
+
+// getHardwareInterface returns nil values for the JS version of the code.
+// This removes the "net" dependency, because it is not used in the browser.
+// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
+func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go
new file mode 100644
index 000000000..0cbbcddbd
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_net.go
@@ -0,0 +1,33 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !js
+
+package uuid
+
+import "net"
+
+var interfaces []net.Interface // cached list of interfaces
+
+// getHardwareInterface returns the name and hardware address of interface name.
+// If name is "" then the name and hardware address of one of the system's
+// interfaces is returned. If no interfaces are found (name does not exist or
+// there are no interfaces) then "", nil is returned.
+//
+// Only addresses of at least 6 bytes are returned.
+func getHardwareInterface(name string) (string, []byte) {
+ if interfaces == nil {
+ var err error
+ interfaces, err = net.Interfaces()
+ if err != nil {
+ return "", nil
+ }
+ }
+ for _, ifs := range interfaces {
+ if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+ return ifs.Name, ifs.HardwareAddr
+ }
+ }
+ return "", nil
+}
diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go
new file mode 100644
index 000000000..f326b54db
--- /dev/null
+++ b/vendor/github.com/google/uuid/sql.go
@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+ switch src := src.(type) {
+ case nil:
+ return nil
+
+ case string:
+ // if an empty UUID comes from a table, we return a null UUID
+ if src == "" {
+ return nil
+ }
+
+ // see Parse for required string format
+ u, err := Parse(src)
+ if err != nil {
+ return fmt.Errorf("Scan: %v", err)
+ }
+
+ *uuid = u
+
+ case []byte:
+ // if an empty UUID comes from a table, we return a null UUID
+ if len(src) == 0 {
+ return nil
+ }
+
+ // assumes a simple slice of bytes if 16 bytes
+ // otherwise attempts to parse
+ if len(src) != 16 {
+ return uuid.Scan(string(src))
+ }
+ copy((*uuid)[:], src)
+
+ default:
+ return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+ }
+
+ return nil
+}
+
+// Value implements sql.Valuer so that UUIDs can be written to databases
+// transparently. Currently, UUIDs map to strings. Please consult
+// database-specific driver documentation for matching types.
+func (uuid UUID) Value() (driver.Value, error) {
+ return uuid.String(), nil
+}
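+
+// A minimal sketch of reading a UUID column with database/sql (db, the
+// query and its parameter are hypothetical):
+//
+//	var id UUID
+//	err := db.QueryRow("SELECT id FROM users WHERE name = ?", name).Scan(&id)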
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
new file mode 100644
index 000000000..e6ef06cdc
--- /dev/null
+++ b/vendor/github.com/google/uuid/time.go
@@ -0,0 +1,123 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "sync"
+ "time"
+)
+
+// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+ lillian = 2299160 // Julian day of 15 Oct 1582
+ unix = 2440587 // Julian day of 1 Jan 1970
+ epoch = unix - lillian // Days between epochs
+ g1582 = epoch * 86400 // seconds between epochs
+ g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+ timeMu sync.Mutex
+ lasttime uint64 // last time we returned
+ clockSeq uint16 // clock sequence for this run
+
+ timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds since the
+// Unix epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+ sec = int64(t - g1582ns100)
+ nsec = (sec % 10000000) * 100
+ sec /= 10000000
+ return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+ t := timeNow()
+
+ // If we don't have a clock sequence already, set one.
+ if clockSeq == 0 {
+ setClockSequence(-1)
+ }
+ now := uint64(t.UnixNano()/100) + g1582ns100
+
+ // If time has gone backwards with this clock sequence then we
+ // increment the clock sequence
+ if now <= lasttime {
+ clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
+ }
+ lasttime = now
+ return Time(now), clockSeq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
+func ClockSequence() int {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ return clockSequence()
+}
+
+func clockSequence() int {
+ if clockSeq == 0 {
+ setClockSequence(-1)
+ }
+ return int(clockSeq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+ if seq == -1 {
+ var b [2]byte
+ randomBits(b[:]) // clock sequence
+ seq = int(b[0])<<8 | int(b[1])
+ }
+ oldSeq := clockSeq
+ clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+ if oldSeq != clockSeq {
+ lasttime = 0
+ }
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. The time is only defined for version 1 and 2 UUIDs.
+func (uuid UUID) Time() Time {
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ return Time(time)
+}
+
+// ClockSequence returns the clock sequence encoded in uuid.
+// The clock sequence is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) ClockSequence() int {
+ return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
+}
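+
+// A minimal sketch of recovering a wall-clock time.Time from a Version 1
+// UUID via UnixTime:
+//
+//	u, err := NewUUID()
+//	if err == nil {
+//		sec, nsec := u.Time().UnixTime()
+//		created := time.Unix(sec, nsec) // creation time embedded in u
+//		_ = created
+//	}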
diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go
new file mode 100644
index 000000000..5ea6c7378
--- /dev/null
+++ b/vendor/github.com/google/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+ if _, err := io.ReadFull(rander, b); err != nil {
+ panic(err.Error()) // rand should never fail
+ }
+}
+
+// xvalues returns the value of a byte as a hexadecimal digit or 255.
+var xvalues = [256]byte{
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts hex characters x1 and x2 into a byte.
+func xtob(x1, x2 byte) (byte, bool) {
+ b1 := xvalues[x1]
+ b2 := xvalues[x2]
+ return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
new file mode 100644
index 000000000..524404cc5
--- /dev/null
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -0,0 +1,245 @@
+// Copyright 2018 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID [16]byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+ Invalid = Variant(iota) // Invalid UUID
+ RFC4122 // The variant specified in RFC4122
+ Reserved // Reserved, NCS backward compatibility.
+ Microsoft // Reserved, Microsoft Corporation backward compatibility.
+ Future // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// Parse decodes s into a UUID or returns an error. Both the standard UUID
+// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
+// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
+// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
+func Parse(s string) (UUID, error) {
+ var uuid UUID
+ switch len(s) {
+ // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ case 36:
+
+ // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ case 36 + 9:
+ if strings.ToLower(s[:9]) != "urn:uuid:" {
+ return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
+ }
+ s = s[9:]
+
+ // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+ case 36 + 2:
+ s = s[1:]
+
+ // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ case 32:
+ var ok bool
+ for i := range uuid {
+ uuid[i], ok = xtob(s[i*2], s[i*2+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ }
+ return uuid, nil
+ default:
+ return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
+ }
+ // s is now at least 36 bytes long
+ // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return uuid, errors.New("invalid UUID format")
+ }
+ for i, x := range [16]int{
+ 0, 2, 4, 6,
+ 9, 11,
+ 14, 16,
+ 19, 21,
+ 24, 26, 28, 30, 32, 34} {
+ v, ok := xtob(s[x], s[x+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ uuid[i] = v
+ }
+ return uuid, nil
+}
+
+// ParseBytes is like Parse, except it parses a byte slice instead of a string.
+func ParseBytes(b []byte) (UUID, error) {
+ var uuid UUID
+ switch len(b) {
+ case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+ return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
+ }
+ b = b[9:]
+ case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+ b = b[1:]
+ case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ var ok bool
+ for i := 0; i < 32; i += 2 {
+ uuid[i/2], ok = xtob(b[i], b[i+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ }
+ return uuid, nil
+ default:
+ return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
+ }
+// b is now at least 36 bytes long
+ // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
+ return uuid, errors.New("invalid UUID format")
+ }
+ for i, x := range [16]int{
+ 0, 2, 4, 6,
+ 9, 11,
+ 14, 16,
+ 19, 21,
+ 24, 26, 28, 30, 32, 34} {
+ v, ok := xtob(b[x], b[x+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ uuid[i] = v
+ }
+ return uuid, nil
+}
+
+// MustParse is like Parse but panics if the string cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled UUIDs.
+func MustParse(s string) UUID {
+ uuid, err := Parse(s)
+ if err != nil {
+ panic(`uuid: Parse(` + s + `): ` + err.Error())
+ }
+ return uuid
+}
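+
+// A minimal sketch of the intended use (the variable name is hypothetical;
+// the value is the DNS namespace UUID from RFC 4122):
+//
+//	var nsDNS = MustParse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")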
+
+// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
+// does not have a length of 16. The bytes are copied from the slice.
+func FromBytes(b []byte) (uuid UUID, err error) {
+ err = uuid.UnmarshalBinary(b)
+ return uuid, err
+}
+
+// Must returns uuid if err is nil and panics otherwise.
+func Must(uuid UUID, err error) UUID {
+ if err != nil {
+ panic(err)
+ }
+ return uuid
+}
+
+// String returns the string form of uuid,
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) String() string {
+ var buf [36]byte
+ encodeHex(buf[:], uuid)
+ return string(buf[:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+ var buf [36 + 9]byte
+ copy(buf[:], "urn:uuid:")
+ encodeHex(buf[9:], uuid)
+ return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+ hex.Encode(dst, uuid[:4])
+ dst[8] = '-'
+ hex.Encode(dst[9:13], uuid[4:6])
+ dst[13] = '-'
+ hex.Encode(dst[14:18], uuid[6:8])
+ dst[18] = '-'
+ hex.Encode(dst[19:23], uuid[8:10])
+ dst[23] = '-'
+ hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid.
+func (uuid UUID) Variant() Variant {
+ switch {
+ case (uuid[8] & 0xc0) == 0x80:
+ return RFC4122
+ case (uuid[8] & 0xe0) == 0xc0:
+ return Microsoft
+ case (uuid[8] & 0xe0) == 0xe0:
+ return Future
+ default:
+ return Reserved
+ }
+}
+
+// Version returns the version of uuid.
+func (uuid UUID) Version() Version {
+ return Version(uuid[6] >> 4)
+}
+
+func (v Version) String() string {
+ if v > 15 {
+ return fmt.Sprintf("BAD_VERSION_%d", v)
+ }
+ return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+ switch v {
+ case RFC4122:
+ return "RFC4122"
+ case Reserved:
+ return "Reserved"
+ case Microsoft:
+ return "Microsoft"
+ case Future:
+ return "Future"
+ case Invalid:
+ return "Invalid"
+ }
+ return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+ if r == nil {
+ rander = rand.Reader
+ return
+ }
+ rander = r
+}
diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go
new file mode 100644
index 000000000..199a1ac65
--- /dev/null
+++ b/vendor/github.com/google/uuid/version1.go
@@ -0,0 +1,44 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewUUID returns nil. If the clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil and an error.
+//
+// In most cases, New should be used.
+func NewUUID() (UUID, error) {
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
+ nodeMu.Unlock()
+
+ var uuid UUID
+ now, seq, err := GetTime()
+ if err != nil {
+ return uuid, err
+ }
+
+ timeLow := uint32(now & 0xffffffff)
+ timeMid := uint16((now >> 32) & 0xffff)
+ timeHi := uint16((now >> 48) & 0x0fff)
+ timeHi |= 0x1000 // Version 1
+
+ binary.BigEndian.PutUint32(uuid[0:], timeLow)
+ binary.BigEndian.PutUint16(uuid[4:], timeMid)
+ binary.BigEndian.PutUint16(uuid[6:], timeHi)
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+ copy(uuid[10:], nodeID[:])
+
+ return uuid, nil
+}
diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go
new file mode 100644
index 000000000..84af91c9f
--- /dev/null
+++ b/vendor/github.com/google/uuid/version4.go
@@ -0,0 +1,38 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "io"
+
+// New creates a new random UUID or panics. New is equivalent to
+// the expression
+//
+// uuid.Must(uuid.NewRandom())
+func New() UUID {
+ return Must(NewRandom())
+}
+
+// NewRandom returns a Random (Version 4) UUID.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+// Randomly generated UUIDs have 122 random bits. One's annual risk of being
+// hit by a meteorite is estimated to be one chance in 17 billion; that
+// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
+// equivalent to the odds of creating a few tens of trillions of UUIDs in a
+// year and having one duplicate.
+func NewRandom() (UUID, error) {
+ var uuid UUID
+ _, err := io.ReadFull(rander, uuid[:])
+ if err != nil {
+ return Nil, err
+ }
+ uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+ return uuid, nil
+}
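+
+// A minimal sketch of the two entry points: New panics if the random source
+// fails, NewRandom reports the error instead:
+//
+//	id := New()
+//	id2, err := NewRandom()
+//	if err != nil {
+//		// crypto/rand failed; id2 is Nil
+//	}
+//	_, _ = id, id2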
diff --git a/vendor/github.com/googleapis/gax-go/v2/LICENSE b/vendor/github.com/googleapis/gax-go/v2/LICENSE
new file mode 100644
index 000000000..6d16b6578
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/LICENSE
@@ -0,0 +1,27 @@
+Copyright 2016, Google Inc.
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go
new file mode 100644
index 000000000..b1d53dd19
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go
@@ -0,0 +1,161 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+ "math/rand"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// CallOption is an option used by Invoke to control behaviors of RPC calls.
+// CallOption works by modifying relevant fields of CallSettings.
+type CallOption interface {
+ // Resolve applies the option by modifying cs.
+ Resolve(cs *CallSettings)
+}
+
+// Retryer is used by Invoke to determine retry behavior.
+type Retryer interface {
+ // Retry reports whether a request should be retried and how long to pause
+ // before retrying if the previous attempt returned with err. Invoke never
+ // calls Retry with nil error.
+ Retry(err error) (pause time.Duration, shouldRetry bool)
+}
+
+type retryerOption func() Retryer
+
+func (o retryerOption) Resolve(s *CallSettings) {
+ s.Retry = o
+}
+
+// WithRetry sets CallSettings.Retry to fn.
+func WithRetry(fn func() Retryer) CallOption {
+ return retryerOption(fn)
+}
+
+// OnCodes returns a Retryer that retries if and only if
+// the previous attempt returns a GRPC error whose error code is stored in cc.
+// Pause times between retries are specified by bo.
+//
+// bo is only used for its parameters; each Retryer has its own copy.
+func OnCodes(cc []codes.Code, bo Backoff) Retryer {
+ return &boRetryer{
+ backoff: bo,
+ codes: append([]codes.Code(nil), cc...),
+ }
+}
+
+type boRetryer struct {
+ backoff Backoff
+ codes []codes.Code
+}
+
+func (r *boRetryer) Retry(err error) (time.Duration, bool) {
+ st, ok := status.FromError(err)
+ if !ok {
+ return 0, false
+ }
+ c := st.Code()
+ for _, rc := range r.codes {
+ if c == rc {
+ return r.backoff.Pause(), true
+ }
+ }
+ return 0, false
+}
+
+// Backoff implements exponential backoff.
+// The wait time between retries is a random value between 0 and the "retry envelope".
+// The envelope starts at Initial and increases by the factor of Multiplier every retry,
+// but is capped at Max.
+type Backoff struct {
+ // Initial is the initial value of the retry envelope, defaults to 1 second.
+ Initial time.Duration
+
+ // Max is the maximum value of the retry envelope, defaults to 30 seconds.
+ Max time.Duration
+
+ // Multiplier is the factor by which the retry envelope increases.
+ // It should be greater than 1 and defaults to 2.
+ Multiplier float64
+
+ // cur is the current retry envelope
+ cur time.Duration
+}
+
+// Pause returns the next time.Duration that the caller should use to backoff.
+func (bo *Backoff) Pause() time.Duration {
+ if bo.Initial == 0 {
+ bo.Initial = time.Second
+ }
+ if bo.cur == 0 {
+ bo.cur = bo.Initial
+ }
+ if bo.Max == 0 {
+ bo.Max = 30 * time.Second
+ }
+ if bo.Multiplier < 1 {
+ bo.Multiplier = 2
+ }
+ // Select a duration between 1ns and the current max. It might seem
+ // counterintuitive to have so much jitter, but
+ // https://www.awsarchitectureblog.com/2015/03/backoff.html argues that
+ // that is the best strategy.
+ d := time.Duration(1 + rand.Int63n(int64(bo.cur)))
+ bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
+ if bo.cur > bo.Max {
+ bo.cur = bo.Max
+ }
+ return d
+}
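+
+// A minimal sketch of driving Backoff by hand, outside Invoke (doCall and
+// isRetryable are hypothetical):
+//
+//	bo := gax.Backoff{Initial: 100 * time.Millisecond, Max: 5 * time.Second}
+//	for {
+//		err := doCall()
+//		if err == nil || !isRetryable(err) {
+//			return err
+//		}
+//		time.Sleep(bo.Pause())
+//	}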
+
+type grpcOpt []grpc.CallOption
+
+func (o grpcOpt) Resolve(s *CallSettings) {
+ s.GRPC = o
+}
+
+// WithGRPCOptions allows passing gRPC call options during client creation.
+func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
+ return grpcOpt(append([]grpc.CallOption(nil), opt...))
+}
+
+// CallSettings allow fine-grained control over how calls are made.
+type CallSettings struct {
+ // Retry returns a Retryer to be used to control retry logic of a method call.
+ // If Retry is nil or the returned Retryer is nil, the call will not be retried.
+ Retry func() Retryer
+
+ // CallOptions to be forwarded to GRPC.
+ GRPC []grpc.CallOption
+}
diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go
new file mode 100644
index 000000000..3fd1b0b84
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/gax.go
@@ -0,0 +1,39 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package gax contains a set of modules which aid the development of APIs
+// for clients and servers based on gRPC and Google API conventions.
+//
+// Application code will rarely need to use this library directly.
+// However, code generated automatically from API definition files can use it
+// to simplify code generation and to provide more convenient and idiomatic API surfaces.
+package gax
+
+// Version specifies the gax-go version being used.
+const Version = "2.0.4"
diff --git a/vendor/github.com/googleapis/gax-go/v2/go.mod b/vendor/github.com/googleapis/gax-go/v2/go.mod
new file mode 100644
index 000000000..9cdfaf447
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/go.mod
@@ -0,0 +1,3 @@
+module github.com/googleapis/gax-go/v2
+
+require google.golang.org/grpc v1.19.0
diff --git a/vendor/github.com/googleapis/gax-go/v2/go.sum b/vendor/github.com/googleapis/gax-go/v2/go.sum
new file mode 100644
index 000000000..7fa23ecf9
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/go.sum
@@ -0,0 +1,25 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go
new file mode 100644
index 000000000..139371a0b
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/header.go
@@ -0,0 +1,53 @@
+// Copyright 2018, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import "bytes"
+
+// XGoogHeader is for use by the Google Cloud Libraries only.
+//
+// XGoogHeader formats key-value pairs.
+// The resulting string is suitable for the x-goog-api-client header.
+func XGoogHeader(keyval ...string) string {
+ if len(keyval) == 0 {
+ return ""
+ }
+ if len(keyval)%2 != 0 {
+ panic("gax.Header: odd argument count")
+ }
+ var buf bytes.Buffer
+ for i := 0; i < len(keyval); i += 2 {
+ buf.WriteByte(' ')
+ buf.WriteString(keyval[i])
+ buf.WriteByte('/')
+ buf.WriteString(keyval[i+1])
+ }
+ return buf.String()[1:]
+}
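+
+// For example, XGoogHeader("gl-go", "1.12", "gax", Version) returns
+// "gl-go/1.12 gax/2.0.4": each pair is joined with '/' and pairs are
+// separated by single spaces (the version numbers here are illustrative).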
diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go
new file mode 100644
index 000000000..fe31dd004
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/v2/invoke.go
@@ -0,0 +1,99 @@
+// Copyright 2016, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+ "context"
+ "strings"
+ "time"
+)
+
+// APICall is a user-defined call stub.
+type APICall func(context.Context, CallSettings) error
+
+// Invoke calls the given APICall,
+// performing retries as specified by opts, if any.
+func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
+ var settings CallSettings
+ for _, opt := range opts {
+ opt.Resolve(&settings)
+ }
+ return invoke(ctx, call, settings, Sleep)
+}
+
+// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing.
+// If interrupted, Sleep returns ctx.Err().
+func Sleep(ctx context.Context, d time.Duration) error {
+ t := time.NewTimer(d)
+ select {
+ case <-ctx.Done():
+ t.Stop()
+ return ctx.Err()
+ case <-t.C:
+ return nil
+ }
+}
+
+type sleeper func(ctx context.Context, d time.Duration) error
+
+// invoke implements Invoke, taking an additional sleeper argument for testing.
+func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
+ var retryer Retryer
+ for {
+ err := call(ctx, settings)
+ if err == nil {
+ return nil
+ }
+ if settings.Retry == nil {
+ return err
+ }
+ // Never retry permanent certificate errors (e.g. if ca-certificates
+ // are not installed). We should only make very few, targeted
+ // exceptions: most other status=Unavailable errors should be retried,
+ // such as if there's a network hiccup or the internet goes out for a
+ // minute. This is also why we do string parsing here instead of
+ // simply making Unavailable a non-retried code elsewhere.
+ if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") {
+ return err
+ }
+ if retryer == nil {
+ if r := settings.Retry(); r != nil {
+ retryer = r
+ } else {
+ return err
+ }
+ }
+ if d, ok := retryer.Retry(err); !ok {
+ return err
+ } else if err = sp(ctx, d); err != nil {
+ return err
+ }
+ }
+}
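+
+// A minimal sketch of a hand-rolled Retryer (hypothetical; anything with a
+// Retry method works): this one pauses a fixed 500ms and gives up after
+// three attempts, regardless of the error:
+//
+//	type fixedRetryer struct{ attempts int }
+//
+//	func (r *fixedRetryer) Retry(err error) (time.Duration, bool) {
+//		r.attempts++
+//		return 500 * time.Millisecond, r.attempts <= 3
+//	}
+//
+//	err := Invoke(ctx, apiCall, WithRetry(func() Retryer { return &fixedRetryer{} }))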
diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore
new file mode 100644
index 000000000..5091fb073
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/.gitignore
@@ -0,0 +1,4 @@
+/jpgo
+jmespath-fuzz.zip
+cpu.out
+go-jmespath.test
diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml
new file mode 100644
index 000000000..1f9807757
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+sudo: false
+
+go:
+ - 1.4
+
+install: go get -v -t ./...
+script: make test
diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 000000000..b03310a91
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile
new file mode 100644
index 000000000..a828d2848
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/Makefile
@@ -0,0 +1,44 @@
+
+CMD = jpgo
+
+help:
+ @echo "Please use \`make ' where is one of"
+ @echo " test to run all the tests"
+ @echo " build to build the library and jp executable"
+ @echo " generate to run codegen"
+
+
+generate:
+ go generate ./...
+
+build:
+ rm -f $(CMD)
+ go build ./...
+ rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
+ mv cmd/$(CMD)/$(CMD) .
+
+test:
+ go test -v ./...
+
+check:
+ go vet ./...
+ @echo "golint ./..."
+ @lint=`golint ./...`; \
+ lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
+ echo "$$lint"; \
+ if [ "$$lint" != "" ]; then exit 1; fi
+
+htmlc:
+ go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
+
+buildfuzz:
+ go-fuzz-build github.com/jmespath/go-jmespath/fuzz
+
+fuzz: buildfuzz
+ go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
+
+bench:
+ go test -bench . -cpuprofile cpu.out
+
+pprof-cpu:
+ go tool pprof ./go-jmespath.test ./cpu.out
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
new file mode 100644
index 000000000..187ef676d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/README.md
@@ -0,0 +1,7 @@
+# go-jmespath - A JMESPath implementation in Go
+
+[![Build Status](https://travis-ci.org/jmespath/go-jmespath.svg?branch=master)](https://travis-ci.org/jmespath/go-jmespath)
+
+
+
+See http://jmespath.org for more info.
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
new file mode 100644
index 000000000..8e26ffeec
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+ ast ASTNode
+ intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+ parser := NewParser()
+ ast, err := parser.Parse(expression)
+ if err != nil {
+ return nil, err
+ }
+ jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+ return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+ jmespath, err := Compile(expression)
+ if err != nil {
+ panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+ }
+ return jmespath
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+ return jp.intr.Execute(jp.ast, data)
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func Search(expression string, data interface{}) (interface{}, error) {
+ intr := newInterpreter()
+ parser := NewParser()
+ ast, err := parser.Parse(expression)
+ if err != nil {
+ return nil, err
+ }
+ return intr.Execute(ast, data)
+}
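+
+// A short usage sketch (hypothetical input data):
+//
+//	var data interface{}
+//	_ = json.Unmarshal([]byte(`{"foo": {"bar": "baz"}}`), &data)
+//	jp := MustCompile("foo.bar")
+//	result, _ := jp.Search(data) // "baz"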
diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
new file mode 100644
index 000000000..1cd2d239c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type astNodeType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
+
+var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
+
+func (i astNodeType) String() string {
+ if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
+ return fmt.Sprintf("astNodeType(%d)", i)
+ }
+ return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go
new file mode 100644
index 000000000..9b7cd89b4
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/functions.go
@@ -0,0 +1,842 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type jpFunction func(arguments []interface{}) (interface{}, error)
+
+type jpType string
+
+const (
+ jpUnknown jpType = "unknown"
+ jpNumber jpType = "number"
+ jpString jpType = "string"
+ jpArray jpType = "array"
+ jpObject jpType = "object"
+ jpArrayNumber jpType = "array[number]"
+ jpArrayString jpType = "array[string]"
+ jpExpref jpType = "expref"
+ jpAny jpType = "any"
+)
+
+type functionEntry struct {
+ name string
+ arguments []argSpec
+ handler jpFunction
+ hasExpRef bool
+}
+
+type argSpec struct {
+ types []jpType
+ variadic bool
+}
+
+type byExprString struct {
+ intr *treeInterpreter
+ node ASTNode
+ items []interface{}
+ hasError bool
+}
+
+func (a *byExprString) Len() int {
+ return len(a.items)
+}
+func (a *byExprString) Swap(i, j int) {
+ a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprString) Less(i, j int) bool {
+ first, err := a.intr.Execute(a.node, a.items[i])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ ith, ok := first.(string)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ second, err := a.intr.Execute(a.node, a.items[j])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ jth, ok := second.(string)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ return ith < jth
+}
+
+type byExprFloat struct {
+ intr *treeInterpreter
+ node ASTNode
+ items []interface{}
+ hasError bool
+}
+
+func (a *byExprFloat) Len() int {
+ return len(a.items)
+}
+func (a *byExprFloat) Swap(i, j int) {
+ a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprFloat) Less(i, j int) bool {
+ first, err := a.intr.Execute(a.node, a.items[i])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ ith, ok := first.(float64)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ second, err := a.intr.Execute(a.node, a.items[j])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ jth, ok := second.(float64)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ return ith < jth
+}
+
+type functionCaller struct {
+ functionTable map[string]functionEntry
+}
+
+func newFunctionCaller() *functionCaller {
+ caller := &functionCaller{}
+ caller.functionTable = map[string]functionEntry{
+ "length": {
+ name: "length",
+ arguments: []argSpec{
+ {types: []jpType{jpString, jpArray, jpObject}},
+ },
+ handler: jpfLength,
+ },
+ "starts_with": {
+ name: "starts_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfStartsWith,
+ },
+ "abs": {
+ name: "abs",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfAbs,
+ },
+ "avg": {
+ name: "avg",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfAvg,
+ },
+ "ceil": {
+ name: "ceil",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfCeil,
+ },
+ "contains": {
+ name: "contains",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfContains,
+ },
+ "ends_with": {
+ name: "ends_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfEndsWith,
+ },
+ "floor": {
+ name: "floor",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfFloor,
+ },
+ "map": {
+ name: "amp",
+ arguments: []argSpec{
+ {types: []jpType{jpExpref}},
+ {types: []jpType{jpArray}},
+ },
+ handler: jpfMap,
+ hasExpRef: true,
+ },
+ "max": {
+ name: "max",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMax,
+ },
+ "merge": {
+ name: "merge",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}, variadic: true},
+ },
+ handler: jpfMerge,
+ },
+ "max_by": {
+ name: "max_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMaxBy,
+ hasExpRef: true,
+ },
+ "sum": {
+ name: "sum",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfSum,
+ },
+ "min": {
+ name: "min",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMin,
+ },
+ "min_by": {
+ name: "min_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMinBy,
+ hasExpRef: true,
+ },
+ "type": {
+ name: "type",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfType,
+ },
+ "keys": {
+ name: "keys",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfKeys,
+ },
+ "values": {
+ name: "values",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfValues,
+ },
+ "sort": {
+ name: "sort",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayString, jpArrayNumber}},
+ },
+ handler: jpfSort,
+ },
+ "sort_by": {
+ name: "sort_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfSortBy,
+ hasExpRef: true,
+ },
+ "join": {
+ name: "join",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpArrayString}},
+ },
+ handler: jpfJoin,
+ },
+ "reverse": {
+ name: "reverse",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ },
+ handler: jpfReverse,
+ },
+ "to_array": {
+ name: "to_array",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToArray,
+ },
+ "to_string": {
+ name: "to_string",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToString,
+ },
+ "to_number": {
+ name: "to_number",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToNumber,
+ },
+ "not_null": {
+ name: "not_null",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}, variadic: true},
+ },
+ handler: jpfNotNull,
+ },
+ }
+ return caller
+}
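+
+// For example (hypothetical data), this table is what lets an expression like
+//
+//	Search("sort_by(people, &age)[0].name", data)
+//
+// resolve: "sort_by" is looked up in functionTable, and because its entry has
+// hasExpRef set, the interpreter is prepended to the arguments so jpfSortBy
+// can evaluate the &age expression reference against each element.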
+
+func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
+ if len(e.arguments) == 0 {
+ return arguments, nil
+ }
+ if !e.arguments[len(e.arguments)-1].variadic {
+ if len(e.arguments) != len(arguments) {
+ return nil, errors.New("incorrect number of args")
+ }
+ for i, spec := range e.arguments {
+ userArg := arguments[i]
+ err := spec.typeCheck(userArg)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return arguments, nil
+ }
+ if len(arguments) < len(e.arguments) {
+ return nil, errors.New("Invalid arity.")
+ }
+ return arguments, nil
+}
+
+func (a *argSpec) typeCheck(arg interface{}) error {
+ for _, t := range a.types {
+ switch t {
+ case jpNumber:
+ if _, ok := arg.(float64); ok {
+ return nil
+ }
+ case jpString:
+ if _, ok := arg.(string); ok {
+ return nil
+ }
+ case jpArray:
+ if isSliceType(arg) {
+ return nil
+ }
+ case jpObject:
+ if _, ok := arg.(map[string]interface{}); ok {
+ return nil
+ }
+ case jpArrayNumber:
+ if _, ok := toArrayNum(arg); ok {
+ return nil
+ }
+ case jpArrayString:
+ if _, ok := toArrayStr(arg); ok {
+ return nil
+ }
+ case jpAny:
+ return nil
+ case jpExpref:
+ if _, ok := arg.(expRef); ok {
+ return nil
+ }
+ }
+ }
+ return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types)
+}
+
+func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
+ entry, ok := f.functionTable[name]
+ if !ok {
+ return nil, errors.New("unknown function: " + name)
+ }
+ resolvedArgs, err := entry.resolveArgs(arguments)
+ if err != nil {
+ return nil, err
+ }
+ if entry.hasExpRef {
+ var extra []interface{}
+ extra = append(extra, intr)
+ resolvedArgs = append(extra, resolvedArgs...)
+ }
+ return entry.handler(resolvedArgs)
+}
+
+func jpfAbs(arguments []interface{}) (interface{}, error) {
+ num := arguments[0].(float64)
+ return math.Abs(num), nil
+}
+
+func jpfLength(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if c, ok := arg.(string); ok {
+ return float64(utf8.RuneCountInString(c)), nil
+ } else if isSliceType(arg) {
+ v := reflect.ValueOf(arg)
+ return float64(v.Len()), nil
+ } else if c, ok := arg.(map[string]interface{}); ok {
+ return float64(len(c)), nil
+ }
+ return nil, errors.New("could not compute length()")
+}
+
+func jpfStartsWith(arguments []interface{}) (interface{}, error) {
+ search := arguments[0].(string)
+ prefix := arguments[1].(string)
+ return strings.HasPrefix(search, prefix), nil
+}
+
+func jpfAvg(arguments []interface{}) (interface{}, error) {
+ // We've already type checked the value so we can safely use
+ // type assertions.
+ args := arguments[0].([]interface{})
+ length := float64(len(args))
+ numerator := 0.0
+ for _, n := range args {
+ numerator += n.(float64)
+ }
+ return numerator / length, nil
+}
+func jpfCeil(arguments []interface{}) (interface{}, error) {
+ val := arguments[0].(float64)
+ return math.Ceil(val), nil
+}
+func jpfContains(arguments []interface{}) (interface{}, error) {
+ search := arguments[0]
+ el := arguments[1]
+ if searchStr, ok := search.(string); ok {
+ if elStr, ok := el.(string); ok {
+ return strings.Contains(searchStr, elStr), nil
+ }
+ return false, nil
+ }
+ // Otherwise this is a generic contains for []interface{}
+ general := search.([]interface{})
+ for _, item := range general {
+ if item == el {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+func jpfEndsWith(arguments []interface{}) (interface{}, error) {
+ search := arguments[0].(string)
+ suffix := arguments[1].(string)
+ return strings.HasSuffix(search, suffix), nil
+}
+func jpfFloor(arguments []interface{}) (interface{}, error) {
+ val := arguments[0].(float64)
+ return math.Floor(val), nil
+}
+func jpfMap(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ exp := arguments[1].(expRef)
+ node := exp.ref
+ arr := arguments[2].([]interface{})
+ mapped := make([]interface{}, 0, len(arr))
+ for _, value := range arr {
+ current, err := intr.Execute(node, value)
+ if err != nil {
+ return nil, err
+ }
+ mapped = append(mapped, current)
+ }
+ return mapped, nil
+}
+func jpfMax(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ // Otherwise we're dealing with a max() of strings.
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+}
+func jpfMerge(arguments []interface{}) (interface{}, error) {
+ final := make(map[string]interface{})
+ for _, m := range arguments {
+ mapped := m.(map[string]interface{})
+ for key, value := range mapped {
+ final[key] = value
+ }
+ }
+ return final, nil
+}
+func jpfMaxBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return nil, nil
+ } else if len(arr) == 1 {
+ return arr[0], nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ switch t := start.(type) {
+ case float64:
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(float64)
+ if !ok {
+ return nil, errors.New("invalid type, must be number")
+ }
+ if current > bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ case string:
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(string)
+ if !ok {
+ return nil, errors.New("invalid type, must be string")
+ }
+ if current > bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ default:
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
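+
+// For example (hypothetical data): given the input [{"age": 30}, {"age": 50}],
+// the expression max_by(@, &age) evaluates &age against each element and
+// returns {"age": 50}.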
+func jpfSum(arguments []interface{}) (interface{}, error) {
+ items, _ := toArrayNum(arguments[0])
+ sum := 0.0
+ for _, item := range items {
+ sum += item
+ }
+ return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return nil, nil
+ } else if len(arr) == 1 {
+ return arr[0], nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ if t, ok := start.(float64); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(float64)
+ if !ok {
+ return nil, errors.New("invalid type, must be number")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else if t, ok := start.(string); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(string)
+ if !ok {
+ return nil, errors.New("invalid type, must be string")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else {
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfType(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if _, ok := arg.(float64); ok {
+ return "number", nil
+ }
+ if _, ok := arg.(string); ok {
+ return "string", nil
+ }
+ if _, ok := arg.([]interface{}); ok {
+ return "array", nil
+ }
+ if _, ok := arg.(map[string]interface{}); ok {
+ return "object", nil
+ }
+ if arg == nil {
+ return "null", nil
+ }
+ if arg == true || arg == false {
+ return "boolean", nil
+ }
+ return nil, errors.New("unknown type")
+}
+func jpfKeys(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0].(map[string]interface{})
+ collected := make([]interface{}, 0, len(arg))
+ for key := range arg {
+ collected = append(collected, key)
+ }
+ return collected, nil
+}
+func jpfValues(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0].(map[string]interface{})
+ collected := make([]interface{}, 0, len(arg))
+ for _, value := range arg {
+ collected = append(collected, value)
+ }
+ return collected, nil
+}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ d := sort.Float64Slice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+ }
+ // Otherwise we're dealing with sort()'ing strings.
+ items, _ := toArrayStr(arguments[0])
+ d := sort.StringSlice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+}
+func jpfSortBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return arr, nil
+ } else if len(arr) == 1 {
+ return arr, nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := start.(float64); ok {
+ sortable := &byExprFloat{intr, node, arr, false}
+ sort.Stable(sortable)
+ if sortable.hasError {
+ return nil, errors.New("error in sort_by comparison")
+ }
+ return arr, nil
+ } else if _, ok := start.(string); ok {
+ sortable := &byExprString{intr, node, arr, false}
+ sort.Stable(sortable)
+ if sortable.hasError {
+ return nil, errors.New("error in sort_by comparison")
+ }
+ return arr, nil
+ } else {
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfJoin(arguments []interface{}) (interface{}, error) {
+ sep := arguments[0].(string)
+ // We can't just do arguments[1].([]string), we have to
+ // manually convert each item to a string.
+ arrayStr := []string{}
+ for _, item := range arguments[1].([]interface{}) {
+ arrayStr = append(arrayStr, item.(string))
+ }
+ return strings.Join(arrayStr, sep), nil
+}
+func jpfReverse(arguments []interface{}) (interface{}, error) {
+ if s, ok := arguments[0].(string); ok {
+ r := []rune(s)
+ for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
+ r[i], r[j] = r[j], r[i]
+ }
+ return string(r), nil
+ }
+ items := arguments[0].([]interface{})
+ length := len(items)
+ reversed := make([]interface{}, length)
+ for i, item := range items {
+ reversed[length-(i+1)] = item
+ }
+ return reversed, nil
+}
+func jpfToArray(arguments []interface{}) (interface{}, error) {
+ if _, ok := arguments[0].([]interface{}); ok {
+ return arguments[0], nil
+ }
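+ // Wrap the single argument in a one-element slice.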
+ return arguments[:1:1], nil
+}
+func jpfToString(arguments []interface{}) (interface{}, error) {
+ if v, ok := arguments[0].(string); ok {
+ return v, nil
+ }
+ result, err := json.Marshal(arguments[0])
+ if err != nil {
+ return nil, err
+ }
+ return string(result), nil
+}
+func jpfToNumber(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if v, ok := arg.(float64); ok {
+ return v, nil
+ }
+ if v, ok := arg.(string); ok {
+ conv, err := strconv.ParseFloat(v, 64)
+ if err != nil {
+ return nil, nil
+ }
+ return conv, nil
+ }
+ if _, ok := arg.([]interface{}); ok {
+ return nil, nil
+ }
+ if _, ok := arg.(map[string]interface{}); ok {
+ return nil, nil
+ }
+ if arg == nil {
+ return nil, nil
+ }
+ if arg == true || arg == false {
+ return nil, nil
+ }
+ return nil, errors.New("unknown type")
+}
+func jpfNotNull(arguments []interface{}) (interface{}, error) {
+ for _, arg := range arguments {
+ if arg != nil {
+ return arg, nil
+ }
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go
new file mode 100644
index 000000000..13c74604c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go
@@ -0,0 +1,418 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+/* This is a tree based interpreter. It walks the AST and directly
+ interprets the AST to search through a JSON document.
+*/
+
+type treeInterpreter struct {
+ fCall *functionCaller
+}
+
+func newInterpreter() *treeInterpreter {
+ interpreter := treeInterpreter{}
+ interpreter.fCall = newFunctionCaller()
+ return &interpreter
+}
+
+type expRef struct {
+ ref ASTNode
+}
+
+// Execute takes an ASTNode and input data and interprets the AST directly.
+// It will produce the result of applying the JMESPath expression associated
+// with the ASTNode to the input data "value".
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) {
+ switch node.nodeType {
+ case ASTComparator:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ right, err := intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ switch node.value {
+ case tEQ:
+ return objsEqual(left, right), nil
+ case tNE:
+ return !objsEqual(left, right), nil
+ }
+ leftNum, ok := left.(float64)
+ if !ok {
+ return nil, nil
+ }
+ rightNum, ok := right.(float64)
+ if !ok {
+ return nil, nil
+ }
+ switch node.value {
+ case tGT:
+ return leftNum > rightNum, nil
+ case tGTE:
+ return leftNum >= rightNum, nil
+ case tLT:
+ return leftNum < rightNum, nil
+ case tLTE:
+ return leftNum <= rightNum, nil
+ }
+ case ASTExpRef:
+ return expRef{ref: node.children[0]}, nil
+ case ASTFunctionExpression:
+ resolvedArgs := []interface{}{}
+ for _, arg := range node.children {
+ current, err := intr.Execute(arg, value)
+ if err != nil {
+ return nil, err
+ }
+ resolvedArgs = append(resolvedArgs, current)
+ }
+ return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr)
+ case ASTField:
+ if m, ok := value.(map[string]interface{}); ok {
+ key := node.value.(string)
+ return m[key], nil
+ }
+ return intr.fieldFromStruct(node.value.(string), value)
+ case ASTFilterProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.filterProjectionWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ compareNode := node.children[2]
+ collected := []interface{}{}
+ for _, element := range sliceType {
+ result, err := intr.Execute(compareNode, element)
+ if err != nil {
+ return nil, err
+ }
+ if !isFalse(result) {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ }
+ return collected, nil
+ case ASTFlatten:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ // If we can't type convert to []interface{}, there's
+ // a chance this could still work via reflection if we're
+ // dealing with user provided types.
+ if isSliceType(left) {
+ return intr.flattenWithReflection(left)
+ }
+ return nil, nil
+ }
+ flattened := []interface{}{}
+ for _, element := range sliceType {
+ if elementSlice, ok := element.([]interface{}); ok {
+ flattened = append(flattened, elementSlice...)
+ } else if isSliceType(element) {
+ reflectFlat := []interface{}{}
+ v := reflect.ValueOf(element)
+ for i := 0; i < v.Len(); i++ {
+ reflectFlat = append(reflectFlat, v.Index(i).Interface())
+ }
+ flattened = append(flattened, reflectFlat...)
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+ case ASTIdentity, ASTCurrentNode:
+ return value, nil
+ case ASTIndex:
+ if sliceType, ok := value.([]interface{}); ok {
+ index := node.value.(int)
+ if index < 0 {
+ index += len(sliceType)
+ }
+ if index < len(sliceType) && index >= 0 {
+ return sliceType[index], nil
+ }
+ return nil, nil
+ }
+ // Otherwise try via reflection.
+ rv := reflect.ValueOf(value)
+ if rv.Kind() == reflect.Slice {
+ index := node.value.(int)
+ if index < 0 {
+ index += rv.Len()
+ }
+ if index < rv.Len() && index >= 0 {
+ v := rv.Index(index)
+ return v.Interface(), nil
+ }
+ }
+ return nil, nil
+ case ASTKeyValPair:
+ return intr.Execute(node.children[0], value)
+ case ASTLiteral:
+ return node.value, nil
+ case ASTMultiSelectHash:
+ if value == nil {
+ return nil, nil
+ }
+ collected := make(map[string]interface{})
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ key := child.value.(string)
+ collected[key] = current
+ }
+ return collected, nil
+ case ASTMultiSelectList:
+ if value == nil {
+ return nil, nil
+ }
+ collected := []interface{}{}
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ collected = append(collected, current)
+ }
+ return collected, nil
+ case ASTOrExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ matched, err = intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return matched, nil
+ case ASTAndExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return matched, nil
+ }
+ return intr.Execute(node.children[1], value)
+ case ASTNotExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return true, nil
+ }
+ return false, nil
+ case ASTPipe:
+ result := value
+ var err error
+ for _, child := range node.children {
+ result, err = intr.Execute(child, result)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return result, nil
+ case ASTProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.projectWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ collected := []interface{}{}
+ var current interface{}
+ for _, element := range sliceType {
+ current, err = intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ case ASTSubexpression, ASTIndexExpression:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ return intr.Execute(node.children[1], left)
+ case ASTSlice:
+ sliceType, ok := value.([]interface{})
+ if !ok {
+ if isSliceType(value) {
+ return intr.sliceWithReflection(node, value)
+ }
+ return nil, nil
+ }
+ parts := node.value.([]*int)
+ sliceParams := make([]sliceParam, 3)
+ for i, part := range parts {
+ if part != nil {
+ sliceParams[i].Specified = true
+ sliceParams[i].N = *part
+ }
+ }
+ return slice(sliceType, sliceParams)
+ case ASTValueProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ mapType, ok := left.(map[string]interface{})
+ if !ok {
+ return nil, nil
+ }
+ values := make([]interface{}, 0, len(mapType))
+ for _, value := range mapType {
+ values = append(values, value)
+ }
+ collected := []interface{}{}
+ for _, element := range values {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ }
+ return nil, errors.New("Unknown AST node: " + node.nodeType.String())
+}
+
+func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
+ rv := reflect.ValueOf(value)
+ first, n := utf8.DecodeRuneInString(key)
+ fieldName := string(unicode.ToUpper(first)) + key[n:]
+ if rv.Kind() == reflect.Struct {
+ v := rv.FieldByName(fieldName)
+ if !v.IsValid() {
+ return nil, nil
+ }
+ return v.Interface(), nil
+ } else if rv.Kind() == reflect.Ptr {
+ // Handle multiple levels of indirection?
+ if rv.IsNil() {
+ return nil, nil
+ }
+ rv = rv.Elem()
+ v := rv.FieldByName(fieldName)
+ if !v.IsValid() {
+ return nil, nil
+ }
+ return v.Interface(), nil
+ }
+ return nil, nil
+}
+
+func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
+ v := reflect.ValueOf(value)
+ flattened := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ if reflect.TypeOf(element).Kind() == reflect.Slice {
+ // Then insert the contents of the element
+ // slice into the flattened slice,
+ // i.e flattened = append(flattened, mySlice...)
+ elementV := reflect.ValueOf(element)
+ for j := 0; j < elementV.Len(); j++ {
+ flattened = append(
+ flattened, elementV.Index(j).Interface())
+ }
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+}
+
+func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ v := reflect.ValueOf(value)
+ parts := node.value.([]*int)
+ sliceParams := make([]sliceParam, 3)
+ for i, part := range parts {
+ if part != nil {
+ sliceParams[i].Specified = true
+ sliceParams[i].N = *part
+ }
+ }
+ final := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ final = append(final, element)
+ }
+ return slice(final, sliceParams)
+}
+
+func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ compareNode := node.children[2]
+ collected := []interface{}{}
+ v := reflect.ValueOf(value)
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ result, err := intr.Execute(compareNode, element)
+ if err != nil {
+ return nil, err
+ }
+ if !isFalse(result) {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ }
+ return collected, nil
+}
+
+func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ collected := []interface{}{}
+ v := reflect.ValueOf(value)
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ result, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if result != nil {
+ collected = append(collected, result)
+ }
+ }
+ return collected, nil
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go
new file mode 100644
index 000000000..817900c8f
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/lexer.go
@@ -0,0 +1,420 @@
+package jmespath
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type token struct {
+ tokenType tokType
+ value string
+ position int
+ length int
+}
+
+type tokType int
+
+const eof = -1
+
+// Lexer contains information about the expression being tokenized.
+type Lexer struct {
+ expression string // The expression provided by the user.
+ currentPos int // The current position in the string.
+ lastWidth int // The width of the current rune.
+ buf bytes.Buffer // Internal buffer used for building up values.
+}
+
+// SyntaxError is the main error used whenever a lexing or parsing error occurs.
+type SyntaxError struct {
+ msg string // Error message displayed to user
+ Expression string // Expression that generated a SyntaxError
+ Offset int // The location in the string where the error occurred
+}
+
+func (e SyntaxError) Error() string {
+ // In the future, it would be good to underline the specific
+ // location where the error occurred.
+ return "SyntaxError: " + e.msg
+}
+
+// HighlightLocation will show where the syntax error occurred.
+// It will place a "^" character on a line below the expression
+// at the point where the syntax error occurred.
+func (e SyntaxError) HighlightLocation() string {
+ return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
+}
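+
+// For example (illustrative): for the expression "foo.#bar", where '#' is an
+// unknown character at Offset 4, HighlightLocation returns:
+//
+//	foo.#bar
+//	    ^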
+
+//go:generate stringer -type=tokType
+const (
+ tUnknown tokType = iota
+ tStar
+ tDot
+ tFilter
+ tFlatten
+ tLparen
+ tRparen
+ tLbracket
+ tRbracket
+ tLbrace
+ tRbrace
+ tOr
+ tPipe
+ tNumber
+ tUnquotedIdentifier
+ tQuotedIdentifier
+ tComma
+ tColon
+ tLT
+ tLTE
+ tGT
+ tGTE
+ tEQ
+ tNE
+ tJSONLiteral
+ tStringLiteral
+ tCurrent
+ tExpref
+ tAnd
+ tNot
+ tEOF
+)
+
+var basicTokens = map[rune]tokType{
+ '.': tDot,
+ '*': tStar,
+ ',': tComma,
+ ':': tColon,
+ '{': tLbrace,
+ '}': tRbrace,
+ ']': tRbracket, // tLbracket not included because it could be "[]"
+ '(': tLparen,
+ ')': tRparen,
+ '@': tCurrent,
+}
+
+// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
+// When using this bitmask just be sure to shift the rune down 64 bits
+// before checking against identifierStartBits.
+const identifierStartBits uint64 = 576460745995190270
+
+// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
+var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
+
+var whiteSpace = map[rune]bool{
+ ' ': true, '\t': true, '\n': true, '\r': true,
+}
+
+func (t token) String() string {
+ return fmt.Sprintf("Token{%+v, %s, %d, %d}",
+ t.tokenType, t.value, t.position, t.length)
+}
+
+// NewLexer creates a new JMESPath lexer.
+func NewLexer() *Lexer {
+ lexer := Lexer{}
+ return &lexer
+}
+
+func (lexer *Lexer) next() rune {
+ if lexer.currentPos >= len(lexer.expression) {
+ lexer.lastWidth = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
+ lexer.lastWidth = w
+ lexer.currentPos += w
+ return r
+}
+
+func (lexer *Lexer) back() {
+ lexer.currentPos -= lexer.lastWidth
+}
+
+func (lexer *Lexer) peek() rune {
+ t := lexer.next()
+ lexer.back()
+ return t
+}
+
+// tokenize takes an expression and returns corresponding tokens.
+func (lexer *Lexer) tokenize(expression string) ([]token, error) {
+ var tokens []token
+ lexer.expression = expression
+ lexer.currentPos = 0
+ lexer.lastWidth = 0
+loop:
+ for {
+ r := lexer.next()
+ if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
+ t := lexer.consumeUnquotedIdentifier()
+ tokens = append(tokens, t)
+ } else if val, ok := basicTokens[r]; ok {
+ // Basic single char token.
+ t := token{
+ tokenType: val,
+ value: string(r),
+ position: lexer.currentPos - lexer.lastWidth,
+ length: 1,
+ }
+ tokens = append(tokens, t)
+ } else if r == '-' || (r >= '0' && r <= '9') {
+ t := lexer.consumeNumber()
+ tokens = append(tokens, t)
+ } else if r == '[' {
+ t := lexer.consumeLBracket()
+ tokens = append(tokens, t)
+ } else if r == '"' {
+ t, err := lexer.consumeQuotedIdentifier()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '\'' {
+ t, err := lexer.consumeRawStringLiteral()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '`' {
+ t, err := lexer.consumeLiteral()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '|' {
+ t := lexer.matchOrElse(r, '|', tOr, tPipe)
+ tokens = append(tokens, t)
+ } else if r == '<' {
+ t := lexer.matchOrElse(r, '=', tLTE, tLT)
+ tokens = append(tokens, t)
+ } else if r == '>' {
+ t := lexer.matchOrElse(r, '=', tGTE, tGT)
+ tokens = append(tokens, t)
+ } else if r == '!' {
+ t := lexer.matchOrElse(r, '=', tNE, tNot)
+ tokens = append(tokens, t)
+ } else if r == '=' {
+ t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
+ tokens = append(tokens, t)
+ } else if r == '&' {
+ t := lexer.matchOrElse(r, '&', tAnd, tExpref)
+ tokens = append(tokens, t)
+ } else if r == eof {
+ break loop
+ } else if _, ok := whiteSpace[r]; ok {
+ // Ignore whitespace
+ } else {
+ return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
+ }
+ }
+ tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
+ return tokens, nil
+}
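+
+// For example (illustrative), tokenize("foo[0]") yields, in order:
+// tUnquotedIdentifier("foo"), tLbracket, tNumber("0"), tRbracket, tEOF.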
+
+// Consume characters until the ending rune "r" is reached.
+// If the end of the expression is reached before seeing the
+// terminating rune "r", then an error is returned.
+// If no error occurs then the matching substring is returned.
+// The returned string will not include the ending rune.
+func (lexer *Lexer) consumeUntil(end rune) (string, error) {
+ start := lexer.currentPos
+ current := lexer.next()
+ for current != end && current != eof {
+ if current == '\\' && lexer.peek() != eof {
+ lexer.next()
+ }
+ current = lexer.next()
+ }
+ if lexer.lastWidth == 0 {
+ // Then we hit an EOF so we never reached the closing
+ // delimiter.
+ return "", SyntaxError{
+ msg: "Unclosed delimiter: " + string(end),
+ Expression: lexer.expression,
+ Offset: len(lexer.expression),
+ }
+ }
+ return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
+}
+
+func (lexer *Lexer) consumeLiteral() (token, error) {
+ start := lexer.currentPos
+ value, err := lexer.consumeUntil('`')
+ if err != nil {
+ return token{}, err
+ }
+ value = strings.Replace(value, "\\`", "`", -1)
+ return token{
+ tokenType: tJSONLiteral,
+ value: value,
+ position: start,
+ length: len(value),
+ }, nil
+}
+
+func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
+ start := lexer.currentPos
+ currentIndex := start
+ current := lexer.next()
+ for current != '\'' && lexer.peek() != eof {
+ if current == '\\' && lexer.peek() == '\'' {
+ chunk := lexer.expression[currentIndex : lexer.currentPos-1]
+ lexer.buf.WriteString(chunk)
+ lexer.buf.WriteString("'")
+ lexer.next()
+ currentIndex = lexer.currentPos
+ }
+ current = lexer.next()
+ }
+ if lexer.lastWidth == 0 {
+ // Then we hit an EOF so we never reached the closing
+ // delimiter.
+ return token{}, SyntaxError{
+ msg: "Unclosed delimiter: '",
+ Expression: lexer.expression,
+ Offset: len(lexer.expression),
+ }
+ }
+ if currentIndex < lexer.currentPos {
+ lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
+ }
+ value := lexer.buf.String()
+ // Reset the buffer so it can be reused.
+ lexer.buf.Reset()
+ return token{
+ tokenType: tStringLiteral,
+ value: value,
+ position: start,
+ length: len(value),
+ }, nil
+}
+
+func (lexer *Lexer) syntaxError(msg string) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: lexer.expression,
+ Offset: lexer.currentPos - 1,
+ }
+}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tOr, "|" -> tPipe.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == second {
+ t = token{
+ tokenType: matchedType,
+ value: string(first) + string(second),
+ position: start,
+ length: 2,
+ }
+ } else {
+ lexer.back()
+ t = token{
+ tokenType: singleCharType,
+ value: string(first),
+ position: start,
+ length: 1,
+ }
+ }
+ return t
+}
+
+func (lexer *Lexer) consumeLBracket() token {
+ // There are three options here:
+ // 1. A filter expression "[?"
+ // 2. A flatten operator "[]"
+ // 3. A bare lbracket "["
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == '?' {
+ t = token{
+ tokenType: tFilter,
+ value: "[?",
+ position: start,
+ length: 2,
+ }
+ } else if nextRune == ']' {
+ t = token{
+ tokenType: tFlatten,
+ value: "[]",
+ position: start,
+ length: 2,
+ }
+ } else {
+ t = token{
+ tokenType: tLbracket,
+ value: "[",
+ position: start,
+ length: 1,
+ }
+ lexer.back()
+ }
+ return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+ start := lexer.currentPos
+ value, err := lexer.consumeUntil('"')
+ if err != nil {
+ return token{}, err
+ }
+ var decoded string
+ asJSON := []byte("\"" + value + "\"")
+ if err := json.Unmarshal(asJSON, &decoded); err != nil {
+ return token{}, err
+ }
+ return token{
+ tokenType: tQuotedIdentifier,
+ value: decoded,
+ position: start - 1,
+ length: len(decoded),
+ }, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+ // Consume runes until we reach the end of an unquoted
+ // identifier.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+ if r < 0 || r >= 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tUnquotedIdentifier,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
+
+func (lexer *Lexer) consumeNumber() token {
+ // Consume runes until we reach something that's not a number.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+ if r < '0' || r > '9' {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tNumber,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go
new file mode 100644
index 000000000..1240a1755
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/parser.go
@@ -0,0 +1,603 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type astNodeType int
+
+//go:generate stringer -type astNodeType
+const (
+ ASTEmpty astNodeType = iota
+ ASTComparator
+ ASTCurrentNode
+ ASTExpRef
+ ASTFunctionExpression
+ ASTField
+ ASTFilterProjection
+ ASTFlatten
+ ASTIdentity
+ ASTIndex
+ ASTIndexExpression
+ ASTKeyValPair
+ ASTLiteral
+ ASTMultiSelectHash
+ ASTMultiSelectList
+ ASTOrExpression
+ ASTAndExpression
+ ASTNotExpression
+ ASTPipe
+ ASTProjection
+ ASTSubexpression
+ ASTSlice
+ ASTValueProjection
+)
+
+// ASTNode represents the abstract syntax tree of a JMESPath expression.
+type ASTNode struct {
+ nodeType astNodeType
+ value interface{}
+ children []ASTNode
+}
+
+func (node ASTNode) String() string {
+ return node.PrettyPrint(0)
+}
+
+// PrettyPrint will pretty print the parsed AST.
+// The AST is an implementation detail and this pretty print
+// function is provided as a convenience method to help with
+// debugging. You should not rely on its output as the internal
+// structure of the AST may change at any time.
+func (node ASTNode) PrettyPrint(indent int) string {
+ spaces := strings.Repeat(" ", indent)
+ output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType)
+ nextIndent := indent + 2
+ if node.value != nil {
+ if converted, ok := node.value.(fmt.Stringer); ok {
+ // Account for things like comparator nodes
+ // that are enums with a String() method.
+ output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String())
+ } else {
+ output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value)
+ }
+ }
+	if len(node.children) > 0 {
+		output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent))
+		childIndent := nextIndent + 2
+		for _, elem := range node.children {
+			output += elem.PrettyPrint(childIndent)
+		}
+		// Close the children block so the printed tree stays balanced.
+		output += fmt.Sprintf("%s}\n", strings.Repeat(" ", nextIndent))
+	}
+ output += fmt.Sprintf("%s}\n", spaces)
+ return output
+}
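+
+// A minimal sketch of PrettyPrint in use (illustrative; the exact output
+// format is an implementation detail, per the note above):
+//
+//	node := ASTNode{nodeType: ASTField, value: "foo"}
+//	fmt.Print(node.PrettyPrint(0))
+//	// ASTField {
+//	//   value: "foo"
+//	// }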
+
+var bindingPowers = map[tokType]int{
+ tEOF: 0,
+ tUnquotedIdentifier: 0,
+ tQuotedIdentifier: 0,
+ tRbracket: 0,
+ tRparen: 0,
+ tComma: 0,
+ tRbrace: 0,
+ tNumber: 0,
+ tCurrent: 0,
+ tExpref: 0,
+ tColon: 0,
+ tPipe: 1,
+ tOr: 2,
+ tAnd: 3,
+ tEQ: 5,
+ tLT: 5,
+ tLTE: 5,
+ tGT: 5,
+ tGTE: 5,
+ tNE: 5,
+ tFlatten: 9,
+ tStar: 20,
+ tFilter: 21,
+ tDot: 40,
+ tNot: 45,
+ tLbrace: 50,
+ tLbracket: 55,
+ tLparen: 60,
+}
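+
+// Illustrative note: these binding powers drive the Pratt (top-down
+// operator precedence) loop in parseExpression below. An infix token is
+// only consumed while its binding power exceeds the caller's, so
+// "a || b && c" groups as "a || (b && c)" because tAnd (3) binds more
+// tightly than tOr (2), and both bind more tightly than tPipe (1).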
+
+// Parser holds state about the current expression being parsed.
+type Parser struct {
+ expression string
+ tokens []token
+ index int
+}
+
+// NewParser creates a new JMESPath parser.
+func NewParser() *Parser {
+ p := Parser{}
+ return &p
+}
+
+// Parse will compile a JMESPath expression.
+func (p *Parser) Parse(expression string) (ASTNode, error) {
+ lexer := NewLexer()
+ p.expression = expression
+ p.index = 0
+ tokens, err := lexer.tokenize(expression)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ p.tokens = tokens
+ parsed, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() != tEOF {
+		return ASTNode{}, p.syntaxError(fmt.Sprintf(
+			"Unexpected token at the end of the expression: %s", p.current()))
+ }
+ return parsed, nil
+}
+
+func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
+ var err error
+ leftToken := p.lookaheadToken(0)
+ p.advance()
+ leftNode, err := p.nud(leftToken)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ currentToken := p.current()
+ for bindingPower < bindingPowers[currentToken] {
+ p.advance()
+ leftNode, err = p.led(currentToken, leftNode)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ currentToken = p.current()
+ }
+ return leftNode, nil
+}
+
+func (p *Parser) parseIndexExpression() (ASTNode, error) {
+ if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
+ return p.parseSliceExpression()
+ }
+ indexStr := p.lookaheadToken(0).value
+ parsedInt, err := strconv.Atoi(indexStr)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
+ p.advance()
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return indexNode, nil
+}
+
+func (p *Parser) parseSliceExpression() (ASTNode, error) {
+ parts := []*int{nil, nil, nil}
+ index := 0
+ current := p.current()
+ for current != tRbracket && index < 3 {
+ if current == tColon {
+ index++
+ p.advance()
+ } else if current == tNumber {
+ parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ parts[index] = &parsedInt
+ p.advance()
+ } else {
+			return ASTNode{}, p.syntaxError(
+				"Expected tColon or tNumber, received: " + p.current().String())
+ }
+ current = p.current()
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTSlice,
+ value: parts,
+ }, nil
+}
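+
+// For example (illustrative): "foo[0:10:2]" yields parts [0 10 2], while
+// "foo[::2]" yields [nil nil 2]; unspecified parts stay nil here and are
+// resolved against the actual array length at evaluation time (see util.go).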
+
+func (p *Parser) match(tokenType tokType) error {
+ if p.current() == tokenType {
+ p.advance()
+ return nil
+ }
+ return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
+}
+
+func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
+ switch tokenType {
+ case tDot:
+ if p.current() != tStar {
+ right, err := p.parseDotRHS(bindingPowers[tDot])
+ return ASTNode{
+ nodeType: ASTSubexpression,
+ children: []ASTNode{node, right},
+ }, err
+ }
+ p.advance()
+ right, err := p.parseProjectionRHS(bindingPowers[tDot])
+ return ASTNode{
+ nodeType: ASTValueProjection,
+ children: []ASTNode{node, right},
+ }, err
+ case tPipe:
+ right, err := p.parseExpression(bindingPowers[tPipe])
+ return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
+ case tOr:
+ right, err := p.parseExpression(bindingPowers[tOr])
+ return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
+ case tAnd:
+ right, err := p.parseExpression(bindingPowers[tAnd])
+ return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
+ case tLparen:
+ name := node.value
+ var args []ASTNode
+ for p.current() != tRparen {
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() == tComma {
+ if err := p.match(tComma); err != nil {
+ return ASTNode{}, err
+ }
+ }
+ args = append(args, expression)
+ }
+ if err := p.match(tRparen); err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTFunctionExpression,
+ value: name,
+ children: args,
+ }, nil
+ case tFilter:
+ return p.parseFilter(node)
+ case tFlatten:
+ left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
+ right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{left, right},
+ }, err
+ case tEQ, tNE, tGT, tGTE, tLT, tLTE:
+ right, err := p.parseExpression(bindingPowers[tokenType])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTComparator,
+ value: tokenType,
+ children: []ASTNode{node, right},
+ }, nil
+ case tLbracket:
+ tokenType := p.current()
+ var right ASTNode
+ var err error
+ if tokenType == tNumber || tokenType == tColon {
+ right, err = p.parseIndexExpression()
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.projectIfSlice(node, right)
+ }
+ // Otherwise this is a projection.
+ if err := p.match(tStar); err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ right, err = p.parseProjectionRHS(bindingPowers[tStar])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{node, right},
+ }, nil
+ }
+ return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
+}
+
+func (p *Parser) nud(token token) (ASTNode, error) {
+ switch token.tokenType {
+ case tJSONLiteral:
+ var parsed interface{}
+ err := json.Unmarshal([]byte(token.value), &parsed)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
+ case tStringLiteral:
+ return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
+ case tUnquotedIdentifier:
+ return ASTNode{
+ nodeType: ASTField,
+ value: token.value,
+ }, nil
+ case tQuotedIdentifier:
+ node := ASTNode{nodeType: ASTField, value: token.value}
+ if p.current() == tLparen {
+ return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
+ }
+ return node, nil
+ case tStar:
+ left := ASTNode{nodeType: ASTIdentity}
+ var right ASTNode
+ var err error
+ if p.current() == tRbracket {
+ right = ASTNode{nodeType: ASTIdentity}
+ } else {
+ right, err = p.parseProjectionRHS(bindingPowers[tStar])
+ }
+ return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
+ case tFilter:
+ return p.parseFilter(ASTNode{nodeType: ASTIdentity})
+ case tLbrace:
+ return p.parseMultiSelectHash()
+ case tFlatten:
+ left := ASTNode{
+ nodeType: ASTFlatten,
+ children: []ASTNode{{nodeType: ASTIdentity}},
+ }
+ right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
+ case tLbracket:
+ tokenType := p.current()
+ if tokenType == tNumber || tokenType == tColon {
+ right, err := p.parseIndexExpression()
+ if err != nil {
+				return ASTNode{}, err
+ }
+ return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
+ } else if tokenType == tStar && p.lookahead(1) == tRbracket {
+ p.advance()
+ p.advance()
+ right, err := p.parseProjectionRHS(bindingPowers[tStar])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{{nodeType: ASTIdentity}, right},
+ }, nil
+ } else {
+ return p.parseMultiSelectList()
+ }
+ case tCurrent:
+ return ASTNode{nodeType: ASTCurrentNode}, nil
+ case tExpref:
+ expression, err := p.parseExpression(bindingPowers[tExpref])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
+ case tNot:
+ expression, err := p.parseExpression(bindingPowers[tNot])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
+ case tLparen:
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRparen); err != nil {
+ return ASTNode{}, err
+ }
+ return expression, nil
+ case tEOF:
+ return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
+ }
+
+ return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
+}
+
+func (p *Parser) parseMultiSelectList() (ASTNode, error) {
+ var expressions []ASTNode
+ for {
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ expressions = append(expressions, expression)
+ if p.current() == tRbracket {
+ break
+ }
+ err = p.match(tComma)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ }
+ err := p.match(tRbracket)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTMultiSelectList,
+ children: expressions,
+ }, nil
+}
+
+func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
+ var children []ASTNode
+ for {
+ keyToken := p.lookaheadToken(0)
+ if err := p.match(tUnquotedIdentifier); err != nil {
+ if err := p.match(tQuotedIdentifier); err != nil {
+ return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
+ }
+ }
+ keyName := keyToken.value
+ err := p.match(tColon)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ value, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ node := ASTNode{
+ nodeType: ASTKeyValPair,
+ value: keyName,
+ children: []ASTNode{value},
+ }
+ children = append(children, node)
+ if p.current() == tComma {
+ err := p.match(tComma)
+ if err != nil {
+				return ASTNode{}, err
+ }
+ } else if p.current() == tRbrace {
+ err := p.match(tRbrace)
+ if err != nil {
+				return ASTNode{}, err
+ }
+ break
+ }
+ }
+ return ASTNode{
+ nodeType: ASTMultiSelectHash,
+ children: children,
+ }, nil
+}
+
+func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
+ indexExpr := ASTNode{
+ nodeType: ASTIndexExpression,
+ children: []ASTNode{left, right},
+ }
+ if right.nodeType == ASTSlice {
+ right, err := p.parseProjectionRHS(bindingPowers[tStar])
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{indexExpr, right},
+ }, err
+ }
+ return indexExpr, nil
+}
+
+func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
+ var right, condition ASTNode
+ var err error
+ condition, err = p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() == tFlatten {
+ right = ASTNode{nodeType: ASTIdentity}
+ } else {
+ right, err = p.parseProjectionRHS(bindingPowers[tFilter])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ }
+
+ return ASTNode{
+ nodeType: ASTFilterProjection,
+ children: []ASTNode{node, right, condition},
+ }, nil
+}
+
+func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
+ lookahead := p.current()
+ if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
+ return p.parseExpression(bindingPower)
+ } else if lookahead == tLbracket {
+ if err := p.match(tLbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectList()
+ } else if lookahead == tLbrace {
+ if err := p.match(tLbrace); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectHash()
+ }
+ return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
+}
+
+func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
+ current := p.current()
+ if bindingPowers[current] < 10 {
+ return ASTNode{nodeType: ASTIdentity}, nil
+ } else if current == tLbracket {
+ return p.parseExpression(bindingPower)
+ } else if current == tFilter {
+ return p.parseExpression(bindingPower)
+ } else if current == tDot {
+ err := p.match(tDot)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseDotRHS(bindingPower)
+ } else {
+		return ASTNode{}, p.syntaxError("Expected tLbracket, tFilter, or tDot, received: " + current.String())
+ }
+}
+
+func (p *Parser) lookahead(number int) tokType {
+ return p.lookaheadToken(number).tokenType
+}
+
+func (p *Parser) current() tokType {
+ return p.lookahead(0)
+}
+
+func (p *Parser) lookaheadToken(number int) token {
+ return p.tokens[p.index+number]
+}
+
+func (p *Parser) advance() {
+ p.index++
+}
+
+func tokensOneOf(elements []tokType, token tokType) bool {
+ for _, elem := range elements {
+ if elem == token {
+ return true
+ }
+ }
+ return false
+}
+
+func (p *Parser) syntaxError(msg string) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: p.expression,
+ Offset: p.lookaheadToken(0).position,
+ }
+}
+
+// Create a SyntaxError based on the provided token.
+// This differs from syntaxError() which creates a SyntaxError
+// based on the current lookahead token.
+func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: p.expression,
+ Offset: t.position,
+ }
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
new file mode 100644
index 000000000..dae79cbdf
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=tokType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
+
+var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
+
+func (i tokType) String() string {
+ if i < 0 || i >= tokType(len(_tokType_index)-1) {
+ return fmt.Sprintf("tokType(%d)", i)
+ }
+ return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go
new file mode 100644
index 000000000..ddc1b7d7d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/util.go
@@ -0,0 +1,185 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+)
+
+// isFalse determines if an object is false based on the JMESPath spec.
+// JMESPath defines false values to be any of:
+// - An empty string, array, or hash.
+// - The boolean value false.
+// - nil
+func isFalse(value interface{}) bool {
+ switch v := value.(type) {
+ case bool:
+ return !v
+ case []interface{}:
+ return len(v) == 0
+ case map[string]interface{}:
+ return len(v) == 0
+ case string:
+ return len(v) == 0
+ case nil:
+ return true
+ }
+ // Try the reflection cases before returning false.
+ rv := reflect.ValueOf(value)
+ switch rv.Kind() {
+ case reflect.Struct:
+ // A struct type will never be false, even if
+ // all of its values are the zero type.
+ return false
+ case reflect.Slice, reflect.Map:
+ return rv.Len() == 0
+ case reflect.Ptr:
+ if rv.IsNil() {
+ return true
+ }
+ // If it's a pointer type, we'll try to deref the pointer
+ // and evaluate the pointer value for isFalse.
+ element := rv.Elem()
+ return isFalse(element.Interface())
+ }
+ return false
+}
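+
+// For example (illustrative): isFalse("") and isFalse([]interface{}{}) are
+// true, while isFalse(0.0) is false; numbers are never false under the
+// JMESPath spec, which is why no numeric case appears above.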
+
+// objsEqual is a generic object equality check.
+// It takes two arbitrary objects and recursively determines
+// if they are equal.
+func objsEqual(left interface{}, right interface{}) bool {
+ return reflect.DeepEqual(left, right)
+}
+
+// sliceParam refers to a single part of a slice.
+// A slice consists of a start, a stop, and a step, similar to
+// Python slices.
+type sliceParam struct {
+ N int
+ Specified bool
+}
+
+// slice supports the [start:stop:step] style slicing found in JMESPath.
+func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
+ computed, err := computeSliceParams(len(slice), parts)
+ if err != nil {
+ return nil, err
+ }
+ start, stop, step := computed[0], computed[1], computed[2]
+ result := []interface{}{}
+ if step > 0 {
+ for i := start; i < stop; i += step {
+ result = append(result, slice[i])
+ }
+ } else {
+ for i := start; i > stop; i += step {
+ result = append(result, slice[i])
+ }
+ }
+ return result, nil
+}
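+
+// Illustrative example: slicing []interface{}{"a", "b", "c", "d", "e"} with
+// start 1, stop 4, step 2 (all Specified) selects indexes 1 and 3, i.e.
+// []interface{}{"b", "d"}, mirroring Python's s[1:4:2].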
+
+func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
+ var start, stop, step int
+ if !parts[2].Specified {
+ step = 1
+ } else if parts[2].N == 0 {
+ return nil, errors.New("Invalid slice, step cannot be 0")
+ } else {
+ step = parts[2].N
+ }
+	stepValueNegative := step < 0
+
+ if !parts[0].Specified {
+ if stepValueNegative {
+ start = length - 1
+ } else {
+ start = 0
+ }
+ } else {
+ start = capSlice(length, parts[0].N, step)
+ }
+
+ if !parts[1].Specified {
+ if stepValueNegative {
+ stop = -1
+ } else {
+ stop = length
+ }
+ } else {
+ stop = capSlice(length, parts[1].N, step)
+ }
+ return []int{start, stop, step}, nil
+}
+
+func capSlice(length int, actual int, step int) int {
+ if actual < 0 {
+ actual += length
+ if actual < 0 {
+ if step < 0 {
+ actual = -1
+ } else {
+ actual = 0
+ }
+ }
+ } else if actual >= length {
+ if step < 0 {
+ actual = length - 1
+ } else {
+ actual = length
+ }
+ }
+ return actual
+}
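+
+// For instance (illustrative): capSlice(5, -1, 1) == 4, since negative
+// indexes count from the end, and capSlice(5, 99, 1) == 5, clamped to the
+// length, matching Python slice semantics.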
+
+// toArrayNum converts an empty interface type to a slice of float64.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false.
+func toArrayNum(data interface{}) ([]float64, bool) {
+ // Is there a better way to do this with reflect?
+ if d, ok := data.([]interface{}); ok {
+ result := make([]float64, len(d))
+ for i, el := range d {
+ item, ok := el.(float64)
+ if !ok {
+ return nil, false
+ }
+ result[i] = item
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+// toArrayStr converts an empty interface type to a slice of strings.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false. If the input data could be entirely
+// converted, then the converted data, along with a second value of true,
+// will be returned.
+func toArrayStr(data interface{}) ([]string, bool) {
+ // Is there a better way to do this with reflect?
+ if d, ok := data.([]interface{}); ok {
+ result := make([]string, len(d))
+ for i, el := range d {
+ item, ok := el.(string)
+ if !ok {
+ return nil, false
+ }
+ result[i] = item
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+func isSliceType(v interface{}) bool {
+ if v == nil {
+ return false
+ }
+ return reflect.TypeOf(v).Kind() == reflect.Slice
+}
diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml
new file mode 100644
index 000000000..955dc0be5
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.codecov.yml
@@ -0,0 +1,3 @@
+ignore:
+ - "output_tests/.*"
+
diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore
new file mode 100644
index 000000000..15556530a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.gitignore
@@ -0,0 +1,4 @@
+/vendor
+/bug_test.go
+/coverage.txt
+/.idea
diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml
new file mode 100644
index 000000000..449e67cd0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.8.x
+ - 1.x
+
+before_install:
+ - go get -t -v ./...
+
+script:
+ - ./test.sh
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock
new file mode 100644
index 000000000..c8a9fbb38
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/Gopkg.lock
@@ -0,0 +1,21 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ name = "github.com/modern-go/concurrent"
+ packages = ["."]
+ revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
+ version = "1.0.0"
+
+[[projects]]
+ name = "github.com/modern-go/reflect2"
+ packages = ["."]
+ revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
+ version = "1.0.1"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml
new file mode 100644
index 000000000..313a0f887
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/Gopkg.toml
@@ -0,0 +1,26 @@
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+
+ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"]
+
+[[constraint]]
+ name = "github.com/modern-go/reflect2"
+ version = "1.0.1"
diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE
new file mode 100644
index 000000000..2cf4f5ab2
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 json-iterator
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
new file mode 100644
index 000000000..50d56ffbf
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/README.md
@@ -0,0 +1,87 @@
+[](https://sourcegraph.com/github.com/json-iterator/go?badge)
+[](http://godoc.org/github.com/json-iterator/go)
+[](https://travis-ci.org/json-iterator/go)
+[](https://codecov.io/gh/json-iterator/go)
+[](https://goreportcard.com/report/github.com/json-iterator/go)
+[](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
+[](https://gitter.im/json-iterator/Lobby)
+
+A high-performance, 100% compatible drop-in replacement for "encoding/json".
+
+You can also work with Thrift the way you work with JSON, using [thrift-iterator](https://github.com/thrift-iterator/go).
+
+# Benchmark
+
+
+
+Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
+
+Raw Result (easyjson requires static code generation)
+
+| benchmark | ns/op | allocation bytes | allocation times |
+| --- | --- | --- | --- |
+| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
+| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
+| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
+| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
+| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
+| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
+
+Always benchmark with your own workload.
+The result depends heavily on the data input.
+
+# Usage
+
+100% compatibility with the standard library:
+
+Replace
+
+```go
+import "encoding/json"
+json.Marshal(&data)
+```
+
+with
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Marshal(&data)
+```
+
+Replace
+
+```go
+import "encoding/json"
+json.Unmarshal(input, &data)
+```
+
+with
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Unmarshal(input, &data)
+```
+
+[More documentation](http://jsoniter.com/migrate-from-go-std.html)
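+
+You can also pull a single value out of deeply nested JSON without declaring
+a struct, using the `Get` API (a quick sketch):
+
+```go
+val := []byte(`{"ID":1,"Name":"Reds","Colors":["Crimson","Red","Ruby","Maroon"]}`)
+str := jsoniter.Get(val, "Colors", 0).ToString() // "Crimson"
+```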
+
+# How to get
+
+```
+go get github.com/json-iterator/go
+```
+
+# Contributions Welcome!
+
+Contributors
+
+* [thockin](https://github.com/thockin)
+* [mattn](https://github.com/mattn)
+* [cch123](https://github.com/cch123)
+* [Oleg Shaldybin](https://github.com/olegshaldybin)
+* [Jason Toffaletti](https://github.com/toffaletti)
+
+Report an issue or open a pull request, or email taowen@gmail.com, or [](https://gitter.im/json-iterator/Lobby)
diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go
new file mode 100644
index 000000000..92d2cc4a3
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/adapter.go
@@ -0,0 +1,150 @@
+package jsoniter
+
+import (
+ "bytes"
+ "io"
+)
+
+// RawMessage is a raw encoded JSON value, provided to ease replacing encoding/json with jsoniter.
+type RawMessage []byte
+
+// Unmarshal adapts to the encoding/json Unmarshal API.
+//
+// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
+// Refer to https://godoc.org/encoding/json#Unmarshal for more information
+func Unmarshal(data []byte, v interface{}) error {
+ return ConfigDefault.Unmarshal(data, v)
+}
+
+// UnmarshalFromString is a convenient method to read from string instead of []byte
+func UnmarshalFromString(str string, v interface{}) error {
+ return ConfigDefault.UnmarshalFromString(str, v)
+}
+
+// Get is a quick method to fetch a value from a deeply nested JSON structure.
+func Get(data []byte, path ...interface{}) Any {
+ return ConfigDefault.Get(data, path...)
+}
+
+// Marshal adapts to the encoding/json Marshal API.
+//
+// Marshal returns the JSON encoding of v.
+// Refer to https://godoc.org/encoding/json#Marshal for more information
+func Marshal(v interface{}) ([]byte, error) {
+ return ConfigDefault.Marshal(v)
+}
+
+// MarshalIndent same as json.MarshalIndent. Prefix is not supported.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ return ConfigDefault.MarshalIndent(v, prefix, indent)
+}
+
+// MarshalToString is a convenient method to write as a string instead of []byte.
+func MarshalToString(v interface{}) (string, error) {
+ return ConfigDefault.MarshalToString(v)
+}
+
+// NewDecoder adapts to the encoding/json NewDecoder API.
+//
+// NewDecoder returns a new decoder that reads from reader.
+//
+// Instead of an encoding/json Decoder, a jsoniter Decoder is returned.
+// Refer to https://godoc.org/encoding/json#NewDecoder for more information
+func NewDecoder(reader io.Reader) *Decoder {
+ return ConfigDefault.NewDecoder(reader)
+}
+
+// Decoder reads and decodes JSON values from an input stream.
+// Decoder provides an API identical to encoding/json's Decoder (Token() is still in progress).
+type Decoder struct {
+ iter *Iterator
+}
+
+// Decode decodes the next JSON-encoded value from the input and stores it in obj.
+func (adapter *Decoder) Decode(obj interface{}) error {
+ if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil {
+ if !adapter.iter.loadMore() {
+ return io.EOF
+ }
+ }
+ adapter.iter.ReadVal(obj)
+	err := adapter.iter.Error
+	if err == io.EOF {
+		return nil
+	}
+	return err
+}
+
+// More reports whether there is another element in the current array or object being parsed.
+func (adapter *Decoder) More() bool {
+ iter := adapter.iter
+ if iter.Error != nil {
+ return false
+ }
+ c := iter.nextToken()
+ if c == 0 {
+ return false
+ }
+ iter.unreadByte()
+ return c != ']' && c != '}'
+}
+
+// Buffered returns a reader of the data remaining in the Decoder's buffer.
+func (adapter *Decoder) Buffered() io.Reader {
+ remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
+ return bytes.NewReader(remaining)
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (adapter *Decoder) UseNumber() {
+ cfg := adapter.iter.cfg.configBeforeFrozen
+ cfg.UseNumber = true
+ adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
+
+// DisallowUnknownFields causes the Decoder to return an error when the destination
+// is a struct and the input contains object keys which do not match any
+// non-ignored, exported fields in the destination.
+func (adapter *Decoder) DisallowUnknownFields() {
+ cfg := adapter.iter.cfg.configBeforeFrozen
+ cfg.DisallowUnknownFields = true
+ adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
+
+// NewEncoder is the same as json.NewEncoder.
+func NewEncoder(writer io.Writer) *Encoder {
+ return ConfigDefault.NewEncoder(writer)
+}
+
+// Encoder same as json.Encoder
+type Encoder struct {
+ stream *Stream
+}
+
+// Encode writes the JSON encoding of val to the underlying io.Writer, followed by a newline.
+func (adapter *Encoder) Encode(val interface{}) error {
+ adapter.stream.WriteVal(val)
+ adapter.stream.WriteRaw("\n")
+ adapter.stream.Flush()
+ return adapter.stream.Error
+}
+
+// SetIndent sets the indentation step. Prefix is not supported.
+func (adapter *Encoder) SetIndent(prefix, indent string) {
+ config := adapter.stream.cfg.configBeforeFrozen
+ config.IndentionStep = len(indent)
+ adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// SetEscapeHTML specifies whether HTML characters should be escaped (enabled by default); set to false to disable.
+func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
+ config := adapter.stream.cfg.configBeforeFrozen
+ config.EscapeHTML = escapeHTML
+ adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// Valid reports whether data is a valid JSON encoding.
+func Valid(data []byte) bool {
+ return ConfigDefault.Valid(data)
+}
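+
+// A minimal usage sketch (illustrative, not part of the upstream file):
+// the adapter mirrors encoding/json's streaming API, so existing code
+// ports directly.
+//
+//	dec := NewDecoder(strings.NewReader(`{"a":1} {"a":2}`))
+//	for dec.More() {
+//		var v map[string]interface{}
+//		if err := dec.Decode(&v); err != nil {
+//			break
+//		}
+//	}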
diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go
new file mode 100644
index 000000000..f6b8aeab0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any.go
@@ -0,0 +1,325 @@
+package jsoniter
+
+import (
+ "errors"
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "reflect"
+ "strconv"
+ "unsafe"
+)
+
+// Any is a generic object representation.
+// The lazy JSON implementation holds []byte and parses it lazily.
+type Any interface {
+ LastError() error
+ ValueType() ValueType
+ MustBeValid() Any
+ ToBool() bool
+ ToInt() int
+ ToInt32() int32
+ ToInt64() int64
+ ToUint() uint
+ ToUint32() uint32
+ ToUint64() uint64
+ ToFloat32() float32
+ ToFloat64() float64
+ ToString() string
+ ToVal(val interface{})
+ Get(path ...interface{}) Any
+ Size() int
+ Keys() []string
+ GetInterface() interface{}
+ WriteTo(stream *Stream)
+}
+
+type baseAny struct{}
+
+func (any *baseAny) Get(path ...interface{}) Any {
+ return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
+}
+
+func (any *baseAny) Size() int {
+ return 0
+}
+
+func (any *baseAny) Keys() []string {
+ return []string{}
+}
+
+func (any *baseAny) ToVal(obj interface{}) {
+ panic("not implemented")
+}
+
+// WrapInt32 turns an int32 into the Any interface.
+func WrapInt32(val int32) Any {
+ return &int32Any{baseAny{}, val}
+}
+
+// WrapInt64 turns an int64 into the Any interface.
+func WrapInt64(val int64) Any {
+ return &int64Any{baseAny{}, val}
+}
+
+// WrapUint32 turns a uint32 into the Any interface.
+func WrapUint32(val uint32) Any {
+ return &uint32Any{baseAny{}, val}
+}
+
+// WrapUint64 turns a uint64 into the Any interface.
+func WrapUint64(val uint64) Any {
+ return &uint64Any{baseAny{}, val}
+}
+
+// WrapFloat64 turns a float64 into the Any interface.
+func WrapFloat64(val float64) Any {
+ return &floatAny{baseAny{}, val}
+}
+
+// WrapString turns a string into the Any interface.
+func WrapString(val string) Any {
+ return &stringAny{baseAny{}, val}
+}
+
+// Wrap turns a Go object into the Any interface.
+func Wrap(val interface{}) Any {
+ if val == nil {
+ return &nilAny{}
+ }
+ asAny, isAny := val.(Any)
+ if isAny {
+ return asAny
+ }
+ typ := reflect2.TypeOf(val)
+ switch typ.Kind() {
+ case reflect.Slice:
+ return wrapArray(val)
+ case reflect.Struct:
+ return wrapStruct(val)
+ case reflect.Map:
+ return wrapMap(val)
+ case reflect.String:
+ return WrapString(val.(string))
+ case reflect.Int:
+ if strconv.IntSize == 32 {
+ return WrapInt32(int32(val.(int)))
+ }
+ return WrapInt64(int64(val.(int)))
+ case reflect.Int8:
+ return WrapInt32(int32(val.(int8)))
+ case reflect.Int16:
+ return WrapInt32(int32(val.(int16)))
+ case reflect.Int32:
+ return WrapInt32(val.(int32))
+ case reflect.Int64:
+ return WrapInt64(val.(int64))
+ case reflect.Uint:
+ if strconv.IntSize == 32 {
+ return WrapUint32(uint32(val.(uint)))
+ }
+ return WrapUint64(uint64(val.(uint)))
+ case reflect.Uintptr:
+ if ptrSize == 32 {
+ return WrapUint32(uint32(val.(uintptr)))
+ }
+ return WrapUint64(uint64(val.(uintptr)))
+ case reflect.Uint8:
+ return WrapUint32(uint32(val.(uint8)))
+ case reflect.Uint16:
+ return WrapUint32(uint32(val.(uint16)))
+ case reflect.Uint32:
+ return WrapUint32(uint32(val.(uint32)))
+ case reflect.Uint64:
+ return WrapUint64(val.(uint64))
+ case reflect.Float32:
+ return WrapFloat64(float64(val.(float32)))
+ case reflect.Float64:
+ return WrapFloat64(val.(float64))
+ case reflect.Bool:
+		if val.(bool) {
+ return &trueAny{}
+ }
+ return &falseAny{}
+ }
+ return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)}
+}
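+
+// For example (illustrative): Wrap(nil).ValueType() == NilValue,
+// Wrap("x").ToString() == "x", and Wrap([]interface{}{1.0, 2.0}).Size() == 2.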
+
+// ReadAny reads the next JSON element as an Any object. It is a better json.RawMessage.
+func (iter *Iterator) ReadAny() Any {
+ return iter.readAny()
+}
+
+func (iter *Iterator) readAny() Any {
+ c := iter.nextToken()
+ switch c {
+ case '"':
+ iter.unreadByte()
+ return &stringAny{baseAny{}, iter.ReadString()}
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l') // null
+ return &nilAny{}
+ case 't':
+ iter.skipThreeBytes('r', 'u', 'e') // true
+ return &trueAny{}
+ case 'f':
+ iter.skipFourBytes('a', 'l', 's', 'e') // false
+ return &falseAny{}
+ case '{':
+ return iter.readObjectAny()
+ case '[':
+ return iter.readArrayAny()
+ case '-':
+ return iter.readNumberAny(false)
+ case 0:
+ return &invalidAny{baseAny{}, errors.New("input is empty")}
+ default:
+ return iter.readNumberAny(true)
+ }
+}
+
+func (iter *Iterator) readNumberAny(positive bool) Any {
+ iter.startCapture(iter.head - 1)
+ iter.skipNumber()
+ lazyBuf := iter.stopCapture()
+ return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func (iter *Iterator) readObjectAny() Any {
+ iter.startCapture(iter.head - 1)
+ iter.skipObject()
+ lazyBuf := iter.stopCapture()
+ return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func (iter *Iterator) readArrayAny() Any {
+ iter.startCapture(iter.head - 1)
+ iter.skipArray()
+ lazyBuf := iter.stopCapture()
+ return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func locateObjectField(iter *Iterator, target string) []byte {
+ var found []byte
+ iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+ if field == target {
+ found = iter.SkipAndReturnBytes()
+ return false
+ }
+ iter.Skip()
+ return true
+ })
+ return found
+}
+
+func locateArrayElement(iter *Iterator, target int) []byte {
+ var found []byte
+ n := 0
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ if n == target {
+ found = iter.SkipAndReturnBytes()
+ return false
+ }
+ iter.Skip()
+ n++
+ return true
+ })
+ return found
+}
+
+func locatePath(iter *Iterator, path []interface{}) Any {
+ for i, pathKeyObj := range path {
+ switch pathKey := pathKeyObj.(type) {
+ case string:
+ valueBytes := locateObjectField(iter, pathKey)
+ if valueBytes == nil {
+ return newInvalidAny(path[i:])
+ }
+ iter.ResetBytes(valueBytes)
+ case int:
+ valueBytes := locateArrayElement(iter, pathKey)
+ if valueBytes == nil {
+ return newInvalidAny(path[i:])
+ }
+ iter.ResetBytes(valueBytes)
+ case int32:
+			if pathKey == '*' {
+ return iter.readAny().Get(path[i:]...)
+ }
+ return newInvalidAny(path[i:])
+ default:
+ return newInvalidAny(path[i:])
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ return &invalidAny{baseAny{}, iter.Error}
+ }
+ return iter.readAny()
+}
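+
+// Illustrative example: a path mixes object keys (string), array indexes
+// (int), and the wildcard rune '*' (typed int32), so
+//
+//	Get(data, "users", '*', "name")
+//
+// collects the "name" field of every element under "users".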
+
+var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem()
+
+func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ == anyType {
+ return &directAnyCodec{}
+ }
+ if typ.Implements(anyType) {
+ return &anyCodec{
+ valType: typ,
+ }
+ }
+ return nil
+}
+
+func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ == anyType {
+ return &directAnyCodec{}
+ }
+ if typ.Implements(anyType) {
+ return &anyCodec{
+ valType: typ,
+ }
+ }
+ return nil
+}
+
+type anyCodec struct {
+ valType reflect2.Type
+}
+
+func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ panic("not implemented")
+}
+
+func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := codec.valType.UnsafeIndirect(ptr)
+ any := obj.(Any)
+ any.WriteTo(stream)
+}
+
+func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ obj := codec.valType.UnsafeIndirect(ptr)
+ any := obj.(Any)
+ return any.Size() == 0
+}
+
+type directAnyCodec struct {
+}
+
+func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ *(*Any)(ptr) = iter.readAny()
+}
+
+func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ any := *(*Any)(ptr)
+ if any == nil {
+ stream.WriteNil()
+ return
+ }
+ any.WriteTo(stream)
+}
+
+func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ any := *(*Any)(ptr)
+ return any.Size() == 0
+}
diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go
new file mode 100644
index 000000000..0449e9aa4
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_array.go
@@ -0,0 +1,278 @@
+package jsoniter
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+type arrayLazyAny struct {
+ baseAny
+ cfg *frozenConfig
+ buf []byte
+ err error
+}
+
+func (any *arrayLazyAny) ValueType() ValueType {
+ return ArrayValue
+}
+
+func (any *arrayLazyAny) MustBeValid() Any {
+ return any
+}
+
+func (any *arrayLazyAny) LastError() error {
+ return any.err
+}
+
+func (any *arrayLazyAny) ToBool() bool {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.ReadArray()
+}
+
+func (any *arrayLazyAny) ToInt() int {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToInt32() int32 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToInt64() int64 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToUint() uint {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToUint32() uint32 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToUint64() uint64 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToFloat32() float32 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToFloat64() float64 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToString() string {
+ return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *arrayLazyAny) ToVal(val interface{}) {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadVal(val)
+}
+
+func (any *arrayLazyAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case int:
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ valueBytes := locateArrayElement(iter, firstPath)
+ if valueBytes == nil {
+ return newInvalidAny(path)
+ }
+ iter.ResetBytes(valueBytes)
+ return locatePath(iter, path[1:])
+ case int32:
+		if firstPath == '*' {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ arr := make([]Any, 0)
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ found := iter.readAny().Get(path[1:]...)
+ if found.ValueType() != InvalidValue {
+ arr = append(arr, found)
+ }
+ return true
+ })
+ return wrapArray(arr)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
+
+func (any *arrayLazyAny) Size() int {
+ size := 0
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ size++
+ iter.Skip()
+ return true
+ })
+ return size
+}
+
+func (any *arrayLazyAny) WriteTo(stream *Stream) {
+ stream.Write(any.buf)
+}
+
+func (any *arrayLazyAny) GetInterface() interface{} {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.Read()
+}
+
+type arrayAny struct {
+ baseAny
+ val reflect.Value
+}
+
+func wrapArray(val interface{}) *arrayAny {
+ return &arrayAny{baseAny{}, reflect.ValueOf(val)}
+}
+
+func (any *arrayAny) ValueType() ValueType {
+ return ArrayValue
+}
+
+func (any *arrayAny) MustBeValid() Any {
+ return any
+}
+
+func (any *arrayAny) LastError() error {
+ return nil
+}
+
+func (any *arrayAny) ToBool() bool {
+ return any.val.Len() != 0
+}
+
+func (any *arrayAny) ToInt() int {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToInt32() int32 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToInt64() int64 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToUint() uint {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToUint32() uint32 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToUint64() uint64 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToFloat32() float32 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToFloat64() float64 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToString() string {
+ str, _ := MarshalToString(any.val.Interface())
+ return str
+}
+
+func (any *arrayAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case int:
+ if firstPath < 0 || firstPath >= any.val.Len() {
+ return newInvalidAny(path)
+ }
+ return Wrap(any.val.Index(firstPath).Interface())
+ case int32:
+		if firstPath == '*' {
+ mappedAll := make([]Any, 0)
+ for i := 0; i < any.val.Len(); i++ {
+ mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...)
+ if mapped.ValueType() != InvalidValue {
+ mappedAll = append(mappedAll, mapped)
+ }
+ }
+ return wrapArray(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
+
+func (any *arrayAny) Size() int {
+ return any.val.Len()
+}
+
+func (any *arrayAny) WriteTo(stream *Stream) {
+ stream.WriteVal(any.val)
+}
+
+func (any *arrayAny) GetInterface() interface{} {
+ return any.val.Interface()
+}
diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go
new file mode 100644
index 000000000..9452324af
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_bool.go
@@ -0,0 +1,137 @@
+package jsoniter
+
+type trueAny struct {
+ baseAny
+}
+
+func (any *trueAny) LastError() error {
+ return nil
+}
+
+func (any *trueAny) ToBool() bool {
+ return true
+}
+
+func (any *trueAny) ToInt() int {
+ return 1
+}
+
+func (any *trueAny) ToInt32() int32 {
+ return 1
+}
+
+func (any *trueAny) ToInt64() int64 {
+ return 1
+}
+
+func (any *trueAny) ToUint() uint {
+ return 1
+}
+
+func (any *trueAny) ToUint32() uint32 {
+ return 1
+}
+
+func (any *trueAny) ToUint64() uint64 {
+ return 1
+}
+
+func (any *trueAny) ToFloat32() float32 {
+ return 1
+}
+
+func (any *trueAny) ToFloat64() float64 {
+ return 1
+}
+
+func (any *trueAny) ToString() string {
+ return "true"
+}
+
+func (any *trueAny) WriteTo(stream *Stream) {
+ stream.WriteTrue()
+}
+
+func (any *trueAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *trueAny) GetInterface() interface{} {
+ return true
+}
+
+func (any *trueAny) ValueType() ValueType {
+ return BoolValue
+}
+
+func (any *trueAny) MustBeValid() Any {
+ return any
+}
+
+type falseAny struct {
+ baseAny
+}
+
+func (any *falseAny) LastError() error {
+ return nil
+}
+
+func (any *falseAny) ToBool() bool {
+ return false
+}
+
+func (any *falseAny) ToInt() int {
+ return 0
+}
+
+func (any *falseAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *falseAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *falseAny) ToUint() uint {
+ return 0
+}
+
+func (any *falseAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *falseAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *falseAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *falseAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *falseAny) ToString() string {
+ return "false"
+}
+
+func (any *falseAny) WriteTo(stream *Stream) {
+ stream.WriteFalse()
+}
+
+func (any *falseAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *falseAny) GetInterface() interface{} {
+ return false
+}
+
+func (any *falseAny) ValueType() ValueType {
+ return BoolValue
+}
+
+func (any *falseAny) MustBeValid() Any {
+ return any
+}
diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go
new file mode 100644
index 000000000..35fdb0949
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_float.go
@@ -0,0 +1,83 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type floatAny struct {
+ baseAny
+ val float64
+}
+
+func (any *floatAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *floatAny) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *floatAny) MustBeValid() Any {
+ return any
+}
+
+func (any *floatAny) LastError() error {
+ return nil
+}
+
+func (any *floatAny) ToBool() bool {
+ return any.ToFloat64() != 0
+}
+
+func (any *floatAny) ToInt() int {
+ return int(any.val)
+}
+
+func (any *floatAny) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *floatAny) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *floatAny) ToUint() uint {
+ if any.val > 0 {
+ return uint(any.val)
+ }
+ return 0
+}
+
+func (any *floatAny) ToUint32() uint32 {
+ if any.val > 0 {
+ return uint32(any.val)
+ }
+ return 0
+}
+
+func (any *floatAny) ToUint64() uint64 {
+ if any.val > 0 {
+ return uint64(any.val)
+ }
+ return 0
+}
+
+func (any *floatAny) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *floatAny) ToFloat64() float64 {
+ return any.val
+}
+
+func (any *floatAny) ToString() string {
+ return strconv.FormatFloat(any.val, 'E', -1, 64)
+}
+
+func (any *floatAny) WriteTo(stream *Stream) {
+ stream.WriteFloat64(any.val)
+}
+
+func (any *floatAny) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go
new file mode 100644
index 000000000..1b56f3991
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_int32.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type int32Any struct {
+ baseAny
+ val int32
+}
+
+func (any *int32Any) LastError() error {
+ return nil
+}
+
+func (any *int32Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *int32Any) MustBeValid() Any {
+ return any
+}
+
+func (any *int32Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *int32Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *int32Any) ToInt32() int32 {
+ return any.val
+}
+
+func (any *int32Any) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *int32Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *int32Any) ToUint32() uint32 {
+ return uint32(any.val)
+}
+
+func (any *int32Any) ToUint64() uint64 {
+ return uint64(any.val)
+}
+
+func (any *int32Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *int32Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *int32Any) ToString() string {
+ return strconv.FormatInt(int64(any.val), 10)
+}
+
+func (any *int32Any) WriteTo(stream *Stream) {
+ stream.WriteInt32(any.val)
+}
+
+func (any *int32Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *int32Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go
new file mode 100644
index 000000000..c440d72b6
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_int64.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type int64Any struct {
+ baseAny
+ val int64
+}
+
+func (any *int64Any) LastError() error {
+ return nil
+}
+
+func (any *int64Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *int64Any) MustBeValid() Any {
+ return any
+}
+
+func (any *int64Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *int64Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *int64Any) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *int64Any) ToInt64() int64 {
+ return any.val
+}
+
+func (any *int64Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *int64Any) ToUint32() uint32 {
+ return uint32(any.val)
+}
+
+func (any *int64Any) ToUint64() uint64 {
+ return uint64(any.val)
+}
+
+func (any *int64Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *int64Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *int64Any) ToString() string {
+ return strconv.FormatInt(any.val, 10)
+}
+
+func (any *int64Any) WriteTo(stream *Stream) {
+ stream.WriteInt64(any.val)
+}
+
+func (any *int64Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *int64Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go
new file mode 100644
index 000000000..1d859eac3
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_invalid.go
@@ -0,0 +1,82 @@
+package jsoniter
+
+import "fmt"
+
+type invalidAny struct {
+ baseAny
+ err error
+}
+
+func newInvalidAny(path []interface{}) *invalidAny {
+ return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)}
+}
+
+func (any *invalidAny) LastError() error {
+ return any.err
+}
+
+func (any *invalidAny) ValueType() ValueType {
+ return InvalidValue
+}
+
+func (any *invalidAny) MustBeValid() Any {
+ panic(any.err)
+}
+
+func (any *invalidAny) ToBool() bool {
+ return false
+}
+
+func (any *invalidAny) ToInt() int {
+ return 0
+}
+
+func (any *invalidAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *invalidAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *invalidAny) ToUint() uint {
+ return 0
+}
+
+func (any *invalidAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *invalidAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *invalidAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *invalidAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *invalidAny) ToString() string {
+ return ""
+}
+
+func (any *invalidAny) WriteTo(stream *Stream) {
+}
+
+func (any *invalidAny) Get(path ...interface{}) Any {
+ if any.err == nil {
+ return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)}
+ }
+ return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)}
+}
+
+func (any *invalidAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *invalidAny) GetInterface() interface{} {
+ return nil
+}
diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go
new file mode 100644
index 000000000..d04cb54c1
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_nil.go
@@ -0,0 +1,69 @@
+package jsoniter
+
+type nilAny struct {
+ baseAny
+}
+
+func (any *nilAny) LastError() error {
+ return nil
+}
+
+func (any *nilAny) ValueType() ValueType {
+ return NilValue
+}
+
+func (any *nilAny) MustBeValid() Any {
+ return any
+}
+
+func (any *nilAny) ToBool() bool {
+ return false
+}
+
+func (any *nilAny) ToInt() int {
+ return 0
+}
+
+func (any *nilAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *nilAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *nilAny) ToUint() uint {
+ return 0
+}
+
+func (any *nilAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *nilAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *nilAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *nilAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *nilAny) ToString() string {
+ return ""
+}
+
+func (any *nilAny) WriteTo(stream *Stream) {
+ stream.WriteNil()
+}
+
+func (any *nilAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *nilAny) GetInterface() interface{} {
+ return nil
+}
diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go
new file mode 100644
index 000000000..9d1e901a6
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_number.go
@@ -0,0 +1,123 @@
+package jsoniter
+
+import (
+ "io"
+ "unsafe"
+)
+
+type numberLazyAny struct {
+ baseAny
+ cfg *frozenConfig
+ buf []byte
+ err error
+}
+
+func (any *numberLazyAny) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *numberLazyAny) MustBeValid() Any {
+ return any
+}
+
+func (any *numberLazyAny) LastError() error {
+ return any.err
+}
+
+func (any *numberLazyAny) ToBool() bool {
+ return any.ToFloat64() != 0
+}
+
+func (any *numberLazyAny) ToInt() int {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadInt()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToInt32() int32 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadInt32()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToInt64() int64 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadInt64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToUint() uint {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadUint()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToUint32() uint32 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadUint32()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToUint64() uint64 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadUint64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToFloat32() float32 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadFloat32()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToFloat64() float64 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadFloat64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToString() string {
+ return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *numberLazyAny) WriteTo(stream *Stream) {
+ stream.Write(any.buf)
+}
+
+func (any *numberLazyAny) GetInterface() interface{} {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.Read()
+}
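
numberLazyAny keeps the raw number bytes and re-parses them with a borrowed iterator on every To* call, recording (rather than returning) any parse error. A minimal sketch of that behavior, assuming the package-level jsoniter.Get helper:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	price := jsoniter.Get([]byte(`{"price": 12.5}`), "price")

	fmt.Println(price.ToFloat64()) // 12.5, parsed lazily from the buffered bytes
	fmt.Println(price.ToString())  // "12.5", the raw buffer viewed as a string

	// ToInt parses the integer prefix and records the
	// "can not decode float as int" error instead of returning it.
	fmt.Println(price.ToInt(), price.LastError() != nil) // 12 true
}
```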
diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go
new file mode 100644
index 000000000..c44ef5c98
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_object.go
@@ -0,0 +1,374 @@
+package jsoniter
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+type objectLazyAny struct {
+ baseAny
+ cfg *frozenConfig
+ buf []byte
+ err error
+}
+
+func (any *objectLazyAny) ValueType() ValueType {
+ return ObjectValue
+}
+
+func (any *objectLazyAny) MustBeValid() Any {
+ return any
+}
+
+func (any *objectLazyAny) LastError() error {
+ return any.err
+}
+
+func (any *objectLazyAny) ToBool() bool {
+ return true
+}
+
+func (any *objectLazyAny) ToInt() int {
+ return 0
+}
+
+func (any *objectLazyAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *objectLazyAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *objectLazyAny) ToUint() uint {
+ return 0
+}
+
+func (any *objectLazyAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *objectLazyAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *objectLazyAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *objectLazyAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *objectLazyAny) ToString() string {
+ return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *objectLazyAny) ToVal(obj interface{}) {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadVal(obj)
+}
+
+func (any *objectLazyAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case string:
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ valueBytes := locateObjectField(iter, firstPath)
+ if valueBytes == nil {
+ return newInvalidAny(path)
+ }
+ iter.ResetBytes(valueBytes)
+ return locatePath(iter, path[1:])
+ case int32:
+ if '*' == firstPath {
+ mappedAll := map[string]Any{}
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadMapCB(func(iter *Iterator, field string) bool {
+ mapped := locatePath(iter, path[1:])
+ if mapped.ValueType() != InvalidValue {
+ mappedAll[field] = mapped
+ }
+ return true
+ })
+ return wrapMap(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
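
The int32 case above is what makes a rune literal like '*' act as a wildcard: Get maps the rest of the path over every field and wraps the surviving results in a map. A sketch, assuming the package-level jsoniter.Get helper and the Wrap pass-through for Any values defined elsewhere in the package:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	data := []byte(`{"alice": {"score": 3}, "bob": {"score": 7}}`)

	// '*' is an int32 (rune) literal, matching the wildcard branch above.
	scores := jsoniter.Get(data, '*', "score")

	fmt.Println(scores.Keys())             // e.g. [alice bob] (map order not guaranteed)
	fmt.Println(scores.Get("bob").ToInt()) // 7
}
```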
+
+func (any *objectLazyAny) Keys() []string {
+ keys := []string{}
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadMapCB(func(iter *Iterator, field string) bool {
+ iter.Skip()
+ keys = append(keys, field)
+ return true
+ })
+ return keys
+}
+
+func (any *objectLazyAny) Size() int {
+ size := 0
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+ iter.Skip()
+ size++
+ return true
+ })
+ return size
+}
+
+func (any *objectLazyAny) WriteTo(stream *Stream) {
+ stream.Write(any.buf)
+}
+
+func (any *objectLazyAny) GetInterface() interface{} {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.Read()
+}
+
+type objectAny struct {
+ baseAny
+ err error
+ val reflect.Value
+}
+
+func wrapStruct(val interface{}) *objectAny {
+ return &objectAny{baseAny{}, nil, reflect.ValueOf(val)}
+}
+
+func (any *objectAny) ValueType() ValueType {
+ return ObjectValue
+}
+
+func (any *objectAny) MustBeValid() Any {
+ return any
+}
+
+func (any *objectAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *objectAny) LastError() error {
+ return any.err
+}
+
+func (any *objectAny) ToBool() bool {
+ return any.val.NumField() != 0
+}
+
+func (any *objectAny) ToInt() int {
+ return 0
+}
+
+func (any *objectAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *objectAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *objectAny) ToUint() uint {
+ return 0
+}
+
+func (any *objectAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *objectAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *objectAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *objectAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *objectAny) ToString() string {
+ str, err := MarshalToString(any.val.Interface())
+ any.err = err
+ return str
+}
+
+func (any *objectAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case string:
+ field := any.val.FieldByName(firstPath)
+ if !field.IsValid() {
+ return newInvalidAny(path)
+ }
+ return Wrap(field.Interface())
+ case int32:
+ if '*' == firstPath {
+ mappedAll := map[string]Any{}
+ for i := 0; i < any.val.NumField(); i++ {
+ field := any.val.Field(i)
+ if field.CanInterface() {
+ mapped := Wrap(field.Interface()).Get(path[1:]...)
+ if mapped.ValueType() != InvalidValue {
+ mappedAll[any.val.Type().Field(i).Name] = mapped
+ }
+ }
+ }
+ return wrapMap(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
+
+func (any *objectAny) Keys() []string {
+ keys := make([]string, 0, any.val.NumField())
+ for i := 0; i < any.val.NumField(); i++ {
+ keys = append(keys, any.val.Type().Field(i).Name)
+ }
+ return keys
+}
+
+func (any *objectAny) Size() int {
+ return any.val.NumField()
+}
+
+func (any *objectAny) WriteTo(stream *Stream) {
+ stream.WriteVal(any.val)
+}
+
+func (any *objectAny) GetInterface() interface{} {
+ return any.val.Interface()
+}
+
+type mapAny struct {
+ baseAny
+ err error
+ val reflect.Value
+}
+
+func wrapMap(val interface{}) *mapAny {
+ return &mapAny{baseAny{}, nil, reflect.ValueOf(val)}
+}
+
+func (any *mapAny) ValueType() ValueType {
+ return ObjectValue
+}
+
+func (any *mapAny) MustBeValid() Any {
+ return any
+}
+
+func (any *mapAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *mapAny) LastError() error {
+ return any.err
+}
+
+func (any *mapAny) ToBool() bool {
+ return true
+}
+
+func (any *mapAny) ToInt() int {
+ return 0
+}
+
+func (any *mapAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *mapAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *mapAny) ToUint() uint {
+ return 0
+}
+
+func (any *mapAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *mapAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *mapAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *mapAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *mapAny) ToString() string {
+ str, err := MarshalToString(any.val.Interface())
+ any.err = err
+ return str
+}
+
+func (any *mapAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case int32:
+ if '*' == firstPath {
+ mappedAll := map[string]Any{}
+ for _, key := range any.val.MapKeys() {
+ keyAsStr := key.String()
+ element := Wrap(any.val.MapIndex(key).Interface())
+ mapped := element.Get(path[1:]...)
+ if mapped.ValueType() != InvalidValue {
+ mappedAll[keyAsStr] = mapped
+ }
+ }
+ return wrapMap(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ value := any.val.MapIndex(reflect.ValueOf(firstPath))
+ if !value.IsValid() {
+ return newInvalidAny(path)
+ }
+ return Wrap(value.Interface())
+ }
+}
+
+func (any *mapAny) Keys() []string {
+ keys := make([]string, 0, any.val.Len())
+ for _, key := range any.val.MapKeys() {
+ keys = append(keys, key.String())
+ }
+ return keys
+}
+
+func (any *mapAny) Size() int {
+ return any.val.Len()
+}
+
+func (any *mapAny) WriteTo(stream *Stream) {
+ stream.WriteVal(any.val)
+}
+
+func (any *mapAny) GetInterface() interface{} {
+ return any.val.Interface()
+}
diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go
new file mode 100644
index 000000000..a4b93c78c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_str.go
@@ -0,0 +1,166 @@
+package jsoniter
+
+import (
+ "fmt"
+ "strconv"
+)
+
+type stringAny struct {
+ baseAny
+ val string
+}
+
+func (any *stringAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
+}
+
+func (any *stringAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *stringAny) ValueType() ValueType {
+ return StringValue
+}
+
+func (any *stringAny) MustBeValid() Any {
+ return any
+}
+
+func (any *stringAny) LastError() error {
+ return nil
+}
+
+func (any *stringAny) ToBool() bool {
+ str := any.ToString()
+ if str == "0" {
+ return false
+ }
+ for _, c := range str {
+ switch c {
+ case ' ', '\n', '\r', '\t':
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+func (any *stringAny) ToInt() int {
+ return int(any.ToInt64())
+}
+
+func (any *stringAny) ToInt32() int32 {
+ return int32(any.ToInt64())
+}
+
+func (any *stringAny) ToInt64() int64 {
+ if any.val == "" {
+ return 0
+ }
+
+ flag := 1
+ startPos := 0
+ if any.val[0] == '+' || any.val[0] == '-' {
+ startPos = 1
+ }
+
+ if any.val[0] == '-' {
+ flag = -1
+ }
+
+ // start scanning at startPos so a sign-only string like "+" cannot panic below
+ endPos := startPos
+
+ for i := startPos; i < len(any.val); i++ {
+ if any.val[i] >= '0' && any.val[i] <= '9' {
+ endPos = i + 1
+ } else {
+ break
+ }
+ }
+ parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64)
+ return int64(flag) * parsed
+}
+
+func (any *stringAny) ToUint() uint {
+ return uint(any.ToUint64())
+}
+
+func (any *stringAny) ToUint32() uint32 {
+ return uint32(any.ToUint64())
+}
+
+func (any *stringAny) ToUint64() uint64 {
+ if any.val == "" {
+ return 0
+ }
+
+ startPos := 0
+
+ if any.val[0] == '-' {
+ return 0
+ }
+ if any.val[0] == '+' {
+ startPos = 1
+ }
+
+ // start scanning at startPos so a sign-only "+" cannot panic below
+ endPos := startPos
+
+ for i := startPos; i < len(any.val); i++ {
+ if any.val[i] >= '0' && any.val[i] <= '9' {
+ endPos = i + 1
+ } else {
+ break
+ }
+ }
+ parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64)
+ return parsed
+}
+
+func (any *stringAny) ToFloat32() float32 {
+ return float32(any.ToFloat64())
+}
+
+func (any *stringAny) ToFloat64() float64 {
+ if len(any.val) == 0 {
+ return 0
+ }
+
+ // first char invalid
+ if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') {
+ return 0
+ }
+
+ // extract valid num expression from string
+ // eg 123true => 123, -12.12xxa => -12.12
+ endPos := 1
+ for i := 1; i < len(any.val); i++ {
+ if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' {
+ endPos = i + 1
+ continue
+ }
+
+ // end position is the first char which is not digit
+ if any.val[i] >= '0' && any.val[i] <= '9' {
+ endPos = i + 1
+ } else {
+ endPos = i
+ break
+ }
+ }
+ parsed, _ := strconv.ParseFloat(any.val[:endPos], 64)
+ return parsed
+}
+
+func (any *stringAny) ToString() string {
+ return any.val
+}
+
+func (any *stringAny) WriteTo(stream *Stream) {
+ stream.WriteString(any.val)
+}
+
+func (any *stringAny) GetInterface() interface{} {
+ return any.val
+}
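
These string conversions implement the fuzzy rules tabulated in fuzzy_mode_convert_table.md: the longest leading numeric expression is parsed and trailing junk is ignored. A sketch, assuming the package-level jsoniter.Get helper:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	qty := jsoniter.Get([]byte(`{"qty": "123.23xxxw"}`), "qty")

	fmt.Println(qty.ToInt())     // 123    (digits before the dot)
	fmt.Println(qty.ToFloat64()) // 123.23 (numeric prefix, junk ignored)
	fmt.Println(qty.ToBool())    // true   (non-empty and not "0")
}
```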
diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go
new file mode 100644
index 000000000..656bbd33d
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_uint32.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type uint32Any struct {
+ baseAny
+ val uint32
+}
+
+func (any *uint32Any) LastError() error {
+ return nil
+}
+
+func (any *uint32Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *uint32Any) MustBeValid() Any {
+ return any
+}
+
+func (any *uint32Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *uint32Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *uint32Any) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *uint32Any) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *uint32Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *uint32Any) ToUint32() uint32 {
+ return any.val
+}
+
+func (any *uint32Any) ToUint64() uint64 {
+ return uint64(any.val)
+}
+
+func (any *uint32Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *uint32Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *uint32Any) ToString() string {
+ return strconv.FormatInt(int64(any.val), 10)
+}
+
+func (any *uint32Any) WriteTo(stream *Stream) {
+ stream.WriteUint32(any.val)
+}
+
+func (any *uint32Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *uint32Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go
new file mode 100644
index 000000000..7df2fce33
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_uint64.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type uint64Any struct {
+ baseAny
+ val uint64
+}
+
+func (any *uint64Any) LastError() error {
+ return nil
+}
+
+func (any *uint64Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *uint64Any) MustBeValid() Any {
+ return any
+}
+
+func (any *uint64Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *uint64Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *uint64Any) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *uint64Any) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *uint64Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *uint64Any) ToUint32() uint32 {
+ return uint32(any.val)
+}
+
+func (any *uint64Any) ToUint64() uint64 {
+ return any.val
+}
+
+func (any *uint64Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *uint64Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *uint64Any) ToString() string {
+ return strconv.FormatUint(any.val, 10)
+}
+
+func (any *uint64Any) WriteTo(stream *Stream) {
+ stream.WriteUint64(any.val)
+}
+
+func (any *uint64Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *uint64Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh
new file mode 100644
index 000000000..b45ef6883
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/build.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+set -x
+
+if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then
+ mkdir -p /tmp/build-golang/src/github.com/json-iterator
+ ln -s "$PWD" /tmp/build-golang/src/github.com/json-iterator/go
+fi
+export GOPATH=/tmp/build-golang
+go get -u github.com/golang/dep/cmd/dep
+cd /tmp/build-golang/src/github.com/json-iterator/go
+exec "$GOPATH/bin/dep" ensure -update
diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go
new file mode 100644
index 000000000..8c58fcba5
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/config.go
@@ -0,0 +1,375 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "io"
+ "reflect"
+ "sync"
+ "unsafe"
+
+ "github.com/modern-go/concurrent"
+ "github.com/modern-go/reflect2"
+)
+
+// Config customizes how the API behaves.
+// An API is created from a Config by calling Froze.
+type Config struct {
+ IndentionStep int
+ MarshalFloatWith6Digits bool
+ EscapeHTML bool
+ SortMapKeys bool
+ UseNumber bool
+ DisallowUnknownFields bool
+ TagKey string
+ OnlyTaggedField bool
+ ValidateJsonRawMessage bool
+ ObjectFieldMustBeSimpleString bool
+ CaseSensitive bool
+}
+
+// API is the public interface of this package.
+// It primarily provides Marshal and Unmarshal.
+type API interface {
+ IteratorPool
+ StreamPool
+ MarshalToString(v interface{}) (string, error)
+ Marshal(v interface{}) ([]byte, error)
+ MarshalIndent(v interface{}, prefix, indent string) ([]byte, error)
+ UnmarshalFromString(str string, v interface{}) error
+ Unmarshal(data []byte, v interface{}) error
+ Get(data []byte, path ...interface{}) Any
+ NewEncoder(writer io.Writer) *Encoder
+ NewDecoder(reader io.Reader) *Decoder
+ Valid(data []byte) bool
+ RegisterExtension(extension Extension)
+ DecoderOf(typ reflect2.Type) ValDecoder
+ EncoderOf(typ reflect2.Type) ValEncoder
+}
+
+// ConfigDefault is the default API.
+var ConfigDefault = Config{
+ EscapeHTML: true,
+}.Froze()
+
+// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior
+var ConfigCompatibleWithStandardLibrary = Config{
+ EscapeHTML: true,
+ SortMapKeys: true,
+ ValidateJsonRawMessage: true,
+}.Froze()
+
+// ConfigFastest marshals floats with only 6 digits of precision
+var ConfigFastest = Config{
+ EscapeHTML: false,
+ MarshalFloatWith6Digits: true, // will lose precision
+ ObjectFieldMustBeSimpleString: true, // do not unescape object field
+}.Froze()
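
The three frozen configs above cover the common cases; any other combination can be frozen once and reused, since Froze builds the encoder/decoder caches per configuration. A minimal sketch of both patterns:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Shadowing the stdlib name is the usual drop-in-replacement idiom.
var json = jsoniter.ConfigCompatibleWithStandardLibrary

func main() {
	out, _ := json.Marshal(map[string]int{"b": 2, "a": 1})
	fmt.Println(string(out)) // {"a":1,"b":2} — SortMapKeys gives stable output

	// Custom combinations are frozen once and then reused.
	pretty := jsoniter.Config{SortMapKeys: true, IndentionStep: 2}.Froze()
	s, _ := pretty.MarshalToString(map[string]int{"a": 1})
	fmt.Println(s)
}
```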
+
+type frozenConfig struct {
+ configBeforeFrozen Config
+ sortMapKeys bool
+ indentionStep int
+ objectFieldMustBeSimpleString bool
+ onlyTaggedField bool
+ disallowUnknownFields bool
+ decoderCache *concurrent.Map
+ encoderCache *concurrent.Map
+ encoderExtension Extension
+ decoderExtension Extension
+ extraExtensions []Extension
+ streamPool *sync.Pool
+ iteratorPool *sync.Pool
+ caseSensitive bool
+}
+
+func (cfg *frozenConfig) initCache() {
+ cfg.decoderCache = concurrent.NewMap()
+ cfg.encoderCache = concurrent.NewMap()
+}
+
+func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) {
+ cfg.decoderCache.Store(cacheKey, decoder)
+}
+
+func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) {
+ cfg.encoderCache.Store(cacheKey, encoder)
+}
+
+func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder {
+ decoder, found := cfg.decoderCache.Load(cacheKey)
+ if found {
+ return decoder.(ValDecoder)
+ }
+ return nil
+}
+
+func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder {
+ encoder, found := cfg.encoderCache.Load(cacheKey)
+ if found {
+ return encoder.(ValEncoder)
+ }
+ return nil
+}
+
+var cfgCache = concurrent.NewMap()
+
+func getFrozenConfigFromCache(cfg Config) *frozenConfig {
+ obj, found := cfgCache.Load(cfg)
+ if found {
+ return obj.(*frozenConfig)
+ }
+ return nil
+}
+
+func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) {
+ cfgCache.Store(cfg, frozenConfig)
+}
+
+// Froze forges an API from the Config.
+func (cfg Config) Froze() API {
+ api := &frozenConfig{
+ sortMapKeys: cfg.SortMapKeys,
+ indentionStep: cfg.IndentionStep,
+ objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString,
+ onlyTaggedField: cfg.OnlyTaggedField,
+ disallowUnknownFields: cfg.DisallowUnknownFields,
+ caseSensitive: cfg.CaseSensitive,
+ }
+ api.streamPool = &sync.Pool{
+ New: func() interface{} {
+ return NewStream(api, nil, 512)
+ },
+ }
+ api.iteratorPool = &sync.Pool{
+ New: func() interface{} {
+ return NewIterator(api)
+ },
+ }
+ api.initCache()
+ encoderExtension := EncoderExtension{}
+ decoderExtension := DecoderExtension{}
+ if cfg.MarshalFloatWith6Digits {
+ api.marshalFloatWith6Digits(encoderExtension)
+ }
+ if cfg.EscapeHTML {
+ api.escapeHTML(encoderExtension)
+ }
+ if cfg.UseNumber {
+ api.useNumber(decoderExtension)
+ }
+ if cfg.ValidateJsonRawMessage {
+ api.validateJsonRawMessage(encoderExtension)
+ }
+ api.encoderExtension = encoderExtension
+ api.decoderExtension = decoderExtension
+ api.configBeforeFrozen = cfg
+ return api
+}
+
+func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig {
+ api := getFrozenConfigFromCache(cfg)
+ if api != nil {
+ return api
+ }
+ api = cfg.Froze().(*frozenConfig)
+ for _, extension := range extraExtensions {
+ api.RegisterExtension(extension)
+ }
+ addFrozenConfigToCache(cfg, api)
+ return api
+}
+
+func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
+ encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
+ rawMessage := *(*json.RawMessage)(ptr)
+ iter := cfg.BorrowIterator([]byte(rawMessage))
+ defer cfg.ReturnIterator(iter)
+ iter.Read()
+ if iter.Error != nil && iter.Error != io.EOF {
+ stream.WriteRaw("null")
+ } else {
+ stream.WriteRaw(string(rawMessage))
+ }
+ }, func(ptr unsafe.Pointer) bool {
+ return len(*((*json.RawMessage)(ptr))) == 0
+ }}
+ extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder
+ extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder
+}
+
+func (cfg *frozenConfig) useNumber(extension DecoderExtension) {
+ extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) {
+ existingValue := *((*interface{})(ptr))
+ if existingValue != nil && reflect.TypeOf(existingValue).Kind() == reflect.Ptr {
+ iter.ReadVal(existingValue)
+ return
+ }
+ if iter.WhatIsNext() == NumberValue {
+ *((*interface{})(ptr)) = json.Number(iter.readNumberAsString())
+ } else {
+ *((*interface{})(ptr)) = iter.Read()
+ }
+ }}
+}
+func (cfg *frozenConfig) getTagKey() string {
+ tagKey := cfg.configBeforeFrozen.TagKey
+ if tagKey == "" {
+ return "json"
+ }
+ return tagKey
+}
+
+func (cfg *frozenConfig) RegisterExtension(extension Extension) {
+ cfg.extraExtensions = append(cfg.extraExtensions, extension)
+ copied := cfg.configBeforeFrozen
+ cfg.configBeforeFrozen = copied
+}
+
+type lossyFloat32Encoder struct {
+}
+
+func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat32Lossy(*((*float32)(ptr)))
+}
+
+func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float32)(ptr)) == 0
+}
+
+type lossyFloat64Encoder struct {
+}
+
+func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat64Lossy(*((*float64)(ptr)))
+}
+
+func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float64)(ptr)) == 0
+}
+
+// marshalFloatWith6Digits keeps 10**(-6) precision
+// for float variables for better performance.
+func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) {
+ // for better performance
+ extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{}
+ extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{}
+}
+
+type htmlEscapedStringEncoder struct {
+}
+
+func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ str := *((*string)(ptr))
+ stream.WriteStringWithHTMLEscaped(str)
+}
+
+func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*string)(ptr)) == ""
+}
+
+func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) {
+ encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{}
+}
+
+func (cfg *frozenConfig) cleanDecoders() {
+ typeDecoders = map[string]ValDecoder{}
+ fieldDecoders = map[string]ValDecoder{}
+ *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
+}
+
+func (cfg *frozenConfig) cleanEncoders() {
+ typeEncoders = map[string]ValEncoder{}
+ fieldEncoders = map[string]ValEncoder{}
+ *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
+}
+
+func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) {
+ stream := cfg.BorrowStream(nil)
+ defer cfg.ReturnStream(stream)
+ stream.WriteVal(v)
+ if stream.Error != nil {
+ return "", stream.Error
+ }
+ return string(stream.Buffer()), nil
+}
+
+func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) {
+ stream := cfg.BorrowStream(nil)
+ defer cfg.ReturnStream(stream)
+ stream.WriteVal(v)
+ if stream.Error != nil {
+ return nil, stream.Error
+ }
+ result := stream.Buffer()
+ copied := make([]byte, len(result))
+ copy(copied, result)
+ return copied, nil
+}
+
+func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ if prefix != "" {
+ panic("prefix is not supported")
+ }
+ for _, r := range indent {
+ if r != ' ' {
+ panic("indent can only be space")
+ }
+ }
+ newCfg := cfg.configBeforeFrozen
+ newCfg.IndentionStep = len(indent)
+ return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v)
+}
+
+func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {
+ data := []byte(str)
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ iter.ReadVal(v)
+ c := iter.nextToken()
+ if c == 0 {
+ if iter.Error == io.EOF {
+ return nil
+ }
+ return iter.Error
+ }
+ iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
+ return iter.Error
+}
+
+func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any {
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ return locatePath(iter, path)
+}
+
+func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error {
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ iter.ReadVal(v)
+ c := iter.nextToken()
+ if c == 0 {
+ if iter.Error == io.EOF {
+ return nil
+ }
+ return iter.Error
+ }
+ iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
+ return iter.Error
+}
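
Note the trailing-token check after ReadVal: input with leftover non-whitespace bytes is rejected rather than silently truncated. A small sketch:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var v map[string]int

	err := jsoniter.ConfigDefault.Unmarshal([]byte(`{"a": 1}`), &v)
	fmt.Println(err) // <nil>

	// Leftover non-whitespace input triggers the
	// "there are bytes left after unmarshal" error.
	err = jsoniter.ConfigDefault.Unmarshal([]byte(`{"a": 1} trailing`), &v)
	fmt.Println(err != nil) // true
}
```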
+
+func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder {
+ stream := NewStream(cfg, writer, 512)
+ return &Encoder{stream}
+}
+
+func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder {
+ iter := Parse(cfg, reader, 512)
+ return &Decoder{iter}
+}
+
+func (cfg *frozenConfig) Valid(data []byte) bool {
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ iter.Skip()
+ return iter.Error == nil
+}
diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
new file mode 100644
index 000000000..3095662b0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
@@ -0,0 +1,7 @@
+| json type \ dest type | bool | int | uint | float |string|
+| --- | --- | --- | --- |--|--|
+| number | positive => true <br/> negative => true <br/> zero => false| 23.2 => 23 <br/> -32.1 => -32| 12.1 => 12 <br/> -12.1 => 0|as normal|same as origin|
+| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32| 13.2 => 13 <br/> -1.1 => 0 |12.1 => 12.1 <br/> -12.3 => -12.3 <br/> 12.4xxa => 12.4 <br/> +1.1e2 =>110 |same as origin|
+| bool | true => true <br/> false => false| true => 1 <br/> false => 0 | true => 1 <br/> false => 0 |true => 1 <br/> false => 0|true => "true" <br/> false => "false"|
+| object | true | 0 | 0 |0|original json|
+| array | empty array => false <br/> nonempty array => true| [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 |[] => 0 <br/> [1,2] => 1|original json|
\ No newline at end of file
diff --git a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod
new file mode 100644
index 000000000..e05c42ff5
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/go.mod
@@ -0,0 +1,11 @@
+module github.com/json-iterator/go
+
+go 1.12
+
+require (
+ github.com/davecgh/go-spew v1.1.1
+ github.com/google/gofuzz v1.0.0
+ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421
+ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742
+ github.com/stretchr/testify v1.3.0
+)
diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum
new file mode 100644
index 000000000..d778b5a14
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/go.sum
@@ -0,0 +1,14 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go
new file mode 100644
index 000000000..29b31cf78
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter.go
@@ -0,0 +1,349 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+)
+
+// ValueType the type for JSON element
+type ValueType int
+
+const (
+ // InvalidValue invalid JSON element
+ InvalidValue ValueType = iota
+ // StringValue JSON element "string"
+ StringValue
+ // NumberValue JSON element 100 or 0.10
+ NumberValue
+ // NilValue JSON element null
+ NilValue
+ // BoolValue JSON element true or false
+ BoolValue
+ // ArrayValue JSON element []
+ ArrayValue
+ // ObjectValue JSON element {}
+ ObjectValue
+)
+
+var hexDigits []byte
+var valueTypes []ValueType
+
+func init() {
+ hexDigits = make([]byte, 256)
+ for i := 0; i < len(hexDigits); i++ {
+ hexDigits[i] = 255
+ }
+ for i := '0'; i <= '9'; i++ {
+ hexDigits[i] = byte(i - '0')
+ }
+ for i := 'a'; i <= 'f'; i++ {
+ hexDigits[i] = byte((i - 'a') + 10)
+ }
+ for i := 'A'; i <= 'F'; i++ {
+ hexDigits[i] = byte((i - 'A') + 10)
+ }
+ valueTypes = make([]ValueType, 256)
+ for i := 0; i < len(valueTypes); i++ {
+ valueTypes[i] = InvalidValue
+ }
+ valueTypes['"'] = StringValue
+ valueTypes['-'] = NumberValue
+ valueTypes['0'] = NumberValue
+ valueTypes['1'] = NumberValue
+ valueTypes['2'] = NumberValue
+ valueTypes['3'] = NumberValue
+ valueTypes['4'] = NumberValue
+ valueTypes['5'] = NumberValue
+ valueTypes['6'] = NumberValue
+ valueTypes['7'] = NumberValue
+ valueTypes['8'] = NumberValue
+ valueTypes['9'] = NumberValue
+ valueTypes['t'] = BoolValue
+ valueTypes['f'] = BoolValue
+ valueTypes['n'] = NilValue
+ valueTypes['['] = ArrayValue
+ valueTypes['{'] = ObjectValue
+}
+
+// Iterator is an io.Reader-like object with JSON-specific read functions.
+// Errors are not returned as return values, but stored in the Error field on this iterator instance.
+type Iterator struct {
+ cfg *frozenConfig
+ reader io.Reader
+ buf []byte
+ head int
+ tail int
+ depth int
+ captureStartedAt int
+ captured []byte
+ Error error
+ Attachment interface{} // open for customized decoder
+}
+
+// NewIterator creates an empty Iterator instance
+func NewIterator(cfg API) *Iterator {
+ return &Iterator{
+ cfg: cfg.(*frozenConfig),
+ reader: nil,
+ buf: nil,
+ head: 0,
+ tail: 0,
+ depth: 0,
+ }
+}
+
+// Parse creates an Iterator instance from an io.Reader
+func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
+ return &Iterator{
+ cfg: cfg.(*frozenConfig),
+ reader: reader,
+ buf: make([]byte, bufSize),
+ head: 0,
+ tail: 0,
+ depth: 0,
+ }
+}
+
+// ParseBytes creates an Iterator instance from a byte slice
+func ParseBytes(cfg API, input []byte) *Iterator {
+ return &Iterator{
+ cfg: cfg.(*frozenConfig),
+ reader: nil,
+ buf: input,
+ head: 0,
+ tail: len(input),
+ depth: 0,
+ }
+}
+
+// ParseString creates an Iterator instance from a string
+func ParseString(cfg API, input string) *Iterator {
+ return ParseBytes(cfg, []byte(input))
+}
+
+// Pool returns a pool that can provide more iterators with the same configuration
+func (iter *Iterator) Pool() IteratorPool {
+ return iter.cfg
+}
+
+// Reset reuses the iterator instance by specifying another reader
+func (iter *Iterator) Reset(reader io.Reader) *Iterator {
+ iter.reader = reader
+ iter.head = 0
+ iter.tail = 0
+ iter.depth = 0
+ return iter
+}
+
+// ResetBytes reuses the iterator instance by specifying another byte slice as input
+func (iter *Iterator) ResetBytes(input []byte) *Iterator {
+ iter.reader = nil
+ iter.buf = input
+ iter.head = 0
+ iter.tail = len(input)
+ iter.depth = 0
+ return iter
+}
+
+// WhatIsNext gets the ValueType of the next JSON element without consuming it
+func (iter *Iterator) WhatIsNext() ValueType {
+ valueType := valueTypes[iter.nextToken()]
+ iter.unreadByte()
+ return valueType
+}
+
+func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case ' ', '\n', '\t', '\r':
+ continue
+ }
+ iter.head = i
+ return false
+ }
+ return true
+}
+
+func (iter *Iterator) isObjectEnd() bool {
+ c := iter.nextToken()
+ if c == ',' {
+ return false
+ }
+ if c == '}' {
+ return true
+ }
+ iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c}))
+ return true
+}
+
+func (iter *Iterator) nextToken() byte {
+ // a variation of skip whitespaces, returning the next non-whitespace token
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case ' ', '\n', '\t', '\r':
+ continue
+ }
+ iter.head = i + 1
+ return c
+ }
+ if !iter.loadMore() {
+ return 0
+ }
+ }
+}
+
+// ReportError records an error on the iterator instance, including the current position.
+func (iter *Iterator) ReportError(operation string, msg string) {
+ if iter.Error != nil {
+ if iter.Error != io.EOF {
+ return
+ }
+ }
+ peekStart := iter.head - 10
+ if peekStart < 0 {
+ peekStart = 0
+ }
+ peekEnd := iter.head + 10
+ if peekEnd > iter.tail {
+ peekEnd = iter.tail
+ }
+ parsing := string(iter.buf[peekStart:peekEnd])
+ contextStart := iter.head - 50
+ if contextStart < 0 {
+ contextStart = 0
+ }
+ contextEnd := iter.head + 50
+ if contextEnd > iter.tail {
+ contextEnd = iter.tail
+ }
+ context := string(iter.buf[contextStart:contextEnd])
+ iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...",
+ operation, msg, iter.head-peekStart, parsing, context)
+}
+
+// CurrentBuffer gets the current buffer as a string for debugging purposes
+func (iter *Iterator) CurrentBuffer() string {
+ peekStart := iter.head - 10
+ if peekStart < 0 {
+ peekStart = 0
+ }
+ return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head,
+ string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
+}
+
+func (iter *Iterator) readByte() (ret byte) {
+ if iter.head == iter.tail {
+ if iter.loadMore() {
+ ret = iter.buf[iter.head]
+ iter.head++
+ return ret
+ }
+ return 0
+ }
+ ret = iter.buf[iter.head]
+ iter.head++
+ return ret
+}
+
+func (iter *Iterator) loadMore() bool {
+ if iter.reader == nil {
+ if iter.Error == nil {
+ iter.head = iter.tail
+ iter.Error = io.EOF
+ }
+ return false
+ }
+ if iter.captured != nil {
+ iter.captured = append(iter.captured,
+ iter.buf[iter.captureStartedAt:iter.tail]...)
+ iter.captureStartedAt = 0
+ }
+ for {
+ n, err := iter.reader.Read(iter.buf)
+ if n == 0 {
+ if err != nil {
+ if iter.Error == nil {
+ iter.Error = err
+ }
+ return false
+ }
+ } else {
+ iter.head = 0
+ iter.tail = n
+ return true
+ }
+ }
+}
+
+func (iter *Iterator) unreadByte() {
+ if iter.Error != nil {
+ return
+ }
+ iter.head--
+ return
+}
+
+// Read reads the next JSON element as a generic interface{}.
+func (iter *Iterator) Read() interface{} {
+ valueType := iter.WhatIsNext()
+ switch valueType {
+ case StringValue:
+ return iter.ReadString()
+ case NumberValue:
+ if iter.cfg.configBeforeFrozen.UseNumber {
+ return json.Number(iter.readNumberAsString())
+ }
+ return iter.ReadFloat64()
+ case NilValue:
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ return nil
+ case BoolValue:
+ return iter.ReadBool()
+ case ArrayValue:
+ arr := []interface{}{}
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ var elem interface{}
+ iter.ReadVal(&elem)
+ arr = append(arr, elem)
+ return true
+ })
+ return arr
+ case ObjectValue:
+ obj := map[string]interface{}{}
+ iter.ReadMapCB(func(Iter *Iterator, field string) bool {
+ var elem interface{}
+ iter.ReadVal(&elem)
+ obj[field] = elem
+ return true
+ })
+ return obj
+ default:
+ iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType))
+ return nil
+ }
+}
+
+// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9
+const maxDepth = 10000
+
+func (iter *Iterator) incrementDepth() (success bool) {
+ iter.depth++
+ if iter.depth <= maxDepth {
+ return true
+ }
+ iter.ReportError("incrementDepth", "exceeded max depth")
+ return false
+}
+
+func (iter *Iterator) decrementDepth() (success bool) {
+ iter.depth--
+ if iter.depth >= 0 {
+ return true
+ }
+ iter.ReportError("decrementDepth", "unexpected negative nesting")
+ return false
+}
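
The maxDepth guard above turns pathologically nested input into a reported error instead of unbounded recursion. A sketch, assuming decoding into interface{} routes arrays through ReadArrayCB and its depth accounting:

```go
package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// 10001 nested arrays exceed maxDepth (10000).
	nested := strings.Repeat("[", 10001) + strings.Repeat("]", 10001)

	var v interface{}
	err := jsoniter.ConfigDefault.Unmarshal([]byte(nested), &v)
	fmt.Println(err != nil) // true: "exceeded max depth"
}
```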
diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go
new file mode 100644
index 000000000..204fe0e09
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_array.go
@@ -0,0 +1,64 @@
+package jsoniter
+
+// ReadArray reads an array element and reports whether the array has more elements to read.
+func (iter *Iterator) ReadArray() (ret bool) {
+ c := iter.nextToken()
+ switch c {
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l')
+ return false // null
+ case '[':
+ c = iter.nextToken()
+ if c != ']' {
+ iter.unreadByte()
+ return true
+ }
+ return false
+ case ']':
+ return false
+ case ',':
+ return true
+ default:
+ iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c}))
+ return
+ }
+}
+
+// ReadArrayCB reads an array with a callback
+func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
+ c := iter.nextToken()
+ if c == '[' {
+ if !iter.incrementDepth() {
+ return false
+ }
+ c = iter.nextToken()
+ if c != ']' {
+ iter.unreadByte()
+ if !callback(iter) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ for c == ',' {
+ if !callback(iter) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ }
+ if c != ']' {
+ iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c}))
+ iter.decrementDepth()
+ return false
+ }
+ return iter.decrementDepth()
+ }
+ return iter.decrementDepth()
+ }
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return true // null
+ }
+ iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c}))
+ return false
+}
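
ReadArrayCB is the streaming counterpart of ReadArray: the callback runs once per element and can abort iteration by returning false. A minimal sketch using ParseString from iter.go:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[1, 2, 3, 4]`)

	sum := 0
	iter.ReadArrayCB(func(it *jsoniter.Iterator) bool {
		sum += it.ReadInt()
		return true // returning false would stop the iteration early
	})
	fmt.Println(sum, iter.Error) // 10 <nil>
}
```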
diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go
new file mode 100644
index 000000000..b9754638e
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_float.go
@@ -0,0 +1,339 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "io"
+ "math/big"
+ "strconv"
+ "strings"
+ "unsafe"
+)
+
+var floatDigits []int8
+
+const invalidCharForNumber = int8(-1)
+const endOfNumber = int8(-2)
+const dotInNumber = int8(-3)
+
+func init() {
+ floatDigits = make([]int8, 256)
+ for i := 0; i < len(floatDigits); i++ {
+ floatDigits[i] = invalidCharForNumber
+ }
+ for i := int8('0'); i <= int8('9'); i++ {
+ floatDigits[i] = i - int8('0')
+ }
+ floatDigits[','] = endOfNumber
+ floatDigits[']'] = endOfNumber
+ floatDigits['}'] = endOfNumber
+ floatDigits[' '] = endOfNumber
+ floatDigits['\t'] = endOfNumber
+ floatDigits['\n'] = endOfNumber
+ floatDigits['.'] = dotInNumber
+}
+
+// ReadBigFloat reads a big.Float
+func (iter *Iterator) ReadBigFloat() (ret *big.Float) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return nil
+ }
+ prec := 64
+ if len(str) > prec {
+ prec = len(str)
+ }
+ val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero)
+ if err != nil {
+ iter.Error = err
+ return nil
+ }
+ return val
+}
+
+// ReadBigInt reads a big.Int
+func (iter *Iterator) ReadBigInt() (ret *big.Int) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return nil
+ }
+ ret = big.NewInt(0)
+ var success bool
+ ret, success = ret.SetString(str, 10)
+ if !success {
+ iter.ReportError("ReadBigInt", "invalid big int")
+ return nil
+ }
+ return ret
+}
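
ReadBigFloat and ReadBigInt matter when a number exceeds what float64 or int64 can represent exactly. A sketch using ParseString:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// This value overflows int64, but big.Int holds it exactly.
	iter := jsoniter.ParseString(jsoniter.ConfigDefault,
		`123456789012345678901234567890`)

	v := iter.ReadBigInt()
	fmt.Println(v.String()) // 123456789012345678901234567890
}
```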
+
+// ReadFloat32 reads a float32
+func (iter *Iterator) ReadFloat32() (ret float32) {
+ c := iter.nextToken()
+ if c == '-' {
+ return -iter.readPositiveFloat32()
+ }
+ iter.unreadByte()
+ return iter.readPositiveFloat32()
+}
+
+func (iter *Iterator) readPositiveFloat32() (ret float32) {
+ i := iter.head
+ // first char
+ if i == iter.tail {
+ return iter.readFloat32SlowPath()
+ }
+ c := iter.buf[i]
+ i++
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat32SlowPath()
+ case endOfNumber:
+ iter.ReportError("readFloat32", "empty number")
+ return
+ case dotInNumber:
+ iter.ReportError("readFloat32", "leading dot is invalid")
+ return
+ case 0:
+ if i == iter.tail {
+ return iter.readFloat32SlowPath()
+ }
+ c = iter.buf[i]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ iter.ReportError("readFloat32", "leading zero is invalid")
+ return
+ }
+ }
+ value := uint64(ind)
+ // chars before dot
+non_decimal_loop:
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat32SlowPath()
+ case endOfNumber:
+ iter.head = i
+ return float32(value)
+ case dotInNumber:
+ break non_decimal_loop
+ }
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat32SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
+ }
+ // chars after dot
+ if c == '.' {
+ i++
+ decimalPlaces := 0
+ if i == iter.tail {
+ return iter.readFloat32SlowPath()
+ }
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case endOfNumber:
+ if decimalPlaces > 0 && decimalPlaces < len(pow10) {
+ iter.head = i
+ return float32(float64(value) / float64(pow10[decimalPlaces]))
+ }
+ // too many decimal places
+ return iter.readFloat32SlowPath()
+ case invalidCharForNumber, dotInNumber:
+ return iter.readFloat32SlowPath()
+ }
+ decimalPlaces++
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat32SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind)
+ }
+ }
+ return iter.readFloat32SlowPath()
+}
+
+func (iter *Iterator) readNumberAsString() (ret string) {
+ strBuf := [16]byte{}
+ str := strBuf[0:0]
+load_loop:
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ str = append(str, c)
+ continue
+ default:
+ iter.head = i
+ break load_loop
+ }
+ }
+ if !iter.loadMore() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ if len(str) == 0 {
+ iter.ReportError("readNumberAsString", "invalid number")
+ }
+ return *(*string)(unsafe.Pointer(&str))
+}
+
+func (iter *Iterator) readFloat32SlowPath() (ret float32) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ errMsg := validateFloat(str)
+ if errMsg != "" {
+ iter.ReportError("readFloat32SlowPath", errMsg)
+ return
+ }
+ val, err := strconv.ParseFloat(str, 32)
+ if err != nil {
+ iter.Error = err
+ return
+ }
+ return float32(val)
+}
+
+// ReadFloat64 reads a float64
+func (iter *Iterator) ReadFloat64() (ret float64) {
+ c := iter.nextToken()
+ if c == '-' {
+ return -iter.readPositiveFloat64()
+ }
+ iter.unreadByte()
+ return iter.readPositiveFloat64()
+}
+
+func (iter *Iterator) readPositiveFloat64() (ret float64) {
+ i := iter.head
+ // first char
+ if i == iter.tail {
+ return iter.readFloat64SlowPath()
+ }
+ c := iter.buf[i]
+ i++
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat64SlowPath()
+ case endOfNumber:
+ iter.ReportError("readFloat64", "empty number")
+ return
+ case dotInNumber:
+ iter.ReportError("readFloat64", "leading dot is invalid")
+ return
+ case 0:
+ if i == iter.tail {
+ return iter.readFloat64SlowPath()
+ }
+ c = iter.buf[i]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ iter.ReportError("readFloat64", "leading zero is invalid")
+ return
+ }
+ }
+ value := uint64(ind)
+ // chars before dot
+non_decimal_loop:
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat64SlowPath()
+ case endOfNumber:
+ iter.head = i
+ return float64(value)
+ case dotInNumber:
+ break non_decimal_loop
+ }
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat64SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
+ }
+ // chars after dot
+ if c == '.' {
+ i++
+ decimalPlaces := 0
+ if i == iter.tail {
+ return iter.readFloat64SlowPath()
+ }
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case endOfNumber:
+ if decimalPlaces > 0 && decimalPlaces < len(pow10) {
+ iter.head = i
+ return float64(value) / float64(pow10[decimalPlaces])
+ }
+ // too many decimal places
+ return iter.readFloat64SlowPath()
+ case invalidCharForNumber, dotInNumber:
+ return iter.readFloat64SlowPath()
+ }
+ decimalPlaces++
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat64SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind)
+ }
+ }
+ return iter.readFloat64SlowPath()
+}
+
+func (iter *Iterator) readFloat64SlowPath() (ret float64) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ errMsg := validateFloat(str)
+ if errMsg != "" {
+ iter.ReportError("readFloat64SlowPath", errMsg)
+ return
+ }
+ val, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ iter.Error = err
+ return
+ }
+ return val
+}
+
+func validateFloat(str string) string {
+ // strconv.ParseFloat is not validating `1.` or `1.e1`
+ if len(str) == 0 {
+ return "empty number"
+ }
+ if str[0] == '-' {
+ return "-- is not valid"
+ }
+ dotPos := strings.IndexByte(str, '.')
+ if dotPos != -1 {
+ if dotPos == len(str)-1 {
+ return "dot can not be last character"
+ }
+ switch str[dotPos+1] {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ default:
+ return "missing digit after dot"
+ }
+ }
+ return ""
+}
+
+// ReadNumber reads a json.Number
+func (iter *Iterator) ReadNumber() (ret json.Number) {
+ return json.Number(iter.readNumberAsString())
+}
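
ReadNumber pairs with the UseNumber option from config.go: with it set, generic decoding yields json.Number instead of float64, so large integers survive intact. A sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	api := jsoniter.Config{UseNumber: true}.Froze()

	var v interface{}
	// 2^53 + 1 would lose precision as a float64.
	_ = api.Unmarshal([]byte(`9007199254740993`), &v)

	n := v.(json.Number) // a json.Number, not a float64
	i, _ := n.Int64()
	fmt.Println(n, i) // 9007199254740993 9007199254740993
}
```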
diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go
new file mode 100644
index 000000000..214232035
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_int.go
@@ -0,0 +1,345 @@
+package jsoniter
+
+import (
+ "math"
+ "strconv"
+)
+
+var intDigits []int8
+
+const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
+const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1
+
+func init() {
+ intDigits = make([]int8, 256)
+ for i := 0; i < len(intDigits); i++ {
+ intDigits[i] = invalidCharForNumber
+ }
+ for i := int8('0'); i <= int8('9'); i++ {
+ intDigits[i] = i - int8('0')
+ }
+}
+
+// ReadUint reads a uint
+func (iter *Iterator) ReadUint() uint {
+ if strconv.IntSize == 32 {
+ return uint(iter.ReadUint32())
+ }
+ return uint(iter.ReadUint64())
+}
+
+// ReadInt reads an int
+func (iter *Iterator) ReadInt() int {
+ if strconv.IntSize == 32 {
+ return int(iter.ReadInt32())
+ }
+ return int(iter.ReadInt64())
+}
+
+// ReadInt8 reads an int8
+func (iter *Iterator) ReadInt8() (ret int8) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint32(iter.readByte())
+ if val > math.MaxInt8+1 {
+ iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return -int8(val)
+ }
+ val := iter.readUint32(c)
+ if val > math.MaxInt8 {
+ iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return int8(val)
+}
+
+// ReadUint8 reads a uint8
+func (iter *Iterator) ReadUint8() (ret uint8) {
+ val := iter.readUint32(iter.nextToken())
+ if val > math.MaxUint8 {
+ iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return uint8(val)
+}
+
+// ReadInt16 reads an int16
+func (iter *Iterator) ReadInt16() (ret int16) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint32(iter.readByte())
+ if val > math.MaxInt16+1 {
+ iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return -int16(val)
+ }
+ val := iter.readUint32(c)
+ if val > math.MaxInt16 {
+ iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return int16(val)
+}
+
+// ReadUint16 reads a uint16
+func (iter *Iterator) ReadUint16() (ret uint16) {
+ val := iter.readUint32(iter.nextToken())
+ if val > math.MaxUint16 {
+ iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return uint16(val)
+}
+
+// ReadInt32 reads an int32
+func (iter *Iterator) ReadInt32() (ret int32) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint32(iter.readByte())
+ if val > math.MaxInt32+1 {
+ iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return -int32(val)
+ }
+ val := iter.readUint32(c)
+ if val > math.MaxInt32 {
+ iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return int32(val)
+}
+
+// ReadUint32 reads a uint32
+func (iter *Iterator) ReadUint32() (ret uint32) {
+ return iter.readUint32(iter.nextToken())
+}
+
+func (iter *Iterator) readUint32(c byte) (ret uint32) {
+ ind := intDigits[c]
+ if ind == 0 {
+ iter.assertInteger()
+ return 0 // single zero
+ }
+ if ind == invalidCharForNumber {
+ iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)}))
+ return
+ }
+ value := uint32(ind)
+ if iter.tail-iter.head > 10 {
+ i := iter.head
+ ind2 := intDigits[iter.buf[i]]
+ if ind2 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ i++
+ ind3 := intDigits[iter.buf[i]]
+ if ind3 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10 + uint32(ind2)
+ }
+ //iter.head = i + 1
+ //value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
+ i++
+ ind4 := intDigits[iter.buf[i]]
+ if ind4 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100 + uint32(ind2)*10 + uint32(ind3)
+ }
+ i++
+ ind5 := intDigits[iter.buf[i]]
+ if ind5 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
+ }
+ i++
+ ind6 := intDigits[iter.buf[i]]
+ if ind6 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
+ }
+ i++
+ ind7 := intDigits[iter.buf[i]]
+ if ind7 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
+ }
+ i++
+ ind8 := intDigits[iter.buf[i]]
+ if ind8 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
+ }
+ i++
+ ind9 := intDigits[iter.buf[i]]
+ value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
+ iter.head = i
+ if ind9 == invalidCharForNumber {
+ iter.assertInteger()
+ return value
+ }
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ ind = intDigits[iter.buf[i]]
+ if ind == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ if value > uint32SafeToMultiply10 {
+ value2 := (value << 3) + (value << 1) + uint32(ind)
+ if value2 < value {
+ iter.ReportError("readUint32", "overflow")
+ return
+ }
+ value = value2
+ continue
+ }
+ value = (value << 3) + (value << 1) + uint32(ind)
+ }
+ if !iter.loadMore() {
+ iter.assertInteger()
+ return value
+ }
+ }
+}
+
+// ReadInt64 reads an int64
+func (iter *Iterator) ReadInt64() (ret int64) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint64(iter.readByte())
+ if val > math.MaxInt64+1 {
+ iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
+ return
+ }
+ return -int64(val)
+ }
+ val := iter.readUint64(c)
+ if val > math.MaxInt64 {
+ iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
+ return
+ }
+ return int64(val)
+}
+
+// ReadUint64 reads a uint64
+func (iter *Iterator) ReadUint64() uint64 {
+ return iter.readUint64(iter.nextToken())
+}
+
+func (iter *Iterator) readUint64(c byte) (ret uint64) {
+ ind := intDigits[c]
+ if ind == 0 {
+ iter.assertInteger()
+ return 0 // single zero
+ }
+ if ind == invalidCharForNumber {
+ iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)}))
+ return
+ }
+ value := uint64(ind)
+ if iter.tail-iter.head > 10 {
+ i := iter.head
+ ind2 := intDigits[iter.buf[i]]
+ if ind2 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ i++
+ ind3 := intDigits[iter.buf[i]]
+ if ind3 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10 + uint64(ind2)
+ }
+ //iter.head = i + 1
+ //value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
+ i++
+ ind4 := intDigits[iter.buf[i]]
+ if ind4 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100 + uint64(ind2)*10 + uint64(ind3)
+ }
+ i++
+ ind5 := intDigits[iter.buf[i]]
+ if ind5 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4)
+ }
+ i++
+ ind6 := intDigits[iter.buf[i]]
+ if ind6 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5)
+ }
+ i++
+ ind7 := intDigits[iter.buf[i]]
+ if ind7 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6)
+ }
+ i++
+ ind8 := intDigits[iter.buf[i]]
+ if ind8 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7)
+ }
+ i++
+ ind9 := intDigits[iter.buf[i]]
+ value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8)
+ iter.head = i
+ if ind9 == invalidCharForNumber {
+ iter.assertInteger()
+ return value
+ }
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ ind = intDigits[iter.buf[i]]
+ if ind == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ if value > uint64SafeToMultiple10 {
+ value2 := (value << 3) + (value << 1) + uint64(ind)
+ if value2 < value {
+ iter.ReportError("readUint64", "overflow")
+ return
+ }
+ value = value2
+ continue
+ }
+ value = (value << 3) + (value << 1) + uint64(ind)
+ }
+ if !iter.loadMore() {
+ iter.assertInteger()
+ return value
+ }
+ }
+}
+
+func (iter *Iterator) assertInteger() {
+ if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' {
+ iter.ReportError("assertInteger", "can not decode float as int")
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go
new file mode 100644
index 000000000..b65137114
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_object.go
@@ -0,0 +1,267 @@
+package jsoniter
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ReadObject reads one field from the object.
+// If the object has ended, it returns the empty string.
+// Otherwise, it returns the field name.
+func (iter *Iterator) ReadObject() (ret string) {
+ c := iter.nextToken()
+ switch c {
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l')
+ return "" // null
+ case '{':
+ c = iter.nextToken()
+ if c == '"' {
+ iter.unreadByte()
+ field := iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ return field
+ }
+ if c == '}' {
+ return "" // end of object
+ }
+ iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c}))
+ return
+ case ',':
+ field := iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ return field
+ case '}':
+ return "" // end of object
+ default:
+ iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c})))
+ return
+ }
+}
+
+// readFieldHash reads the next object field name and returns an FNV-1a
+// style hash of it, lower-casing ASCII letters first unless the config
+// is case sensitive.
+func (iter *Iterator) readFieldHash() int64 {
+ hash := int64(0x811c9dc5)
+ c := iter.nextToken()
+ if c != '"' {
+ iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
+ return 0
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ // require ascii string and no escape
+ b := iter.buf[i]
+ if b == '\\' {
+ iter.head = i
+ for _, b := range iter.readStringSlowPath() {
+ if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
+ b += 'a' - 'A'
+ }
+ hash ^= int64(b)
+ hash *= 0x1000193
+ }
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
+ return 0
+ }
+ return hash
+ }
+ if b == '"' {
+ iter.head = i + 1
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
+ return 0
+ }
+ return hash
+ }
+ if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
+ b += 'a' - 'A'
+ }
+ hash ^= int64(b)
+ hash *= 0x1000193
+ }
+ if !iter.loadMore() {
+ iter.ReportError("readFieldHash", `incomplete field name`)
+ return 0
+ }
+ }
+}
+
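+// calcHash computes the same FNV-1a style hash as readFieldHash for an
+// already-known string, so precomputed field-name hashes can be matched
+// against hashes read from the input.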
+func calcHash(str string, caseSensitive bool) int64 {
+ if !caseSensitive {
+ str = strings.ToLower(str)
+ }
+ hash := int64(0x811c9dc5)
+ for _, b := range []byte(str) {
+ hash ^= int64(b)
+ hash *= 0x1000193
+ }
+ return hash
+}
+
+// ReadObjectCB reads an object with a callback; the key is expected to be ASCII-only and the field name is not copied.
+func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
+ c := iter.nextToken()
+ var field string
+ if c == '{' {
+ if !iter.incrementDepth() {
+ return false
+ }
+ c = iter.nextToken()
+ if c == '"' {
+ iter.unreadByte()
+ field = iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ if !callback(iter, field) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ for c == ',' {
+ field = iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ if !callback(iter, field) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ }
+ if c != '}' {
+ iter.ReportError("ReadObjectCB", `object not ended with }`)
+ iter.decrementDepth()
+ return false
+ }
+ return iter.decrementDepth()
+ }
+ if c == '}' {
+ return iter.decrementDepth()
+ }
+ iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c}))
+ iter.decrementDepth()
+ return false
+ }
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return true // null
+ }
+ iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c}))
+ return false
+}
+
+// ReadMapCB reads a map with a callback; the key can be any string.
+func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
+ c := iter.nextToken()
+ if c == '{' {
+ if !iter.incrementDepth() {
+ return false
+ }
+ c = iter.nextToken()
+ if c == '"' {
+ iter.unreadByte()
+ field := iter.ReadString()
+ if iter.nextToken() != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ iter.decrementDepth()
+ return false
+ }
+ if !callback(iter, field) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ for c == ',' {
+ field = iter.ReadString()
+ if iter.nextToken() != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ iter.decrementDepth()
+ return false
+ }
+ if !callback(iter, field) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ }
+ if c != '}' {
+ iter.ReportError("ReadMapCB", `object not ended with }`)
+ iter.decrementDepth()
+ return false
+ }
+ return iter.decrementDepth()
+ }
+ if c == '}' {
+ return iter.decrementDepth()
+ }
+ iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c}))
+ iter.decrementDepth()
+ return false
+ }
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return true // null
+ }
+ iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
+ return false
+}
+
+func (iter *Iterator) readObjectStart() bool {
+ c := iter.nextToken()
+ if c == '{' {
+ c = iter.nextToken()
+ if c == '}' {
+ return false
+ }
+ iter.unreadByte()
+ return true
+ } else if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return false
+ }
+ iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c}))
+ return false
+}
+
+func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) {
+ str := iter.ReadStringAsSlice()
+ if iter.skipWhitespacesWithoutLoadMore() {
+ if ret == nil {
+ ret = make([]byte, len(str))
+ copy(ret, str)
+ }
+ if !iter.loadMore() {
+ return
+ }
+ }
+ if iter.buf[iter.head] != ':' {
+ iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]}))
+ return
+ }
+ iter.head++
+ if iter.skipWhitespacesWithoutLoadMore() {
+ if ret == nil {
+ ret = make([]byte, len(str))
+ copy(ret, str)
+ }
+ if !iter.loadMore() {
+ return
+ }
+ }
+ if ret == nil {
+ return str
+ }
+ return ret
+}
diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go
new file mode 100644
index 000000000..e91eefb15
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip.go
@@ -0,0 +1,130 @@
+package jsoniter
+
+import "fmt"
+
+// ReadNil reads a JSON null if one is next and
+// reports whether it did.
+func (iter *Iterator) ReadNil() (ret bool) {
+ c := iter.nextToken()
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l') // null
+ return true
+ }
+ iter.unreadByte()
+ return false
+}
+
+// ReadBool reads a JSON boolean value.
+func (iter *Iterator) ReadBool() (ret bool) {
+ c := iter.nextToken()
+ if c == 't' {
+ iter.skipThreeBytes('r', 'u', 'e')
+ return true
+ }
+ if c == 'f' {
+ iter.skipFourBytes('a', 'l', 's', 'e')
+ return false
+ }
+ iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c}))
+ return
+}
+
+// SkipAndReturnBytes skips the next JSON element and returns its content as []byte.
+// The returned []byte is a copy of the data and may be kept.
+func (iter *Iterator) SkipAndReturnBytes() []byte {
+ iter.startCapture(iter.head)
+ iter.Skip()
+ return iter.stopCapture()
+}
+
+// SkipAndAppendBytes skips next JSON element and appends its content to
+// buffer, returning the result.
+func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte {
+ iter.startCaptureTo(buf, iter.head)
+ iter.Skip()
+ return iter.stopCapture()
+}
+
+func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) {
+ if iter.captured != nil {
+ panic("already in capture mode")
+ }
+ iter.captureStartedAt = captureStartedAt
+ iter.captured = buf
+}
+
+func (iter *Iterator) startCapture(captureStartedAt int) {
+ iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt)
+}
+
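+// stopCapture leaves capture mode and returns everything captured since
+// startCapture, including the not-yet-flushed portion of the current buffer.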
+func (iter *Iterator) stopCapture() []byte {
+ if iter.captured == nil {
+ panic("not in capture mode")
+ }
+ captured := iter.captured
+ remaining := iter.buf[iter.captureStartedAt:iter.head]
+ iter.captureStartedAt = -1
+ iter.captured = nil
+ return append(captured, remaining...)
+}
+
+// Skip skips one JSON value and leaves the iterator positioned at the next one.
+func (iter *Iterator) Skip() {
+ c := iter.nextToken()
+ switch c {
+ case '"':
+ iter.skipString()
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l') // null
+ case 't':
+ iter.skipThreeBytes('r', 'u', 'e') // true
+ case 'f':
+ iter.skipFourBytes('a', 'l', 's', 'e') // false
+ case '0':
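+  // a leading zero is only valid as 0, 0.x or 0e...; parsing it as a
+  // float validates the form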
+ iter.unreadByte()
+ iter.ReadFloat32()
+ case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ iter.skipNumber()
+ case '[':
+ iter.skipArray()
+ case '{':
+ iter.skipObject()
+ default:
+ iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c))
+ return
+ }
+}
+
+func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) {
+ if iter.readByte() != b1 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+ if iter.readByte() != b2 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+ if iter.readByte() != b3 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+ if iter.readByte() != b4 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+}
+
+func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) {
+ if iter.readByte() != b1 {
+ iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+ return
+ }
+ if iter.readByte() != b2 {
+ iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+ return
+ }
+ if iter.readByte() != b3 {
+ iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+ return
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
new file mode 100644
index 000000000..9303de41e
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
@@ -0,0 +1,163 @@
+//+build jsoniter_sloppy
+
+package jsoniter
+
+// Sloppy but faster implementation; does not validate the input JSON.
+
+func (iter *Iterator) skipNumber() {
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case ' ', '\n', '\r', '\t', ',', '}', ']':
+ iter.head = i
+ return
+ }
+ }
+ if !iter.loadMore() {
+ return
+ }
+ }
+}
+
+func (iter *Iterator) skipArray() {
+ level := 1
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ switch iter.buf[i] {
+ case '"': // If inside string, skip it
+ iter.head = i + 1
+ iter.skipString()
+ i = iter.head - 1 // it will be i++ soon
+ case '[': // If open symbol, increase level
+ level++
+ if !iter.incrementDepth() {
+ return
+ }
+ case ']': // If close symbol, decrease level
+ level--
+ if !iter.decrementDepth() {
+ return
+ }
+
+ // If we have returned to the original level, we're done
+ if level == 0 {
+ iter.head = i + 1
+ return
+ }
+ }
+ }
+ if !iter.loadMore() {
+ iter.ReportError("skipArray", "incomplete array")
+ return
+ }
+ }
+}
+
+func (iter *Iterator) skipObject() {
+ level := 1
+ if !iter.incrementDepth() {
+ return
+ }
+
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ switch iter.buf[i] {
+ case '"': // If inside string, skip it
+ iter.head = i + 1
+ iter.skipString()
+ i = iter.head - 1 // it will be i++ soon
+ case '{': // If open symbol, increase level
+ level++
+ if !iter.incrementDepth() {
+ return
+ }
+ case '}': // If close symbol, decrease level
+ level--
+ if !iter.decrementDepth() {
+ return
+ }
+
+ // If we have returned to the original level, we're done
+ if level == 0 {
+ iter.head = i + 1
+ return
+ }
+ }
+ }
+ if !iter.loadMore() {
+ iter.ReportError("skipObject", "incomplete object")
+ return
+ }
+ }
+}
+
+func (iter *Iterator) skipString() {
+ for {
+ end, escaped := iter.findStringEnd()
+ if end == -1 {
+ if !iter.loadMore() {
+ iter.ReportError("skipString", "incomplete string")
+ return
+ }
+ if escaped {
+ iter.head = 1 // skip the first char as last char read is \
+ }
+ } else {
+ iter.head = end
+ return
+ }
+ }
+}
+
+// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go
+// Tries to find the end of the string.
+// Supports strings that contain escaped quote symbols.
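+// A quote terminates the string only when preceded by an even number of
+// backslashes: in `\\"` the backslash is escaped and the quote ends the
+// string, while in `\"` the quote itself is escaped.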
+func (iter *Iterator) findStringEnd() (int, bool) {
+ escaped := false
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ if c == '"' {
+ if !escaped {
+ return i + 1, false
+ }
+ j := i - 1
+ for {
+ if j < iter.head || iter.buf[j] != '\\' {
+ // even number of backslashes
+ // either end of buffer, or " found
+ return i + 1, true
+ }
+ j--
+ if j < iter.head || iter.buf[j] != '\\' {
+ // odd number of backslashes
+ // it is \" or \\\"
+ break
+ }
+ j--
+ }
+ } else if c == '\\' {
+ escaped = true
+ }
+ }
+ j := iter.tail - 1
+ for {
+ if j < iter.head || iter.buf[j] != '\\' {
+ // even number of backslashes
+ // either end of buffer, or " found
+ return -1, false // do not end with \
+ }
+ j--
+ if j < iter.head || iter.buf[j] != '\\' {
+ // odd number of backslashes
+ // it is \" or \\\"
+ break
+ }
+ j--
+
+ }
+ return -1, true // end with \
+}
diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go
new file mode 100644
index 000000000..6cf66d043
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go
@@ -0,0 +1,99 @@
+//+build !jsoniter_sloppy
+
+package jsoniter
+
+import (
+ "fmt"
+ "io"
+)
+
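+// skipNumber validates while skipping: the fast path handles unambiguous
+// digit runs, and anything else falls back to a full ReadFloat64 (then
+// ReadBigFloat) parse so malformed numbers are reported.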
+func (iter *Iterator) skipNumber() {
+ if !iter.trySkipNumber() {
+ iter.unreadByte()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ iter.ReadFloat64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = nil
+ iter.ReadBigFloat()
+ }
+ }
+}
+
+func (iter *Iterator) trySkipNumber() bool {
+ dotFound := false
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ case '.':
+ if dotFound {
+ iter.ReportError("validateNumber", `more than one dot found in number`)
+ return true // already failed
+ }
+ if i+1 == iter.tail {
+ return false
+ }
+ c = iter.buf[i+1]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ default:
+ iter.ReportError("validateNumber", `missing digit after dot`)
+ return true // already failed
+ }
+ dotFound = true
+ default:
+ switch c {
+ case ',', ']', '}', ' ', '\t', '\n', '\r':
+ if iter.head == i {
+ return false // if - without following digits
+ }
+ iter.head = i
+ return true // must be valid
+ }
+ return false // may be invalid
+ }
+ }
+ return false
+}
+
+func (iter *Iterator) skipString() {
+ if !iter.trySkipString() {
+ iter.unreadByte()
+ iter.ReadString()
+ }
+}
+
+func (iter *Iterator) trySkipString() bool {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ if c == '"' {
+ iter.head = i + 1
+ return true // valid
+ } else if c == '\\' {
+ return false
+ } else if c < ' ' {
+ iter.ReportError("trySkipString",
+ fmt.Sprintf(`invalid control character found: %d`, c))
+ return true // already failed
+ }
+ }
+ return false
+}
+
+func (iter *Iterator) skipObject() {
+ iter.unreadByte()
+ iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+ iter.Skip()
+ return true
+ })
+}
+
+func (iter *Iterator) skipArray() {
+ iter.unreadByte()
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ iter.Skip()
+ return true
+ })
+}
diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go
new file mode 100644
index 000000000..adc487ea8
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_str.go
@@ -0,0 +1,215 @@
+package jsoniter
+
+import (
+ "fmt"
+ "unicode/utf16"
+)
+
+// ReadString reads a string from the iterator.
+func (iter *Iterator) ReadString() (ret string) {
+ c := iter.nextToken()
+ if c == '"' {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ if c == '"' {
+ ret = string(iter.buf[iter.head:i])
+ iter.head = i + 1
+ return ret
+ } else if c == '\\' {
+ break
+ } else if c < ' ' {
+ iter.ReportError("ReadString",
+ fmt.Sprintf(`invalid control character found: %d`, c))
+ return
+ }
+ }
+ return iter.readStringSlowPath()
+ } else if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return ""
+ }
+ iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c}))
+ return
+}
+
+func (iter *Iterator) readStringSlowPath() (ret string) {
+ var str []byte
+ var c byte
+ for iter.Error == nil {
+ c = iter.readByte()
+ if c == '"' {
+ return string(str)
+ }
+ if c == '\\' {
+ c = iter.readByte()
+ str = iter.readEscapedChar(c, str)
+ } else {
+ str = append(str, c)
+ }
+ }
+ iter.ReportError("readStringSlowPath", "unexpected end of input")
+ return
+}
+
+func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte {
+ switch c {
+ case 'u':
+ r := iter.readU4()
+ if utf16.IsSurrogate(r) {
+ c = iter.readByte()
+ if iter.Error != nil {
+ return nil
+ }
+ if c != '\\' {
+ iter.unreadByte()
+ str = appendRune(str, r)
+ return str
+ }
+ c = iter.readByte()
+ if iter.Error != nil {
+ return nil
+ }
+ if c != 'u' {
+ str = appendRune(str, r)
+ return iter.readEscapedChar(c, str)
+ }
+ r2 := iter.readU4()
+ if iter.Error != nil {
+ return nil
+ }
+ combined := utf16.DecodeRune(r, r2)
+ if combined == '\uFFFD' {
+ str = appendRune(str, r)
+ str = appendRune(str, r2)
+ } else {
+ str = appendRune(str, combined)
+ }
+ } else {
+ str = appendRune(str, r)
+ }
+ case '"':
+ str = append(str, '"')
+ case '\\':
+ str = append(str, '\\')
+ case '/':
+ str = append(str, '/')
+ case 'b':
+ str = append(str, '\b')
+ case 'f':
+ str = append(str, '\f')
+ case 'n':
+ str = append(str, '\n')
+ case 'r':
+ str = append(str, '\r')
+ case 't':
+ str = append(str, '\t')
+ default:
+ iter.ReportError("readEscapedChar",
+ `invalid escape char after \`)
+ return nil
+ }
+ return str
+}
+
+// ReadStringAsSlice reads a string from the iterator without copying it into string form.
+// The returned []byte must not be kept: it aliases the internal buffer and changes on the next iterator call.
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) {
+ c := iter.nextToken()
+ if c == '"' {
+ for i := iter.head; i < iter.tail; i++ {
+ // require ascii string and no escape
+ // for: field name, base64, number
+ if iter.buf[i] == '"' {
+ // fast path: reuse the underlying buffer
+ ret = iter.buf[iter.head:i]
+ iter.head = i + 1
+ return ret
+ }
+ }
+ readLen := iter.tail - iter.head
+ copied := make([]byte, readLen, readLen*2)
+ copy(copied, iter.buf[iter.head:iter.tail])
+ iter.head = iter.tail
+ for iter.Error == nil {
+ c := iter.readByte()
+ if c == '"' {
+ return copied
+ }
+ copied = append(copied, c)
+ }
+ return copied
+ }
+ iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c}))
+ return
+}
+
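+// readU4 reads the four hex digits of a \uXXXX escape and returns the
+// resulting UTF-16 code unit.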
+func (iter *Iterator) readU4() (ret rune) {
+ for i := 0; i < 4; i++ {
+ c := iter.readByte()
+ if iter.Error != nil {
+ return
+ }
+ if c >= '0' && c <= '9' {
+ ret = ret*16 + rune(c-'0')
+ } else if c >= 'a' && c <= 'f' {
+ ret = ret*16 + rune(c-'a'+10)
+ } else if c >= 'A' && c <= 'F' {
+ ret = ret*16 + rune(c-'A'+10)
+ } else {
+ iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c}))
+ return
+ }
+ }
+ return ret
+}
+
+const (
+ t1 = 0x00 // 0000 0000
+ tx = 0x80 // 1000 0000
+ t2 = 0xC0 // 1100 0000
+ t3 = 0xE0 // 1110 0000
+ t4 = 0xF0 // 1111 0000
+ t5 = 0xF8 // 1111 1000
+
+ maskx = 0x3F // 0011 1111
+ mask2 = 0x1F // 0001 1111
+ mask3 = 0x0F // 0000 1111
+ mask4 = 0x07 // 0000 0111
+
+ rune1Max = 1<<7 - 1
+ rune2Max = 1<<11 - 1
+ rune3Max = 1<<16 - 1
+
+ surrogateMin = 0xD800
+ surrogateMax = 0xDFFF
+
+ maxRune = '\U0010FFFF' // Maximum valid Unicode code point.
+ runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character"
+)
+
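+// appendRune UTF-8-encodes r and appends the bytes to p, mirroring the
+// encoding logic of unicode/utf8.EncodeRune using the constants above.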
+func appendRune(p []byte, r rune) []byte {
+ // Negative values are erroneous. Making it unsigned addresses the problem.
+ switch i := uint32(r); {
+ case i <= rune1Max:
+ p = append(p, byte(r))
+ return p
+ case i <= rune2Max:
+ p = append(p, t2|byte(r>>6))
+ p = append(p, tx|byte(r)&maskx)
+ return p
+ case i > maxRune, surrogateMin <= i && i <= surrogateMax:
+ r = runeError
+ fallthrough
+ case i <= rune3Max:
+ p = append(p, t3|byte(r>>12))
+ p = append(p, tx|byte(r>>6)&maskx)
+ p = append(p, tx|byte(r)&maskx)
+ return p
+ default:
+ p = append(p, t4|byte(r>>18))
+ p = append(p, tx|byte(r>>12)&maskx)
+ p = append(p, tx|byte(r>>6)&maskx)
+ p = append(p, tx|byte(r)&maskx)
+ return p
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go
new file mode 100644
index 000000000..c2934f916
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/jsoniter.go
@@ -0,0 +1,18 @@
+// Package jsoniter implements encoding and decoding of JSON as defined in
+// RFC 4627, with interfaces identical in syntax to those of the standard
+// library's encoding/json. Converting from encoding/json to jsoniter is no
+// more than replacing the package import and variable type declarations (if any);
+// the jsoniter interfaces give 100% compatibility with code using the standard library.
+//
+// "JSON and Go"
+// (https://golang.org/doc/articles/json_and_go.html)
+// gives a description of how Marshal/Unmarshal operate
+// between arbitrary or predefined json objects and bytes,
+// and it applies to jsoniter.Marshal/Unmarshal as well.
+//
+// In addition, jsoniter.Iterator provides a different set of interfaces
+// for iterating over given bytes/string/reader
+// and yielding parsed elements one by one.
+// This set of interfaces reads input only as required and gives
+// better performance.
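+//
+// A minimal usage sketch (ConfigCompatibleWithStandardLibrary is the
+// drop-in replacement configuration):
+//
+//    var json = jsoniter.ConfigCompatibleWithStandardLibrary
+//    data, err := json.Marshal(&value)  // like encoding/json.Marshal
+//    err = json.Unmarshal(data, &value) // like encoding/json.Unmarshal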
+package jsoniter
diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go
new file mode 100644
index 000000000..e2389b56c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/pool.go
@@ -0,0 +1,42 @@
+package jsoniter
+
+import (
+ "io"
+)
+
+// IteratorPool is a thread-safe pool of iterators sharing the same configuration.
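+// Borrowed iterators must be returned when done (sketch):
+//
+//    iter := pool.BorrowIterator(data)
+//    defer pool.ReturnIterator(iter)
+//    iter.ReadVal(&obj)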
+type IteratorPool interface {
+ BorrowIterator(data []byte) *Iterator
+ ReturnIterator(iter *Iterator)
+}
+
+// StreamPool is a thread-safe pool of streams sharing the same configuration.
+type StreamPool interface {
+ BorrowStream(writer io.Writer) *Stream
+ ReturnStream(stream *Stream)
+}
+
+func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream {
+ stream := cfg.streamPool.Get().(*Stream)
+ stream.Reset(writer)
+ return stream
+}
+
+func (cfg *frozenConfig) ReturnStream(stream *Stream) {
+ stream.out = nil
+ stream.Error = nil
+ stream.Attachment = nil
+ cfg.streamPool.Put(stream)
+}
+
+func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator {
+ iter := cfg.iteratorPool.Get().(*Iterator)
+ iter.ResetBytes(data)
+ return iter
+}
+
+func (cfg *frozenConfig) ReturnIterator(iter *Iterator) {
+ iter.Error = nil
+ iter.Attachment = nil
+ cfg.iteratorPool.Put(iter)
+}
diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go
new file mode 100644
index 000000000..74974ba74
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect.go
@@ -0,0 +1,337 @@
+package jsoniter
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+
+ "github.com/modern-go/reflect2"
+)
+
+// ValDecoder is an internal type registered to cache as needed.
+// Don't confuse jsoniter.ValDecoder with json.Decoder.
+// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
+//
+// Decoders are created by reflecting on the type once and are then cached.
+// Reflection on values is avoided where possible, since reflect.Value itself
+// allocates, with the following exceptions:
+// 1. creating an instance of a new value, e.g. *int needs an int to be allocated
+// 2. appending to a slice when the existing cap is not enough; allocation is done using reflect.New
+// 3. assigning to a map, where both key and value will be reflect.Value
+// For a simple struct binding, decoding is reflect.Value free and allocation free.
+type ValDecoder interface {
+ Decode(ptr unsafe.Pointer, iter *Iterator)
+}
+
+// ValEncoder is an internal type registered to cache as needed.
+// Don't confuse jsoniter.ValEncoder with json.Encoder.
+// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
+type ValEncoder interface {
+ IsEmpty(ptr unsafe.Pointer) bool
+ Encode(ptr unsafe.Pointer, stream *Stream)
+}
+
+type checkIsEmpty interface {
+ IsEmpty(ptr unsafe.Pointer) bool
+}
+
+type ctx struct {
+ *frozenConfig
+ prefix string
+ encoders map[reflect2.Type]ValEncoder
+ decoders map[reflect2.Type]ValDecoder
+}
+
+func (b *ctx) caseSensitive() bool {
+ if b.frozenConfig == nil {
+ // default is case-insensitive
+ return false
+ }
+ return b.frozenConfig.caseSensitive
+}
+
+func (b *ctx) append(prefix string) *ctx {
+ return &ctx{
+ frozenConfig: b.frozenConfig,
+ prefix: b.prefix + " " + prefix,
+ encoders: b.encoders,
+ decoders: b.decoders,
+ }
+}
+
+// ReadVal copies the underlying JSON into the given Go value, same as json.Unmarshal.
+func (iter *Iterator) ReadVal(obj interface{}) {
+ depth := iter.depth
+ cacheKey := reflect2.RTypeOf(obj)
+ decoder := iter.cfg.getDecoderFromCache(cacheKey)
+ if decoder == nil {
+ typ := reflect2.TypeOf(obj)
+ if typ.Kind() != reflect.Ptr {
+ iter.ReportError("ReadVal", "can only unmarshal into pointer")
+ return
+ }
+ decoder = iter.cfg.DecoderOf(typ)
+ }
+ ptr := reflect2.PtrOf(obj)
+ if ptr == nil {
+ iter.ReportError("ReadVal", "can not read into nil pointer")
+ return
+ }
+ decoder.Decode(ptr, iter)
+ if iter.depth != depth {
+ iter.ReportError("ReadVal", "unexpected mismatched nesting")
+ return
+ }
+}
+
+// WriteVal writes the given Go value as JSON to the underlying stream, same as json.Marshal.
+func (stream *Stream) WriteVal(val interface{}) {
+ if nil == val {
+ stream.WriteNil()
+ return
+ }
+ cacheKey := reflect2.RTypeOf(val)
+ encoder := stream.cfg.getEncoderFromCache(cacheKey)
+ if encoder == nil {
+ typ := reflect2.TypeOf(val)
+ encoder = stream.cfg.EncoderOf(typ)
+ }
+ encoder.Encode(reflect2.PtrOf(val), stream)
+}
+
+func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder {
+ cacheKey := typ.RType()
+ decoder := cfg.getDecoderFromCache(cacheKey)
+ if decoder != nil {
+ return decoder
+ }
+ ctx := &ctx{
+ frozenConfig: cfg,
+ prefix: "",
+ decoders: map[reflect2.Type]ValDecoder{},
+ encoders: map[reflect2.Type]ValEncoder{},
+ }
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ decoder = decoderOfType(ctx, ptrType.Elem())
+ cfg.addDecoderToCache(cacheKey, decoder)
+ return decoder
+}
+
+func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := getTypeDecoderFromExtension(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfType(ctx, typ)
+ for _, extension := range extensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
+ for _, extension := range ctx.extraExtensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ return decoder
+}
+
+func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := ctx.decoders[typ]
+ if decoder != nil {
+ return decoder
+ }
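+ // Cache a placeholder before building the real decoder so recursive
+ // types (e.g. a struct that refers to itself through a pointer) resolve
+ // to the placeholder instead of recursing forever; it is patched to the
+ // real decoder just below.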
+ placeholder := &placeholderDecoder{}
+ ctx.decoders[typ] = placeholder
+ decoder = _createDecoderOfType(ctx, typ)
+ placeholder.decoder = decoder
+ return decoder
+}
+
+func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := createDecoderOfJsonRawMessage(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfJsonNumber(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfMarshaler(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfAny(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfNative(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ switch typ.Kind() {
+ case reflect.Interface:
+ ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType)
+ if isIFace {
+ return &ifaceDecoder{valType: ifaceType}
+ }
+ return &efaceDecoder{}
+ case reflect.Struct:
+ return decoderOfStruct(ctx, typ)
+ case reflect.Array:
+ return decoderOfArray(ctx, typ)
+ case reflect.Slice:
+ return decoderOfSlice(ctx, typ)
+ case reflect.Map:
+ return decoderOfMap(ctx, typ)
+ case reflect.Ptr:
+ return decoderOfOptional(ctx, typ)
+ default:
+ return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
+ }
+}
+
+func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder {
+ cacheKey := typ.RType()
+ encoder := cfg.getEncoderFromCache(cacheKey)
+ if encoder != nil {
+ return encoder
+ }
+ ctx := &ctx{
+ frozenConfig: cfg,
+ prefix: "",
+ decoders: map[reflect2.Type]ValDecoder{},
+ encoders: map[reflect2.Type]ValEncoder{},
+ }
+ encoder = encoderOfType(ctx, typ)
+ if typ.LikePtr() {
+ encoder = &onePtrEncoder{encoder}
+ }
+ cfg.addEncoderToCache(cacheKey, encoder)
+ return encoder
+}
+
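+// onePtrEncoder wraps an encoder for pointer-like values (reflect2
+// LikePtr types): the value word itself is the pointer, so Encode hands
+// the wrapped encoder the address of ptr to get a pointer to the value.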
+type onePtrEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
+}
+
+func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
+}
+
+func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := getTypeEncoderFromExtension(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfType(ctx, typ)
+ for _, extension := range extensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
+ for _, extension := range ctx.extraExtensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ return encoder
+}
+
+func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := ctx.encoders[typ]
+ if encoder != nil {
+ return encoder
+ }
+ placeholder := &placeholderEncoder{}
+ ctx.encoders[typ] = placeholder
+ encoder = _createEncoderOfType(ctx, typ)
+ placeholder.encoder = encoder
+ return encoder
+}
+func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := createEncoderOfJsonRawMessage(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfJsonNumber(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfMarshaler(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfAny(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfNative(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ kind := typ.Kind()
+ switch kind {
+ case reflect.Interface:
+ return &dynamicEncoder{typ}
+ case reflect.Struct:
+ return encoderOfStruct(ctx, typ)
+ case reflect.Array:
+ return encoderOfArray(ctx, typ)
+ case reflect.Slice:
+ return encoderOfSlice(ctx, typ)
+ case reflect.Map:
+ return encoderOfMap(ctx, typ)
+ case reflect.Ptr:
+ return encoderOfOptional(ctx, typ)
+ default:
+ return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
+ }
+}
+
+type lazyErrorDecoder struct {
+ err error
+}
+
+func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.WhatIsNext() != NilValue {
+ if iter.Error == nil {
+ iter.Error = decoder.err
+ }
+ } else {
+ iter.Skip()
+ }
+}
+
+type lazyErrorEncoder struct {
+ err error
+}
+
+func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if ptr == nil {
+ stream.WriteNil()
+ } else if stream.Error == nil {
+ stream.Error = encoder.err
+ }
+}
+
+func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type placeholderDecoder struct {
+ decoder ValDecoder
+}
+
+func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.decoder.Decode(ptr, iter)
+}
+
+type placeholderEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.encoder.Encode(ptr, stream)
+}
+
+func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.encoder.IsEmpty(ptr)
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go
new file mode 100644
index 000000000..13a0b7b08
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_array.go
@@ -0,0 +1,104 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "unsafe"
+)
+
+func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder {
+ arrayType := typ.(*reflect2.UnsafeArrayType)
+ decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
+ return &arrayDecoder{arrayType, decoder}
+}
+
+func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder {
+ arrayType := typ.(*reflect2.UnsafeArrayType)
+ if arrayType.Len() == 0 {
+ return emptyArrayEncoder{}
+ }
+ encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
+ return &arrayEncoder{arrayType, encoder}
+}
+
+type emptyArrayEncoder struct{}
+
+func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteEmptyArray()
+}
+
+func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return true
+}
+
+type arrayEncoder struct {
+ arrayType *reflect2.UnsafeArrayType
+ elemEncoder ValEncoder
+}
+
+func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteArrayStart()
+ elemPtr := unsafe.Pointer(ptr)
+ encoder.elemEncoder.Encode(elemPtr, stream)
+ for i := 1; i < encoder.arrayType.Len(); i++ {
+ stream.WriteMore()
+ elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i)
+ encoder.elemEncoder.Encode(elemPtr, stream)
+ }
+ stream.WriteArrayEnd()
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error())
+ }
+}
+
+func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type arrayDecoder struct {
+ arrayType *reflect2.UnsafeArrayType
+ elemDecoder ValDecoder
+}
+
+func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.doDecode(ptr, iter)
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error())
+ }
+}
+
+func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
+ c := iter.nextToken()
+ arrayType := decoder.arrayType
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return
+ }
+ if c != '[' {
+ iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c}))
+ return
+ }
+ c = iter.nextToken()
+ if c == ']' {
+ return
+ }
+ iter.unreadByte()
+ elemPtr := arrayType.UnsafeGetIndex(ptr, 0)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ length := 1
+ for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+ if length >= arrayType.Len() {
+ iter.Skip()
+ continue
+ }
+ idx := length
+ length += 1
+ elemPtr = arrayType.UnsafeGetIndex(ptr, idx)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ }
+ if c != ']' {
+ iter.ReportError("decode array", "expect ], but found "+string([]byte{c}))
+ return
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go
new file mode 100644
index 000000000..8b6bc8b43
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go
@@ -0,0 +1,70 @@
+package jsoniter
+
+import (
+ "github.com/modern-go/reflect2"
+ "reflect"
+ "unsafe"
+)
+
+type dynamicEncoder struct {
+ valType reflect2.Type
+}
+
+func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ stream.WriteVal(obj)
+}
+
+func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.valType.UnsafeIndirect(ptr) == nil
+}
+
+type efaceDecoder struct {
+}
+
+func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ pObj := (*interface{})(ptr)
+ obj := *pObj
+ if obj == nil {
+ *pObj = iter.Read()
+ return
+ }
+ typ := reflect2.TypeOf(obj)
+ if typ.Kind() != reflect.Ptr {
+ *pObj = iter.Read()
+ return
+ }
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ ptrElemType := ptrType.Elem()
+ if iter.WhatIsNext() == NilValue {
+ if ptrElemType.Kind() != reflect.Ptr {
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ *pObj = nil
+ return
+ }
+ }
+ if reflect2.IsNil(obj) {
+ obj := ptrElemType.New()
+ iter.ReadVal(obj)
+ *pObj = obj
+ return
+ }
+ iter.ReadVal(obj)
+}
+
+type ifaceDecoder struct {
+ valType *reflect2.UnsafeIFaceType
+}
+
+func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.ReadNil() {
+ decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew())
+ return
+ }
+ obj := decoder.valType.UnsafeIndirect(ptr)
+ if reflect2.IsNil(obj) {
+ iter.ReportError("decode non empty interface", "can not unmarshal into nil")
+ return
+ }
+ iter.ReadVal(obj)
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go
new file mode 100644
index 000000000..80320cd64
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_extension.go
@@ -0,0 +1,483 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "reflect"
+ "sort"
+ "strings"
+ "unicode"
+ "unsafe"
+)
+
+var typeDecoders = map[string]ValDecoder{}
+var fieldDecoders = map[string]ValDecoder{}
+var typeEncoders = map[string]ValEncoder{}
+var fieldEncoders = map[string]ValEncoder{}
+var extensions = []Extension{}
+
+// StructDescriptor describes how the struct should be encoded/decoded.
+type StructDescriptor struct {
+ Type reflect2.Type
+ Fields []*Binding
+}
+
+// GetField gets one field from the descriptor by its name.
+// A map is not used here, so that field order is preserved.
+func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {
+ for _, binding := range structDescriptor.Fields {
+ if binding.Field.Name() == fieldName {
+ return binding
+ }
+ }
+ return nil
+}
+
+// Binding describes how a struct field should be encoded/decoded.
+type Binding struct {
+ levels []int
+ Field reflect2.StructField
+ FromNames []string
+ ToNames []string
+ Encoder ValEncoder
+ Decoder ValDecoder
+}
+
+// Extension is the one-for-all SPI. It customizes encoding/decoding by specifying alternate encoders/decoders.
+// It can also rename fields via UpdateStructDescriptor.
+type Extension interface {
+ UpdateStructDescriptor(structDescriptor *StructDescriptor)
+ CreateMapKeyDecoder(typ reflect2.Type) ValDecoder
+ CreateMapKeyEncoder(typ reflect2.Type) ValEncoder
+ CreateDecoder(typ reflect2.Type) ValDecoder
+ CreateEncoder(typ reflect2.Type) ValEncoder
+ DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder
+ DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder
+}
+
+// DummyExtension can be embedded to get no-op implementations of all Extension methods.
+type DummyExtension struct {
+}
+
+// UpdateStructDescriptor No-op
+func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateMapKeyDecoder No-op
+func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// CreateDecoder No-op
+func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateEncoder No-op
+func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// DecorateDecoder No-op
+func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+ return decoder
+}
+
+// DecorateEncoder No-op
+func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+ return encoder
+}
+
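+// EncoderExtension is an Extension that maps types directly to their ValEncoder.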
+type EncoderExtension map[reflect2.Type]ValEncoder
+
+// UpdateStructDescriptor No-op
+func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateDecoder No-op
+func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateEncoder get encoder from map
+func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+ return extension[typ]
+}
+
+// CreateMapKeyDecoder No-op
+func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// DecorateDecoder No-op
+func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+ return decoder
+}
+
+// DecorateEncoder No-op
+func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+ return encoder
+}
+
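+// DecoderExtension is an Extension that maps types directly to their ValDecoder.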
+type DecoderExtension map[reflect2.Type]ValDecoder
+
+// UpdateStructDescriptor No-op
+func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateMapKeyDecoder No-op
+func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// CreateDecoder get decoder from map
+func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+ return extension[typ]
+}
+
+// CreateEncoder No-op
+func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// DecorateDecoder No-op
+func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+ return decoder
+}
+
+// DecorateEncoder No-op
+func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+ return encoder
+}
+
+type funcDecoder struct {
+ fun DecoderFunc
+}
+
+func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.fun(ptr, iter)
+}
+
+type funcEncoder struct {
+ fun EncoderFunc
+ isEmptyFunc func(ptr unsafe.Pointer) bool
+}
+
+func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.fun(ptr, stream)
+}
+
+func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ if encoder.isEmptyFunc == nil {
+ return false
+ }
+ return encoder.isEmptyFunc(ptr)
+}
+
+// DecoderFunc the function form of TypeDecoder
+type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator)
+
+// EncoderFunc the function form of TypeEncoder
+type EncoderFunc func(ptr unsafe.Pointer, stream *Stream)
+
+// RegisterTypeDecoderFunc registers a TypeDecoder for a type with the given function
+func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) {
+ typeDecoders[typ] = &funcDecoder{fun}
+}
+
+// RegisterTypeDecoder registers a TypeDecoder for a type
+func RegisterTypeDecoder(typ string, decoder ValDecoder) {
+ typeDecoders[typ] = decoder
+}
+
+// RegisterFieldDecoderFunc registers a TypeDecoder for a struct field with the given function
+func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) {
+ RegisterFieldDecoder(typ, field, &funcDecoder{fun})
+}
+
+// RegisterFieldDecoder registers a TypeDecoder for a struct field
+func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) {
+ fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder
+}
+
+// RegisterTypeEncoderFunc registers a TypeEncoder for a type with encode/isEmpty functions
+func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
+ typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc}
+}
+
+// RegisterTypeEncoder registers a TypeEncoder for a type
+func RegisterTypeEncoder(typ string, encoder ValEncoder) {
+ typeEncoders[typ] = encoder
+}
+
+// RegisterFieldEncoderFunc registers a TypeEncoder for a struct field with encode/isEmpty functions
+func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
+ RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc})
+}
+
+// RegisterFieldEncoder registers a TypeEncoder for a struct field
+func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) {
+ fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder
+}
+
+// RegisterExtension registers an extension
+func RegisterExtension(extension Extension) {
+ extensions = append(extensions, extension)
+}
+
+func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := _getTypeDecoderFromExtension(ctx, typ)
+ if decoder != nil {
+ for _, extension := range extensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
+ for _, extension := range ctx.extraExtensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ }
+ return decoder
+}
+func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
+ for _, extension := range extensions {
+ decoder := extension.CreateDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ }
+ decoder := ctx.decoderExtension.CreateDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ decoder := extension.CreateDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ }
+ typeName := typ.String()
+ decoder = typeDecoders[typeName]
+ if decoder != nil {
+ return decoder
+ }
+ if typ.Kind() == reflect.Ptr {
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ decoder := typeDecoders[ptrType.Elem().String()]
+ if decoder != nil {
+ return &OptionalDecoder{ptrType.Elem(), decoder}
+ }
+ }
+ return nil
+}
+
+func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := _getTypeEncoderFromExtension(ctx, typ)
+ if encoder != nil {
+ for _, extension := range extensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
+ for _, extension := range ctx.extraExtensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ }
+ return encoder
+}
+
+func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
+ for _, extension := range extensions {
+ encoder := extension.CreateEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ }
+ encoder := ctx.encoderExtension.CreateEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ encoder := extension.CreateEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ }
+ typeName := typ.String()
+ encoder = typeEncoders[typeName]
+ if encoder != nil {
+ return encoder
+ }
+ if typ.Kind() == reflect.Ptr {
+ typePtr := typ.(*reflect2.UnsafePtrType)
+ encoder := typeEncoders[typePtr.Elem().String()]
+ if encoder != nil {
+ return &OptionalEncoder{encoder}
+ }
+ }
+ return nil
+}
+
+func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
+ structType := typ.(*reflect2.UnsafeStructType)
+ embeddedBindings := []*Binding{}
+ bindings := []*Binding{}
+ for i := 0; i < structType.NumField(); i++ {
+ field := structType.Field(i)
+ tag, hastag := field.Tag().Lookup(ctx.getTagKey())
+ if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
+ continue
+ }
+ if tag == "-" || field.Name() == "_" {
+ continue
+ }
+ tagParts := strings.Split(tag, ",")
+ if field.Anonymous() && (tag == "" || tagParts[0] == "") {
+ if field.Type().Kind() == reflect.Struct {
+ structDescriptor := describeStruct(ctx, field.Type())
+ for _, binding := range structDescriptor.Fields {
+ binding.levels = append([]int{i}, binding.levels...)
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty
+ binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
+ binding.Decoder = &structFieldDecoder{field, binding.Decoder}
+ embeddedBindings = append(embeddedBindings, binding)
+ }
+ continue
+ } else if field.Type().Kind() == reflect.Ptr {
+ ptrType := field.Type().(*reflect2.UnsafePtrType)
+ if ptrType.Elem().Kind() == reflect.Struct {
+ structDescriptor := describeStruct(ctx, ptrType.Elem())
+ for _, binding := range structDescriptor.Fields {
+ binding.levels = append([]int{i}, binding.levels...)
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty
+ binding.Encoder = &dereferenceEncoder{binding.Encoder}
+ binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
+ binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder}
+ binding.Decoder = &structFieldDecoder{field, binding.Decoder}
+ embeddedBindings = append(embeddedBindings, binding)
+ }
+ continue
+ }
+ }
+ }
+ fieldNames := calcFieldNames(field.Name(), tagParts[0], tag)
+ fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name())
+ decoder := fieldDecoders[fieldCacheKey]
+ if decoder == nil {
+ decoder = decoderOfType(ctx.append(field.Name()), field.Type())
+ }
+ encoder := fieldEncoders[fieldCacheKey]
+ if encoder == nil {
+ encoder = encoderOfType(ctx.append(field.Name()), field.Type())
+ }
+ binding := &Binding{
+ Field: field,
+ FromNames: fieldNames,
+ ToNames: fieldNames,
+ Decoder: decoder,
+ Encoder: encoder,
+ }
+ binding.levels = []int{i}
+ bindings = append(bindings, binding)
+ }
+ return createStructDescriptor(ctx, typ, bindings, embeddedBindings)
+}
+func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor {
+ structDescriptor := &StructDescriptor{
+ Type: typ,
+ Fields: bindings,
+ }
+ for _, extension := range extensions {
+ extension.UpdateStructDescriptor(structDescriptor)
+ }
+ ctx.encoderExtension.UpdateStructDescriptor(structDescriptor)
+ ctx.decoderExtension.UpdateStructDescriptor(structDescriptor)
+ for _, extension := range ctx.extraExtensions {
+ extension.UpdateStructDescriptor(structDescriptor)
+ }
+ processTags(structDescriptor, ctx.frozenConfig)
+ // merge normal & embedded bindings & sort with original order
+ allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...))
+ sort.Sort(allBindings)
+ structDescriptor.Fields = allBindings
+ return structDescriptor
+}
+
+type sortableBindings []*Binding
+
+func (bindings sortableBindings) Len() int {
+ return len(bindings)
+}
+
+func (bindings sortableBindings) Less(i, j int) bool {
+ left := bindings[i].levels
+ right := bindings[j].levels
+ k := 0
+ for {
+ if left[k] < right[k] {
+ return true
+ } else if left[k] > right[k] {
+ return false
+ }
+ k++
+ }
+}
+
+func (bindings sortableBindings) Swap(i, j int) {
+ bindings[i], bindings[j] = bindings[j], bindings[i]
+}
+
+func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) {
+ for _, binding := range structDescriptor.Fields {
+ shouldOmitEmpty := false
+ tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",")
+ for _, tagPart := range tagParts[1:] {
+ if tagPart == "omitempty" {
+ shouldOmitEmpty = true
+ } else if tagPart == "string" {
+ if binding.Field.Type().Kind() == reflect.String {
+ binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg}
+ binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg}
+ } else {
+ binding.Decoder = &stringModeNumberDecoder{binding.Decoder}
+ binding.Encoder = &stringModeNumberEncoder{binding.Encoder}
+ }
+ }
+ }
+ binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder}
+ binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty}
+ }
+}
+
+func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string {
+ // ignore?
+ if wholeTag == "-" {
+ return []string{}
+ }
+ // rename?
+ var fieldNames []string
+ if tagProvidedFieldName == "" {
+ fieldNames = []string{originalFieldName}
+ } else {
+ fieldNames = []string{tagProvidedFieldName}
+ }
+ // private?
+ isNotExported := unicode.IsLower(rune(originalFieldName[0]))
+ if isNotExported {
+ fieldNames = []string{}
+ }
+ return fieldNames
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go
new file mode 100644
index 000000000..98d45c1ec
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_json_number.go
@@ -0,0 +1,112 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "github.com/modern-go/reflect2"
+ "strconv"
+ "unsafe"
+)
+
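+// Number, like json.Number, holds the literal text of a JSON number.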
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
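+// CastJsonNumber returns the literal text of val and true when val is a
+// json.Number or a jsoniter Number; otherwise it returns "" and false.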
+func CastJsonNumber(val interface{}) (string, bool) {
+ switch typedVal := val.(type) {
+ case json.Number:
+ return string(typedVal), true
+ case Number:
+ return string(typedVal), true
+ }
+ return "", false
+}
+
+var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem()
+var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem()
+
+func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ.AssignableTo(jsonNumberType) {
+ return &jsonNumberCodec{}
+ }
+ if typ.AssignableTo(jsoniterNumberType) {
+ return &jsoniterNumberCodec{}
+ }
+ return nil
+}
+
+func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ.AssignableTo(jsonNumberType) {
+ return &jsonNumberCodec{}
+ }
+ if typ.AssignableTo(jsoniterNumberType) {
+ return &jsoniterNumberCodec{}
+ }
+ return nil
+}
+
+type jsonNumberCodec struct {
+}
+
+func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ switch iter.WhatIsNext() {
+ case StringValue:
+ *((*json.Number)(ptr)) = json.Number(iter.ReadString())
+ case NilValue:
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ *((*json.Number)(ptr)) = ""
+ default:
+ *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString()))
+ }
+}
+
+func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ number := *((*json.Number)(ptr))
+ if len(number) == 0 {
+ stream.writeByte('0')
+ } else {
+ stream.WriteRaw(string(number))
+ }
+}
+
+func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*json.Number)(ptr))) == 0
+}
+
+type jsoniterNumberCodec struct {
+}
+
+func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ switch iter.WhatIsNext() {
+ case StringValue:
+ *((*Number)(ptr)) = Number(iter.ReadString())
+ case NilValue:
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ *((*Number)(ptr)) = ""
+ default:
+ *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString()))
+ }
+}
+
+func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ number := *((*Number)(ptr))
+ if len(number) == 0 {
+ stream.writeByte('0')
+ } else {
+ stream.WriteRaw(string(number))
+ }
+}
+
+func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*Number)(ptr))) == 0
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
new file mode 100644
index 000000000..f2619936c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
@@ -0,0 +1,60 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "github.com/modern-go/reflect2"
+ "unsafe"
+)
+
+var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()
+var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()
+
+func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ == jsonRawMessageType {
+ return &jsonRawMessageCodec{}
+ }
+ if typ == jsoniterRawMessageType {
+ return &jsoniterRawMessageCodec{}
+ }
+ return nil
+}
+
+func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ == jsonRawMessageType {
+ return &jsonRawMessageCodec{}
+ }
+ if typ == jsoniterRawMessageType {
+ return &jsoniterRawMessageCodec{}
+ }
+ return nil
+}
+
+type jsonRawMessageCodec struct {
+}
+
+func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes())
+}
+
+func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
+}
+
+func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*json.RawMessage)(ptr))) == 0
+}
+
+type jsoniterRawMessageCodec struct {
+}
+
+func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes())
+}
+
+func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteRaw(string(*((*RawMessage)(ptr))))
+}
+
+func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*RawMessage)(ptr))) == 0
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go
new file mode 100644
index 000000000..9e2b623fe
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_map.go
@@ -0,0 +1,346 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "reflect"
+ "sort"
+ "unsafe"
+)
+
+func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder {
+ mapType := typ.(*reflect2.UnsafeMapType)
+ keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key())
+ elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem())
+ return &mapDecoder{
+ mapType: mapType,
+ keyType: mapType.Key(),
+ elemType: mapType.Elem(),
+ keyDecoder: keyDecoder,
+ elemDecoder: elemDecoder,
+ }
+}
+
+func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder {
+ mapType := typ.(*reflect2.UnsafeMapType)
+ if ctx.sortMapKeys {
+ return &sortKeysMapEncoder{
+ mapType: mapType,
+ keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
+ elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
+ }
+ }
+ return &mapEncoder{
+ mapType: mapType,
+ keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
+ elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
+ }
+}
+
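+// decoderOfMapKey resolves a decoder for a map key type. Extensions get first
+// pick; otherwise strings decode directly, numeric kinds (and bool) are
+// unwrapped from their quoted JSON form, and any other type must implement
+// json.Unmarshaler or encoding.TextUnmarshaler to be usable as a key.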
+func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ decoder := extension.CreateMapKeyDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ }
+ switch typ.Kind() {
+ case reflect.String:
+ return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
+ case reflect.Bool,
+ reflect.Uint8, reflect.Int8,
+ reflect.Uint16, reflect.Int16,
+ reflect.Uint32, reflect.Int32,
+ reflect.Uint64, reflect.Int64,
+ reflect.Uint, reflect.Int,
+ reflect.Float32, reflect.Float64,
+ reflect.Uintptr:
+ typ = reflect2.DefaultTypeOfKind(typ.Kind())
+ return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
+ default:
+ ptrType := reflect2.PtrTo(typ)
+ if ptrType.Implements(unmarshalerType) {
+ return &referenceDecoder{
+ &unmarshalerDecoder{
+ valType: ptrType,
+ },
+ }
+ }
+ if typ.Implements(unmarshalerType) {
+ return &unmarshalerDecoder{
+ valType: typ,
+ }
+ }
+ if ptrType.Implements(textUnmarshalerType) {
+ return &referenceDecoder{
+ &textUnmarshalerDecoder{
+ valType: ptrType,
+ },
+ }
+ }
+ if typ.Implements(textUnmarshalerType) {
+ return &textUnmarshalerDecoder{
+ valType: typ,
+ }
+ }
+ return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
+ }
+}
+
+func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ encoder := extension.CreateMapKeyEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ }
+ switch typ.Kind() {
+ case reflect.String:
+ return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
+ case reflect.Bool,
+ reflect.Uint8, reflect.Int8,
+ reflect.Uint16, reflect.Int16,
+ reflect.Uint32, reflect.Int32,
+ reflect.Uint64, reflect.Int64,
+ reflect.Uint, reflect.Int,
+ reflect.Float32, reflect.Float64,
+ reflect.Uintptr:
+ typ = reflect2.DefaultTypeOfKind(typ.Kind())
+ return &numericMapKeyEncoder{encoderOfType(ctx, typ)}
+ default:
+ if typ == textMarshalerType {
+ return &directTextMarshalerEncoder{
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ }
+ if typ.Implements(textMarshalerType) {
+ return &textMarshalerEncoder{
+ valType: typ,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ }
+ if typ.Kind() == reflect.Interface {
+ return &dynamicMapKeyEncoder{ctx, typ}
+ }
+ return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
+ }
+}
+
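+// mapDecoder decodes a JSON object into a Go map: `null` resets the map to
+// nil, `{}` leaves an empty (but allocated) map, and each `"key": value` pair
+// is decoded with the key and element decoders before being inserted.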
+type mapDecoder struct {
+ mapType *reflect2.UnsafeMapType
+ keyType reflect2.Type
+ elemType reflect2.Type
+ keyDecoder ValDecoder
+ elemDecoder ValDecoder
+}
+
+func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ mapType := decoder.mapType
+ c := iter.nextToken()
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ *(*unsafe.Pointer)(ptr) = nil
+ mapType.UnsafeSet(ptr, mapType.UnsafeNew())
+ return
+ }
+ if mapType.UnsafeIsNil(ptr) {
+ mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0))
+ }
+ if c != '{' {
+ iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
+ return
+ }
+ c = iter.nextToken()
+ if c == '}' {
+ return
+ }
+ if c != '"' {
+ iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
+ return
+ }
+ iter.unreadByte()
+ key := decoder.keyType.UnsafeNew()
+ decoder.keyDecoder.Decode(key, iter)
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ return
+ }
+ elem := decoder.elemType.UnsafeNew()
+ decoder.elemDecoder.Decode(elem, iter)
+ decoder.mapType.UnsafeSetIndex(ptr, key, elem)
+ for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+ key := decoder.keyType.UnsafeNew()
+ decoder.keyDecoder.Decode(key, iter)
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ return
+ }
+ elem := decoder.elemType.UnsafeNew()
+ decoder.elemDecoder.Decode(elem, iter)
+ decoder.mapType.UnsafeSetIndex(ptr, key, elem)
+ }
+ if c != '}' {
+ iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c}))
+ }
+}
+
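+// numericMapKeyDecoder strips the quotes that JSON requires around object
+// keys before delegating to the underlying numeric decoder, so an input such
+// as {"1":"a"} can populate a map[int]string.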
+type numericMapKeyDecoder struct {
+ decoder ValDecoder
+}
+
+func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ c := iter.nextToken()
+ if c != '"' {
+ iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
+ return
+ }
+ decoder.decoder.Decode(ptr, iter)
+ c = iter.nextToken()
+ if c != '"' {
+ iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
+ return
+ }
+}
+
+type numericMapKeyEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.writeByte('"')
+ encoder.encoder.Encode(ptr, stream)
+ stream.writeByte('"')
+}
+
+func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type dynamicMapKeyEncoder struct {
+ ctx *ctx
+ valType reflect2.Type
+}
+
+func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream)
+}
+
+func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj))
+}
+
+type mapEncoder struct {
+ mapType *reflect2.UnsafeMapType
+ keyEncoder ValEncoder
+ elemEncoder ValEncoder
+}
+
+func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *(*unsafe.Pointer)(ptr) == nil {
+ stream.WriteNil()
+ return
+ }
+ stream.WriteObjectStart()
+ iter := encoder.mapType.UnsafeIterate(ptr)
+ for i := 0; iter.HasNext(); i++ {
+ if i != 0 {
+ stream.WriteMore()
+ }
+ key, elem := iter.UnsafeNext()
+ encoder.keyEncoder.Encode(key, stream)
+ if stream.indention > 0 {
+ stream.writeTwoBytes(byte(':'), byte(' '))
+ } else {
+ stream.writeByte(':')
+ }
+ encoder.elemEncoder.Encode(elem, stream)
+ }
+ stream.WriteObjectEnd()
+}
+
+func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ iter := encoder.mapType.UnsafeIterate(ptr)
+ return !iter.HasNext()
+}
+
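+// sortKeysMapEncoder produces deterministic output: each key/value pair is
+// first rendered into a borrowed substream, the encoded key is decoded back
+// to its plain string form so ordering is not skewed by escaping, and the
+// buffered pairs are then sorted by key and written out in order.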
+type sortKeysMapEncoder struct {
+ mapType *reflect2.UnsafeMapType
+ keyEncoder ValEncoder
+ elemEncoder ValEncoder
+}
+
+func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *(*unsafe.Pointer)(ptr) == nil {
+ stream.WriteNil()
+ return
+ }
+ stream.WriteObjectStart()
+ mapIter := encoder.mapType.UnsafeIterate(ptr)
+ subStream := stream.cfg.BorrowStream(nil)
+ subStream.Attachment = stream.Attachment
+ subIter := stream.cfg.BorrowIterator(nil)
+ keyValues := encodedKeyValues{}
+ for mapIter.HasNext() {
+ key, elem := mapIter.UnsafeNext()
+ subStreamIndex := subStream.Buffered()
+ encoder.keyEncoder.Encode(key, subStream)
+ if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
+ stream.Error = subStream.Error
+ }
+ encodedKey := subStream.Buffer()[subStreamIndex:]
+ subIter.ResetBytes(encodedKey)
+ decodedKey := subIter.ReadString()
+ if stream.indention > 0 {
+ subStream.writeTwoBytes(byte(':'), byte(' '))
+ } else {
+ subStream.writeByte(':')
+ }
+ encoder.elemEncoder.Encode(elem, subStream)
+ keyValues = append(keyValues, encodedKV{
+ key: decodedKey,
+ keyValue: subStream.Buffer()[subStreamIndex:],
+ })
+ }
+ sort.Sort(keyValues)
+ for i, keyValue := range keyValues {
+ if i != 0 {
+ stream.WriteMore()
+ }
+ stream.Write(keyValue.keyValue)
+ }
+ if subStream.Error != nil && stream.Error == nil {
+ stream.Error = subStream.Error
+ }
+ stream.WriteObjectEnd()
+ stream.cfg.ReturnStream(subStream)
+ stream.cfg.ReturnIterator(subIter)
+}
+
+func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ iter := encoder.mapType.UnsafeIterate(ptr)
+ return !iter.HasNext()
+}
+
+type encodedKeyValues []encodedKV
+
+type encodedKV struct {
+ key string
+ keyValue []byte
+}
+
+func (sv encodedKeyValues) Len() int { return len(sv) }
+func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key }
diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go
new file mode 100644
index 000000000..3e21f3756
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go
@@ -0,0 +1,225 @@
+package jsoniter
+
+import (
+ "encoding"
+ "encoding/json"
+ "unsafe"
+
+ "github.com/modern-go/reflect2"
+)
+
+var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem()
+var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem()
+var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem()
+
+func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder {
+ ptrType := reflect2.PtrTo(typ)
+ if ptrType.Implements(unmarshalerType) {
+ return &referenceDecoder{
+ &unmarshalerDecoder{ptrType},
+ }
+ }
+ if ptrType.Implements(textUnmarshalerType) {
+ return &referenceDecoder{
+ &textUnmarshalerDecoder{ptrType},
+ }
+ }
+ return nil
+}
+
+func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ == marshalerType {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &directMarshalerEncoder{
+ checkIsEmpty: checkIsEmpty,
+ }
+ return encoder
+ }
+ if typ.Implements(marshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &marshalerEncoder{
+ valType: typ,
+ checkIsEmpty: checkIsEmpty,
+ }
+ return encoder
+ }
+ ptrType := reflect2.PtrTo(typ)
+ if ctx.prefix != "" && ptrType.Implements(marshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
+ var encoder ValEncoder = &marshalerEncoder{
+ valType: ptrType,
+ checkIsEmpty: checkIsEmpty,
+ }
+ return &referenceEncoder{encoder}
+ }
+ if typ == textMarshalerType {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &directTextMarshalerEncoder{
+ checkIsEmpty: checkIsEmpty,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ return encoder
+ }
+ if typ.Implements(textMarshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &textMarshalerEncoder{
+ valType: typ,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ checkIsEmpty: checkIsEmpty,
+ }
+ return encoder
+ }
+	// if prefix is empty, the type is the root type; a root value may not be
+	// addressable, so the pointer-receiver marshaler only applies to nested types
+ if ctx.prefix != "" && ptrType.Implements(textMarshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
+ var encoder ValEncoder = &textMarshalerEncoder{
+ valType: ptrType,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ checkIsEmpty: checkIsEmpty,
+ }
+ return &referenceEncoder{encoder}
+ }
+ return nil
+}
+
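+// marshalerEncoder delegates to a value's own json.Marshaler implementation.
+// A nil value of a nullable type encodes as `null`; otherwise MarshalJSON is
+// invoked and its output is written through as-is (minus a trailing newline).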
+type marshalerEncoder struct {
+ checkIsEmpty checkIsEmpty
+ valType reflect2.Type
+}
+
+func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
+ stream.WriteNil()
+ return
+ }
+ marshaler := obj.(json.Marshaler)
+ bytes, err := marshaler.MarshalJSON()
+ if err != nil {
+ stream.Error = err
+ } else {
+		// html escaping was already done by jsoniter,
+		// but the extra trailing '\n' should be trimmed
+ l := len(bytes)
+ if l > 0 && bytes[l-1] == '\n' {
+ bytes = bytes[:l-1]
+ }
+ stream.Write(bytes)
+ }
+}
+
+func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type directMarshalerEncoder struct {
+ checkIsEmpty checkIsEmpty
+}
+
+func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ marshaler := *(*json.Marshaler)(ptr)
+ if marshaler == nil {
+ stream.WriteNil()
+ return
+ }
+ bytes, err := marshaler.MarshalJSON()
+ if err != nil {
+ stream.Error = err
+ } else {
+ stream.Write(bytes)
+ }
+}
+
+func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type textMarshalerEncoder struct {
+ valType reflect2.Type
+ stringEncoder ValEncoder
+ checkIsEmpty checkIsEmpty
+}
+
+func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
+ stream.WriteNil()
+ return
+ }
+ marshaler := (obj).(encoding.TextMarshaler)
+ bytes, err := marshaler.MarshalText()
+ if err != nil {
+ stream.Error = err
+ } else {
+ str := string(bytes)
+ encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
+ }
+}
+
+func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type directTextMarshalerEncoder struct {
+ stringEncoder ValEncoder
+ checkIsEmpty checkIsEmpty
+}
+
+func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ marshaler := *(*encoding.TextMarshaler)(ptr)
+ if marshaler == nil {
+ stream.WriteNil()
+ return
+ }
+ bytes, err := marshaler.MarshalText()
+ if err != nil {
+ stream.Error = err
+ } else {
+ str := string(bytes)
+ encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
+ }
+}
+
+func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type unmarshalerDecoder struct {
+ valType reflect2.Type
+}
+
+func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ valType := decoder.valType
+ obj := valType.UnsafeIndirect(ptr)
+ unmarshaler := obj.(json.Unmarshaler)
+	iter.nextToken()
+	iter.unreadByte() // skip leading whitespace without consuming the value
+ bytes := iter.SkipAndReturnBytes()
+ err := unmarshaler.UnmarshalJSON(bytes)
+ if err != nil {
+ iter.ReportError("unmarshalerDecoder", err.Error())
+ }
+}
+
+type textUnmarshalerDecoder struct {
+ valType reflect2.Type
+}
+
+func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ valType := decoder.valType
+ obj := valType.UnsafeIndirect(ptr)
+ if reflect2.IsNil(obj) {
+ ptrType := valType.(*reflect2.UnsafePtrType)
+ elemType := ptrType.Elem()
+ elem := elemType.UnsafeNew()
+ ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem))
+ obj = valType.UnsafeIndirect(ptr)
+ }
+ unmarshaler := (obj).(encoding.TextUnmarshaler)
+ str := iter.ReadString()
+ err := unmarshaler.UnmarshalText([]byte(str))
+ if err != nil {
+ iter.ReportError("textUnmarshalerDecoder", err.Error())
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go
new file mode 100644
index 000000000..f88722d14
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_native.go
@@ -0,0 +1,453 @@
+package jsoniter
+
+import (
+ "encoding/base64"
+ "reflect"
+ "strconv"
+ "unsafe"
+
+ "github.com/modern-go/reflect2"
+)
+
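+// ptrSize is the pointer width in bits: ^uintptr(0)>>63 is 0 on 32-bit
+// platforms and 1 on 64-bit platforms, so the shift yields 32 or 64.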
+const ptrSize = 32 << uintptr(^uintptr(0)>>63)
+
+func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
+ sliceDecoder := decoderOfSlice(ctx, typ)
+ return &base64Codec{sliceDecoder: sliceDecoder}
+ }
+ typeName := typ.String()
+ kind := typ.Kind()
+ switch kind {
+ case reflect.String:
+ if typeName != "string" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
+ }
+ return &stringCodec{}
+ case reflect.Int:
+ if typeName != "int" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &int32Codec{}
+ }
+ return &int64Codec{}
+ case reflect.Int8:
+ if typeName != "int8" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
+ }
+ return &int8Codec{}
+ case reflect.Int16:
+ if typeName != "int16" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
+ }
+ return &int16Codec{}
+ case reflect.Int32:
+ if typeName != "int32" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
+ }
+ return &int32Codec{}
+ case reflect.Int64:
+ if typeName != "int64" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
+ }
+ return &int64Codec{}
+ case reflect.Uint:
+ if typeName != "uint" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint8:
+ if typeName != "uint8" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
+ }
+ return &uint8Codec{}
+ case reflect.Uint16:
+ if typeName != "uint16" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
+ }
+ return &uint16Codec{}
+ case reflect.Uint32:
+ if typeName != "uint32" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
+ }
+ return &uint32Codec{}
+ case reflect.Uintptr:
+ if typeName != "uintptr" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
+ }
+ if ptrSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint64:
+ if typeName != "uint64" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
+ }
+ return &uint64Codec{}
+ case reflect.Float32:
+ if typeName != "float32" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
+ }
+ return &float32Codec{}
+ case reflect.Float64:
+ if typeName != "float64" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
+ }
+ return &float64Codec{}
+ case reflect.Bool:
+ if typeName != "bool" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
+ }
+ return &boolCodec{}
+ }
+ return nil
+}
+
+func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
+ sliceDecoder := decoderOfSlice(ctx, typ)
+ return &base64Codec{sliceDecoder: sliceDecoder}
+ }
+ typeName := typ.String()
+ switch typ.Kind() {
+ case reflect.String:
+ if typeName != "string" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
+ }
+ return &stringCodec{}
+ case reflect.Int:
+ if typeName != "int" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &int32Codec{}
+ }
+ return &int64Codec{}
+ case reflect.Int8:
+ if typeName != "int8" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
+ }
+ return &int8Codec{}
+ case reflect.Int16:
+ if typeName != "int16" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
+ }
+ return &int16Codec{}
+ case reflect.Int32:
+ if typeName != "int32" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
+ }
+ return &int32Codec{}
+ case reflect.Int64:
+ if typeName != "int64" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
+ }
+ return &int64Codec{}
+ case reflect.Uint:
+ if typeName != "uint" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint8:
+ if typeName != "uint8" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
+ }
+ return &uint8Codec{}
+ case reflect.Uint16:
+ if typeName != "uint16" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
+ }
+ return &uint16Codec{}
+ case reflect.Uint32:
+ if typeName != "uint32" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
+ }
+ return &uint32Codec{}
+ case reflect.Uintptr:
+ if typeName != "uintptr" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
+ }
+ if ptrSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint64:
+ if typeName != "uint64" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
+ }
+ return &uint64Codec{}
+ case reflect.Float32:
+ if typeName != "float32" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
+ }
+ return &float32Codec{}
+ case reflect.Float64:
+ if typeName != "float64" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
+ }
+ return &float64Codec{}
+ case reflect.Bool:
+ if typeName != "bool" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
+ }
+ return &boolCodec{}
+ }
+ return nil
+}
+
+type stringCodec struct {
+}
+
+func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ *((*string)(ptr)) = iter.ReadString()
+}
+
+func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ str := *((*string)(ptr))
+ stream.WriteString(str)
+}
+
+func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*string)(ptr)) == ""
+}
+
+type int8Codec struct {
+}
+
+func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int8)(ptr)) = iter.ReadInt8()
+ }
+}
+
+func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt8(*((*int8)(ptr)))
+}
+
+func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int8)(ptr)) == 0
+}
+
+type int16Codec struct {
+}
+
+func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int16)(ptr)) = iter.ReadInt16()
+ }
+}
+
+func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt16(*((*int16)(ptr)))
+}
+
+func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int16)(ptr)) == 0
+}
+
+type int32Codec struct {
+}
+
+func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int32)(ptr)) = iter.ReadInt32()
+ }
+}
+
+func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt32(*((*int32)(ptr)))
+}
+
+func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int32)(ptr)) == 0
+}
+
+type int64Codec struct {
+}
+
+func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int64)(ptr)) = iter.ReadInt64()
+ }
+}
+
+func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt64(*((*int64)(ptr)))
+}
+
+func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int64)(ptr)) == 0
+}
+
+type uint8Codec struct {
+}
+
+func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint8)(ptr)) = iter.ReadUint8()
+ }
+}
+
+func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint8(*((*uint8)(ptr)))
+}
+
+func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint8)(ptr)) == 0
+}
+
+type uint16Codec struct {
+}
+
+func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint16)(ptr)) = iter.ReadUint16()
+ }
+}
+
+func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint16(*((*uint16)(ptr)))
+}
+
+func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint16)(ptr)) == 0
+}
+
+type uint32Codec struct {
+}
+
+func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint32)(ptr)) = iter.ReadUint32()
+ }
+}
+
+func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint32(*((*uint32)(ptr)))
+}
+
+func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint32)(ptr)) == 0
+}
+
+type uint64Codec struct {
+}
+
+func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint64)(ptr)) = iter.ReadUint64()
+ }
+}
+
+func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint64(*((*uint64)(ptr)))
+}
+
+func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint64)(ptr)) == 0
+}
+
+type float32Codec struct {
+}
+
+func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*float32)(ptr)) = iter.ReadFloat32()
+ }
+}
+
+func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat32(*((*float32)(ptr)))
+}
+
+func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float32)(ptr)) == 0
+}
+
+type float64Codec struct {
+}
+
+func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*float64)(ptr)) = iter.ReadFloat64()
+ }
+}
+
+func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat64(*((*float64)(ptr)))
+}
+
+func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float64)(ptr)) == 0
+}
+
+type boolCodec struct {
+}
+
+func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*bool)(ptr)) = iter.ReadBool()
+ }
+}
+
+func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteBool(*((*bool)(ptr)))
+}
+
+func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return !(*((*bool)(ptr)))
+}
+
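+// base64Codec handles []byte the way encoding/json does: a value encodes as a
+// base64 string (nil encodes as null), and decoding accepts either a base64
+// string or, as a fallback, a plain JSON array of numbers. For example,
+// []byte("hi") encodes to "aGk=".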
+type base64Codec struct {
+ sliceType *reflect2.UnsafeSliceType
+ sliceDecoder ValDecoder
+}
+
+func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.ReadNil() {
+ codec.sliceType.UnsafeSetNil(ptr)
+ return
+ }
+ switch iter.WhatIsNext() {
+ case StringValue:
+ src := iter.ReadString()
+ dst, err := base64.StdEncoding.DecodeString(src)
+ if err != nil {
+ iter.ReportError("decode base64", err.Error())
+ } else {
+ codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst))
+ }
+ case ArrayValue:
+ codec.sliceDecoder.Decode(ptr, iter)
+ default:
+ iter.ReportError("base64Codec", "invalid input")
+ }
+}
+
+func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if codec.sliceType.UnsafeIsNil(ptr) {
+ stream.WriteNil()
+ return
+ }
+ src := *((*[]byte)(ptr))
+ encoding := base64.StdEncoding
+ stream.writeByte('"')
+ if len(src) != 0 {
+ size := encoding.EncodedLen(len(src))
+ buf := make([]byte, size)
+ encoding.Encode(buf, src)
+ stream.buf = append(stream.buf, buf...)
+ }
+ stream.writeByte('"')
+}
+
+func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*[]byte)(ptr))) == 0
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go
new file mode 100644
index 000000000..43ec71d6d
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_optional.go
@@ -0,0 +1,133 @@
+package jsoniter
+
+import (
+ "github.com/modern-go/reflect2"
+ "reflect"
+ "unsafe"
+)
+
+func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder {
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ elemType := ptrType.Elem()
+ decoder := decoderOfType(ctx, elemType)
+ if ctx.prefix == "" && elemType.Kind() == reflect.Ptr {
+ return &dereferenceDecoder{elemType, decoder}
+ }
+ return &OptionalDecoder{elemType, decoder}
+}
+
+func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder {
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ elemType := ptrType.Elem()
+ elemEncoder := encoderOfType(ctx, elemType)
+ encoder := &OptionalEncoder{elemEncoder}
+ return encoder
+}
+
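+// OptionalDecoder decodes into a pointer: JSON null stores nil, and any other
+// value either allocates a fresh target (when the pointer is nil) or decodes
+// in place into the existing pointee, preserving prior allocations.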
+type OptionalDecoder struct {
+ ValueType reflect2.Type
+ ValueDecoder ValDecoder
+}
+
+func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.ReadNil() {
+ *((*unsafe.Pointer)(ptr)) = nil
+ } else {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+			// pointer is nil, so allocate memory to hold the value
+ newPtr := decoder.ValueType.UnsafeNew()
+ decoder.ValueDecoder.Decode(newPtr, iter)
+ *((*unsafe.Pointer)(ptr)) = newPtr
+ } else {
+			// reuse the existing instance
+ decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
+ }
+ }
+}
+
+type dereferenceDecoder struct {
+	// only to dereference a pointer
+ valueType reflect2.Type
+ valueDecoder ValDecoder
+}
+
+func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+		// pointer is nil, so allocate memory to hold the value
+ newPtr := decoder.valueType.UnsafeNew()
+ decoder.valueDecoder.Decode(newPtr, iter)
+ *((*unsafe.Pointer)(ptr)) = newPtr
+ } else {
+		// reuse the existing instance
+ decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
+ }
+}
+
+type OptionalEncoder struct {
+ ValueEncoder ValEncoder
+}
+
+func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+ stream.WriteNil()
+ } else {
+ encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
+ }
+}
+
+func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*unsafe.Pointer)(ptr)) == nil
+}
+
+type dereferenceEncoder struct {
+ ValueEncoder ValEncoder
+}
+
+func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+ stream.WriteNil()
+ } else {
+ encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
+ }
+}
+
+func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ dePtr := *((*unsafe.Pointer)(ptr))
+ if dePtr == nil {
+ return true
+ }
+ return encoder.ValueEncoder.IsEmpty(dePtr)
+}
+
+func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
+ deReferenced := *((*unsafe.Pointer)(ptr))
+ if deReferenced == nil {
+ return true
+ }
+ isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil)
+ if !converted {
+ return false
+ }
+ fieldPtr := unsafe.Pointer(deReferenced)
+ return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
+}
+
+type referenceEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
+}
+
+func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
+}
+
+type referenceDecoder struct {
+ decoder ValDecoder
+}
+
+func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.decoder.Decode(unsafe.Pointer(&ptr), iter)
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go
new file mode 100644
index 000000000..9441d79df
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_slice.go
@@ -0,0 +1,99 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "unsafe"
+)
+
+func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder {
+ sliceType := typ.(*reflect2.UnsafeSliceType)
+ decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
+ return &sliceDecoder{sliceType, decoder}
+}
+
+func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder {
+ sliceType := typ.(*reflect2.UnsafeSliceType)
+ encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
+ return &sliceEncoder{sliceType, encoder}
+}
+
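+// sliceEncoder follows encoding/json conventions: a nil slice encodes as
+// null, an empty slice as [], and elements are otherwise written
+// comma-separated via the element encoder.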
+type sliceEncoder struct {
+ sliceType *reflect2.UnsafeSliceType
+ elemEncoder ValEncoder
+}
+
+func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if encoder.sliceType.UnsafeIsNil(ptr) {
+ stream.WriteNil()
+ return
+ }
+ length := encoder.sliceType.UnsafeLengthOf(ptr)
+ if length == 0 {
+ stream.WriteEmptyArray()
+ return
+ }
+ stream.WriteArrayStart()
+ encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream)
+ for i := 1; i < length; i++ {
+ stream.WriteMore()
+ elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i)
+ encoder.elemEncoder.Encode(elemPtr, stream)
+ }
+ stream.WriteArrayEnd()
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
+ }
+}
+
+func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.sliceType.UnsafeLengthOf(ptr) == 0
+}
+
+type sliceDecoder struct {
+ sliceType *reflect2.UnsafeSliceType
+ elemDecoder ValDecoder
+}
+
+func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.doDecode(ptr, iter)
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
+ }
+}
+
+func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
+ c := iter.nextToken()
+ sliceType := decoder.sliceType
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ sliceType.UnsafeSetNil(ptr)
+ return
+ }
+ if c != '[' {
+ iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c}))
+ return
+ }
+ c = iter.nextToken()
+ if c == ']' {
+ sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0))
+ return
+ }
+ iter.unreadByte()
+ sliceType.UnsafeGrow(ptr, 1)
+ elemPtr := sliceType.UnsafeGetIndex(ptr, 0)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ length := 1
+ for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+ idx := length
+		length++
+ sliceType.UnsafeGrow(ptr, length)
+ elemPtr = sliceType.UnsafeGetIndex(ptr, idx)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ }
+ if c != ']' {
+ iter.ReportError("decode slice", "expect ], but found "+string([]byte{c}))
+ return
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
new file mode 100644
index 000000000..5ad5cc561
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
@@ -0,0 +1,1092 @@
+package jsoniter
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "unsafe"
+
+ "github.com/modern-go/reflect2"
+)
+
+func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder {
+ bindings := map[string]*Binding{}
+ structDescriptor := describeStruct(ctx, typ)
+ for _, binding := range structDescriptor.Fields {
+ for _, fromName := range binding.FromNames {
+ old := bindings[fromName]
+ if old == nil {
+ bindings[fromName] = binding
+ continue
+ }
+ ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding)
+ if ignoreOld {
+ delete(bindings, fromName)
+ }
+ if !ignoreNew {
+ bindings[fromName] = binding
+ }
+ }
+ }
+ fields := map[string]*structFieldDecoder{}
+ for k, binding := range bindings {
+ fields[k] = binding.Decoder.(*structFieldDecoder)
+ }
+
+ if !ctx.caseSensitive() {
+ for k, binding := range bindings {
+ if _, found := fields[strings.ToLower(k)]; !found {
+ fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder)
+ }
+ }
+ }
+
+ return createStructDecoder(ctx, typ, fields)
+}
+
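+// createStructDecoder specializes decoding by field count: structs with up to
+// ten fields get an unrolled decoder that dispatches on a per-field name hash.
+// The hash 0 is pre-seeded in knownHash because 0 doubles as the "unset"
+// sentinel below; any collision (including a field hashing to 0) falls back
+// to the map-based generalStructDecoder.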
+func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder {
+ if ctx.disallowUnknownFields {
+ return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true}
+ }
+ knownHash := map[int64]struct{}{
+ 0: {},
+ }
+
+ switch len(fields) {
+ case 0:
+ return &skipObjectDecoder{typ}
+ case 1:
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}
+ }
+ case 2:
+ var fieldHash1 int64
+ var fieldHash2 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldHash1 == 0 {
+ fieldHash1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else {
+ fieldHash2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ }
+ }
+ return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}
+ case 3:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ }
+ }
+ return &threeFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3}
+ case 4:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ }
+ }
+ return &fourFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4}
+ case 5:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ }
+ }
+ return &fiveFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5}
+ case 6:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ }
+ }
+ return &sixFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6}
+ case 7:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ }
+ }
+ return &sevenFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7}
+ case 8:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldName8 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ var fieldDecoder8 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else if fieldName7 == 0 {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ } else {
+ fieldName8 = fieldHash
+ fieldDecoder8 = fieldDecoder
+ }
+ }
+ return &eightFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7,
+ fieldName8, fieldDecoder8}
+ case 9:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldName8 int64
+ var fieldName9 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ var fieldDecoder8 *structFieldDecoder
+ var fieldDecoder9 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else if fieldName7 == 0 {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ } else if fieldName8 == 0 {
+ fieldName8 = fieldHash
+ fieldDecoder8 = fieldDecoder
+ } else {
+ fieldName9 = fieldHash
+ fieldDecoder9 = fieldDecoder
+ }
+ }
+ return &nineFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7,
+ fieldName8, fieldDecoder8,
+ fieldName9, fieldDecoder9}
+ case 10:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldName8 int64
+ var fieldName9 int64
+ var fieldName10 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ var fieldDecoder8 *structFieldDecoder
+ var fieldDecoder9 *structFieldDecoder
+ var fieldDecoder10 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else if fieldName7 == 0 {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ } else if fieldName8 == 0 {
+ fieldName8 = fieldHash
+ fieldDecoder8 = fieldDecoder
+ } else if fieldName9 == 0 {
+ fieldName9 = fieldHash
+ fieldDecoder9 = fieldDecoder
+ } else {
+ fieldName10 = fieldHash
+ fieldDecoder10 = fieldDecoder
+ }
+ }
+ return &tenFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7,
+ fieldName8, fieldDecoder8,
+ fieldName9, fieldDecoder9,
+ fieldName10, fieldDecoder10}
+ }
+ return &generalStructDecoder{typ, fields, false}
+}
+
+type generalStructDecoder struct {
+ typ reflect2.Type
+ fields map[string]*structFieldDecoder
+ disallowUnknownFields bool
+}
+
+func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ var c byte
+ for c = ','; c == ','; c = iter.nextToken() {
+ decoder.decodeOneField(ptr, iter)
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ if c != '}' {
+ iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c}))
+ }
+ iter.decrementDepth()
+}
+
+func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) {
+ var field string
+ var fieldDecoder *structFieldDecoder
+ if iter.cfg.objectFieldMustBeSimpleString {
+ fieldBytes := iter.ReadStringAsSlice()
+ field = *(*string)(unsafe.Pointer(&fieldBytes))
+ fieldDecoder = decoder.fields[field]
+ if fieldDecoder == nil && !iter.cfg.caseSensitive {
+ fieldDecoder = decoder.fields[strings.ToLower(field)]
+ }
+ } else {
+ field = iter.ReadString()
+ fieldDecoder = decoder.fields[field]
+ if fieldDecoder == nil && !iter.cfg.caseSensitive {
+ fieldDecoder = decoder.fields[strings.ToLower(field)]
+ }
+ }
+ if fieldDecoder == nil {
+ if decoder.disallowUnknownFields {
+ msg := "found unknown field: " + field
+ iter.ReportError("ReadObject", msg)
+ }
+ c := iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ iter.Skip()
+ return
+ }
+ c := iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ fieldDecoder.Decode(ptr, iter)
+}
+
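+// skipObjectDecoder is used when a struct has no decodable fields: it
+// validates that the input is an object (or null) and skips it wholesale.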
+type skipObjectDecoder struct {
+ typ reflect2.Type
+}
+
+func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ valueType := iter.WhatIsNext()
+ if valueType != ObjectValue && valueType != NilValue {
+ iter.ReportError("skipObjectDecoder", "expect object or null")
+ return
+ }
+ iter.Skip()
+}
+
+type oneFieldStructDecoder struct {
+ typ reflect2.Type
+ fieldHash int64
+ fieldDecoder *structFieldDecoder
+}
+
+func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ if iter.readFieldHash() == decoder.fieldHash {
+ decoder.fieldDecoder.Decode(ptr, iter)
+ } else {
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type twoFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+}
+
+func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type threeFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+}
+
+func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type fourFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+}
+
+func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type fiveFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+}
+
+func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type sixFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+}
+
+func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type sevenFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+}
+
+func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type eightFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+ fieldHash8 int64
+ fieldDecoder8 *structFieldDecoder
+}
+
+func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ case decoder.fieldHash8:
+ decoder.fieldDecoder8.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type nineFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+ fieldHash8 int64
+ fieldDecoder8 *structFieldDecoder
+ fieldHash9 int64
+ fieldDecoder9 *structFieldDecoder
+}
+
+func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ case decoder.fieldHash8:
+ decoder.fieldDecoder8.Decode(ptr, iter)
+ case decoder.fieldHash9:
+ decoder.fieldDecoder9.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type tenFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+ fieldHash8 int64
+ fieldDecoder8 *structFieldDecoder
+ fieldHash9 int64
+ fieldDecoder9 *structFieldDecoder
+ fieldHash10 int64
+ fieldDecoder10 *structFieldDecoder
+}
+
+func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ case decoder.fieldHash8:
+ decoder.fieldDecoder8.Decode(ptr, iter)
+ case decoder.fieldHash9:
+ decoder.fieldDecoder9.Decode(ptr, iter)
+ case decoder.fieldHash10:
+ decoder.fieldDecoder10.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type structFieldDecoder struct {
+ field reflect2.StructField
+ fieldDecoder ValDecoder
+}
+
+func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ fieldPtr := decoder.field.UnsafeGet(ptr)
+ decoder.fieldDecoder.Decode(fieldPtr, iter)
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error())
+ }
+}
+
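+// stringModeStringDecoder handles string fields using the `json:",string"`
+// option, where the value arrives double-encoded: the field is decoded
+// normally, then the resulting text is parsed again as a JSON string.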
+type stringModeStringDecoder struct {
+ elemDecoder ValDecoder
+ cfg *frozenConfig
+}
+
+func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.elemDecoder.Decode(ptr, iter)
+ str := *((*string)(ptr))
+ tempIter := decoder.cfg.BorrowIterator([]byte(str))
+ defer decoder.cfg.ReturnIterator(tempIter)
+ *((*string)(ptr)) = tempIter.ReadString()
+}
+
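+// stringModeNumberDecoder handles numeric fields using the `json:",string"`
+// option: it consumes the surrounding quotes and lets the element decoder
+// read the number, so {"n":"42"} decodes into a field tagged `json:"n,string"`.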
+type stringModeNumberDecoder struct {
+ elemDecoder ValDecoder
+}
+
+func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ c := iter.nextToken()
+ if c != '"' {
+ iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
+ return
+ }
+ decoder.elemDecoder.Decode(ptr, iter)
+ if iter.Error != nil {
+ return
+ }
+ c = iter.readByte()
+ if c != '"' {
+ iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
+ return
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
new file mode 100644
index 000000000..152e3ef5a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
@@ -0,0 +1,211 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "reflect"
+ "unsafe"
+)
+
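+// encoderOfStruct builds a struct encoder by walking the described fields,
+// resolving name conflicts between (possibly embedded) fields, and keeping
+// the surviving bindings in their original order.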
+func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder {
+ type bindingTo struct {
+ binding *Binding
+ toName string
+ ignored bool
+ }
+ orderedBindings := []*bindingTo{}
+ structDescriptor := describeStruct(ctx, typ)
+ for _, binding := range structDescriptor.Fields {
+ for _, toName := range binding.ToNames {
+ new := &bindingTo{
+ binding: binding,
+ toName: toName,
+ }
+ for _, old := range orderedBindings {
+ if old.toName != toName {
+ continue
+ }
+ old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding)
+ }
+ orderedBindings = append(orderedBindings, new)
+ }
+ }
+ if len(orderedBindings) == 0 {
+ return &emptyStructEncoder{}
+ }
+ finalOrderedFields := []structFieldTo{}
+ for _, bindingTo := range orderedBindings {
+ if !bindingTo.ignored {
+ finalOrderedFields = append(finalOrderedFields, structFieldTo{
+ encoder: bindingTo.binding.Encoder.(*structFieldEncoder),
+ toName: bindingTo.toName,
+ })
+ }
+ }
+ return &structEncoder{typ, finalOrderedFields}
+}
+
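+// createCheckIsEmpty picks an encoder-backed emptiness check for typ,
+// used to implement the omitempty option.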
+func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty {
+ encoder := createEncoderOfNative(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ kind := typ.Kind()
+ switch kind {
+ case reflect.Interface:
+ return &dynamicEncoder{typ}
+ case reflect.Struct:
+ return &structEncoder{typ: typ}
+ case reflect.Array:
+ return &arrayEncoder{}
+ case reflect.Slice:
+ return &sliceEncoder{}
+ case reflect.Map:
+ return encoderOfMap(ctx, typ)
+ case reflect.Ptr:
+ return &OptionalEncoder{}
+ default:
+ return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)}
+ }
+}
+
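+// resolveConflictBinding decides which of two bindings to the same JSON name
+// survives: when both fields are tagged (or both untagged), the field with
+// fewer embedding levels wins and an exact tie drops both; in the mixed
+// cases this implementation drops the old binding and keeps the new one.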
+func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) {
+ newTagged := new.Field.Tag().Get(cfg.getTagKey()) != ""
+ oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != ""
+ if newTagged {
+ if oldTagged {
+ if len(old.levels) > len(new.levels) {
+ return true, false
+ } else if len(new.levels) > len(old.levels) {
+ return false, true
+ } else {
+ return true, true
+ }
+ } else {
+ return true, false
+ }
+ } else {
+ if oldTagged {
+ return true, false
+ }
+ if len(old.levels) > len(new.levels) {
+ return true, false
+ } else if len(new.levels) > len(old.levels) {
+ return false, true
+ } else {
+ return true, true
+ }
+ }
+}
+
+type structFieldEncoder struct {
+ field reflect2.StructField
+ fieldEncoder ValEncoder
+ omitempty bool
+}
+
+func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ fieldPtr := encoder.field.UnsafeGet(ptr)
+ encoder.fieldEncoder.Encode(fieldPtr, stream)
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error())
+ }
+}
+
+func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ fieldPtr := encoder.field.UnsafeGet(ptr)
+ return encoder.fieldEncoder.IsEmpty(fieldPtr)
+}
+
+func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
+ isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil)
+ if !converted {
+ return false
+ }
+ fieldPtr := encoder.field.UnsafeGet(ptr)
+ return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
+}
+
+type IsEmbeddedPtrNil interface {
+ IsEmbeddedPtrNil(ptr unsafe.Pointer) bool
+}
+
+type structEncoder struct {
+ typ reflect2.Type
+ fields []structFieldTo
+}
+
+type structFieldTo struct {
+ encoder *structFieldEncoder
+ toName string
+}
+
+func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteObjectStart()
+ isNotFirst := false
+ for _, field := range encoder.fields {
+ if field.encoder.omitempty && field.encoder.IsEmpty(ptr) {
+ continue
+ }
+ if field.encoder.IsEmbeddedPtrNil(ptr) {
+ continue
+ }
+ if isNotFirst {
+ stream.WriteMore()
+ }
+ stream.WriteObjectField(field.toName)
+ field.encoder.Encode(ptr, stream)
+ isNotFirst = true
+ }
+ stream.WriteObjectEnd()
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error())
+ }
+}
+
+func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type emptyStructEncoder struct {
+}
+
+func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteEmptyObject()
+}
+
+func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
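+// stringModeNumberEncoder implements the `json:",string"` option for numeric
+// fields: the value is encoded as usual and wrapped in double quotes.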
+type stringModeNumberEncoder struct {
+ elemEncoder ValEncoder
+}
+
+func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.writeByte('"')
+ encoder.elemEncoder.Encode(ptr, stream)
+ stream.writeByte('"')
+}
+
+func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.elemEncoder.IsEmpty(ptr)
+}
+
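+// stringModeStringEncoder double-encodes string fields using the
+// `json:",string"` option: the value is rendered to a temporary stream and
+// that output is written again as a JSON string.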
+type stringModeStringEncoder struct {
+ elemEncoder ValEncoder
+ cfg *frozenConfig
+}
+
+func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ tempStream := encoder.cfg.BorrowStream(nil)
+ tempStream.Attachment = stream.Attachment
+ defer encoder.cfg.ReturnStream(tempStream)
+ encoder.elemEncoder.Encode(ptr, tempStream)
+ stream.WriteString(string(tempStream.Buffer()))
+}
+
+func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.elemEncoder.IsEmpty(ptr)
+}
diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go
new file mode 100644
index 000000000..17662fded
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream.go
@@ -0,0 +1,211 @@
+package jsoniter
+
+import (
+ "io"
+)
+
+// Stream is an io.Writer-like object with JSON-specific write functions.
+// Errors are not returned as return values; they are stored in the stream's Error field.
+type Stream struct {
+ cfg *frozenConfig
+ out io.Writer
+ buf []byte
+ Error error
+ indention int
+	Attachment interface{} // available to customized encoders
+}
+
+// NewStream creates a new stream instance.
+// cfg can be jsoniter.ConfigDefault.
+// out can be nil to write only to the internal buffer.
+// bufSize is the initial size of the internal buffer in bytes.
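+//
+// A minimal usage sketch (in-package names, writing to the internal buffer):
+//
+//	stream := NewStream(ConfigDefault, nil, 64)
+//	stream.WriteObjectStart()
+//	stream.WriteObjectField("ok")
+//	stream.WriteBool(true)
+//	stream.WriteObjectEnd()
+//	out := stream.Buffer() // {"ok":true}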
+func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
+ return &Stream{
+ cfg: cfg.(*frozenConfig),
+ out: out,
+ buf: make([]byte, 0, bufSize),
+ Error: nil,
+ indention: 0,
+ }
+}
+
+// Pool returns a pool that can provide more streams with the same configuration.
+func (stream *Stream) Pool() StreamPool {
+ return stream.cfg
+}
+
+// Reset reuses this stream instance by assigning a new writer.
+func (stream *Stream) Reset(out io.Writer) {
+ stream.out = out
+ stream.buf = stream.buf[:0]
+}
+
+// Available returns how many bytes are unused in the buffer.
+func (stream *Stream) Available() int {
+ return cap(stream.buf) - len(stream.buf)
+}
+
+// Buffered returns the number of bytes that have been written into the current buffer.
+func (stream *Stream) Buffered() int {
+ return len(stream.buf)
+}
+
+// Buffer returns the written bytes; if the writer is nil, use this method to take the result.
+func (stream *Stream) Buffer() []byte {
+ return stream.buf
+}
+
+// SetBuffer replaces the internal buffer, allowing callers to append to it directly.
+func (stream *Stream) SetBuffer(buf []byte) {
+ stream.buf = buf
+}
+
+// Write writes the contents of p into the buffer and, if an underlying
+// writer is set, flushes the whole buffer to it. It returns the number of
+// bytes written and any error from the underlying writer.
+func (stream *Stream) Write(p []byte) (nn int, err error) {
+ stream.buf = append(stream.buf, p...)
+ if stream.out != nil {
+ nn, err = stream.out.Write(stream.buf)
+ stream.buf = stream.buf[nn:]
+ return
+ }
+ return len(p), nil
+}
+
+// writeByte appends a single byte to the buffer.
+func (stream *Stream) writeByte(c byte) {
+ stream.buf = append(stream.buf, c)
+}
+
+func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
+ stream.buf = append(stream.buf, c1, c2)
+}
+
+func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
+ stream.buf = append(stream.buf, c1, c2, c3)
+}
+
+func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
+ stream.buf = append(stream.buf, c1, c2, c3, c4)
+}
+
+func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
+ stream.buf = append(stream.buf, c1, c2, c3, c4, c5)
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
+func (stream *Stream) Flush() error {
+ if stream.out == nil {
+ return nil
+ }
+ if stream.Error != nil {
+ return stream.Error
+ }
+ n, err := stream.out.Write(stream.buf)
+ if err != nil {
+ if stream.Error == nil {
+ stream.Error = err
+ }
+ return err
+ }
+ stream.buf = stream.buf[n:]
+ return nil
+}
+
+// WriteRaw writes the string out without quotes, as raw bytes.
+func (stream *Stream) WriteRaw(s string) {
+ stream.buf = append(stream.buf, s...)
+}
+
+// WriteNil writes null to the stream.
+func (stream *Stream) WriteNil() {
+ stream.writeFourBytes('n', 'u', 'l', 'l')
+}
+
+// WriteTrue writes true to the stream.
+func (stream *Stream) WriteTrue() {
+ stream.writeFourBytes('t', 'r', 'u', 'e')
+}
+
+// WriteFalse writes false to the stream.
+func (stream *Stream) WriteFalse() {
+ stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
+}
+
+// WriteBool writes true or false to the stream.
+func (stream *Stream) WriteBool(val bool) {
+ if val {
+ stream.WriteTrue()
+ } else {
+ stream.WriteFalse()
+ }
+}
+
+// WriteObjectStart writes '{' with possible indentation.
+func (stream *Stream) WriteObjectStart() {
+ stream.indention += stream.cfg.indentionStep
+ stream.writeByte('{')
+ stream.writeIndention(0)
+}
+
+// WriteObjectField writes "field": with possible indentation.
+func (stream *Stream) WriteObjectField(field string) {
+ stream.WriteString(field)
+ if stream.indention > 0 {
+ stream.writeTwoBytes(':', ' ')
+ } else {
+ stream.writeByte(':')
+ }
+}
+
+// WriteObjectEnd writes '}' with possible indentation.
+func (stream *Stream) WriteObjectEnd() {
+ stream.writeIndention(stream.cfg.indentionStep)
+ stream.indention -= stream.cfg.indentionStep
+ stream.writeByte('}')
+}
+
+// WriteEmptyObject writes {}.
+func (stream *Stream) WriteEmptyObject() {
+ stream.writeByte('{')
+ stream.writeByte('}')
+}
+
+// WriteMore writes ',' with possible indentation and flushes the stream.
+func (stream *Stream) WriteMore() {
+ stream.writeByte(',')
+ stream.writeIndention(0)
+ stream.Flush()
+}
+
+// WriteArrayStart writes '[' with possible indentation.
+func (stream *Stream) WriteArrayStart() {
+ stream.indention += stream.cfg.indentionStep
+ stream.writeByte('[')
+ stream.writeIndention(0)
+}
+
+// WriteEmptyArray writes [].
+func (stream *Stream) WriteEmptyArray() {
+ stream.writeTwoBytes('[', ']')
+}
+
+// WriteArrayEnd writes ']' with possible indentation.
+func (stream *Stream) WriteArrayEnd() {
+ stream.writeIndention(stream.cfg.indentionStep)
+ stream.indention -= stream.cfg.indentionStep
+ stream.writeByte(']')
+}
+
+func (stream *Stream) writeIndention(delta int) {
+ if stream.indention == 0 {
+ return
+ }
+ stream.writeByte('\n')
+ toWrite := stream.indention - delta
+ for i := 0; i < toWrite; i++ {
+ stream.buf = append(stream.buf, ' ')
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go
new file mode 100644
index 000000000..826aa594a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_float.go
@@ -0,0 +1,111 @@
+package jsoniter
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+)
+
+var pow10 []uint64
+
+func init() {
+ pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
+}
+
+// WriteFloat32 writes float32 to the stream.
+func (stream *Stream) WriteFloat32(val float32) {
+ if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
+ abs := math.Abs(float64(val))
+ fmt := byte('f')
+ // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
+ if abs != 0 {
+ if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
+ fmt = 'e'
+ }
+ }
+ stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32)
+}
+
+// WriteFloat32Lossy writes float32 to the stream with only 6 digits of precision; it is much faster than WriteFloat32.
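+// It scales the value by 1e6 with rounding, writes the integer part, then a
+// zero-padded fraction with trailing zeros trimmed; e.g. 3.0625 is written
+// via the intermediate 3062500 as "3.0625".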
+func (stream *Stream) WriteFloat32Lossy(val float32) {
+ if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
+ if val < 0 {
+ stream.writeByte('-')
+ val = -val
+ }
+ if val > 0x4ffffff {
+ stream.WriteFloat32(val)
+ return
+ }
+ precision := 6
+ exp := uint64(1000000) // 6
+ lval := uint64(float64(val)*float64(exp) + 0.5)
+ stream.WriteUint64(lval / exp)
+ fval := lval % exp
+ if fval == 0 {
+ return
+ }
+ stream.writeByte('.')
+ for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+ stream.writeByte('0')
+ }
+ stream.WriteUint64(fval)
+ for stream.buf[len(stream.buf)-1] == '0' {
+ stream.buf = stream.buf[:len(stream.buf)-1]
+ }
+}
+
+// WriteFloat64 writes float64 to the stream.
+func (stream *Stream) WriteFloat64(val float64) {
+ if math.IsInf(val, 0) || math.IsNaN(val) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
+ abs := math.Abs(val)
+ fmt := byte('f')
+	// Note: use the 'e' format for very small or very large magnitudes.
+ if abs != 0 {
+ if abs < 1e-6 || abs >= 1e21 {
+ fmt = 'e'
+ }
+ }
+	stream.buf = strconv.AppendFloat(stream.buf, val, fmt, -1, 64)
+}
+
+// WriteFloat64Lossy writes float64 to the stream with only 6 digits of precision; it is much faster than WriteFloat64.
+func (stream *Stream) WriteFloat64Lossy(val float64) {
+ if math.IsInf(val, 0) || math.IsNaN(val) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
+ if val < 0 {
+ stream.writeByte('-')
+ val = -val
+ }
+ if val > 0x4ffffff {
+ stream.WriteFloat64(val)
+ return
+ }
+ precision := 6
+ exp := uint64(1000000) // 6
+ lval := uint64(val*float64(exp) + 0.5)
+ stream.WriteUint64(lval / exp)
+ fval := lval % exp
+ if fval == 0 {
+ return
+ }
+ stream.writeByte('.')
+ for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+ stream.writeByte('0')
+ }
+ stream.WriteUint64(fval)
+ for stream.buf[len(stream.buf)-1] == '0' {
+ stream.buf = stream.buf[:len(stream.buf)-1]
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go
new file mode 100644
index 000000000..d1059ee4c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_int.go
@@ -0,0 +1,190 @@
+package jsoniter
+
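+// digits caches, for every value 0..999, its three ASCII digits packed into
+// the low three bytes of a uint32 (hundreds, tens, ones), with the top byte
+// recording how many leading digits writeFirstBuf may skip: 2 for values
+// below 10, 1 for values below 100, 0 otherwise.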
+var digits []uint32
+
+func init() {
+ digits = make([]uint32, 1000)
+ for i := uint32(0); i < 1000; i++ {
+ digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
+ if i < 10 {
+ digits[i] += 2 << 24
+ } else if i < 100 {
+ digits[i] += 1 << 24
+ }
+ }
+}
+
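+// writeFirstBuf appends the most significant digit group without leading
+// zeros, using the skip count stored in the top byte of v (see digits).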
+func writeFirstBuf(space []byte, v uint32) []byte {
+ start := v >> 24
+ if start == 0 {
+ space = append(space, byte(v>>16), byte(v>>8))
+ } else if start == 1 {
+ space = append(space, byte(v>>8))
+ }
+ space = append(space, byte(v))
+ return space
+}
+
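+// writeBuf appends a full three-digit group, keeping leading zeros.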
+func writeBuf(buf []byte, v uint32) []byte {
+ return append(buf, byte(v>>16), byte(v>>8), byte(v))
+}
+
+// WriteUint8 writes uint8 to the stream.
+func (stream *Stream) WriteUint8(val uint8) {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteInt8 writes int8 to the stream.
+func (stream *Stream) WriteInt8(nval int8) {
+ var val uint8
+ if nval < 0 {
+ val = uint8(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint8(nval)
+ }
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteUint16 writes uint16 to the stream.
+func (stream *Stream) WriteUint16(val uint16) {
+ q1 := val / 1000
+ if q1 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+ return
+ }
+ r1 := val - q1*1000
+ stream.buf = writeFirstBuf(stream.buf, digits[q1])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt16 writes int16 to the stream.
+func (stream *Stream) WriteInt16(nval int16) {
+ var val uint16
+ if nval < 0 {
+ val = uint16(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint16(nval)
+ }
+ stream.WriteUint16(val)
+}
+
+// WriteUint32 writes uint32 to the stream.
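+// The value is emitted in groups of three decimal digits from most to least
+// significant; only the leading group drops its leading zeros.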
+func (stream *Stream) WriteUint32(val uint32) {
+ q1 := val / 1000
+ if q1 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+ return
+ }
+ r1 := val - q1*1000
+ q2 := q1 / 1000
+ if q2 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q1])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r2 := q1 - q2*1000
+ q3 := q2 / 1000
+ if q3 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q2])
+ } else {
+ r3 := q2 - q3*1000
+ stream.buf = append(stream.buf, byte(q3+'0'))
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ }
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt32 writes int32 to the stream.
+func (stream *Stream) WriteInt32(nval int32) {
+ var val uint32
+ if nval < 0 {
+ val = uint32(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint32(nval)
+ }
+ stream.WriteUint32(val)
+}
+
+// WriteUint64 writes uint64 to the stream.
+func (stream *Stream) WriteUint64(val uint64) {
+ q1 := val / 1000
+ if q1 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+ return
+ }
+ r1 := val - q1*1000
+ q2 := q1 / 1000
+ if q2 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q1])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r2 := q1 - q2*1000
+ q3 := q2 / 1000
+ if q3 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q2])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r3 := q2 - q3*1000
+ q4 := q3 / 1000
+ if q4 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q3])
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r4 := q3 - q4*1000
+ q5 := q4 / 1000
+ if q5 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q4])
+ stream.buf = writeBuf(stream.buf, digits[r4])
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r5 := q4 - q5*1000
+ q6 := q5 / 1000
+ if q6 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q5])
+ } else {
+ stream.buf = writeFirstBuf(stream.buf, digits[q6])
+ r6 := q5 - q6*1000
+ stream.buf = writeBuf(stream.buf, digits[r6])
+ }
+ stream.buf = writeBuf(stream.buf, digits[r5])
+ stream.buf = writeBuf(stream.buf, digits[r4])
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt64 writes int64 to the stream.
+func (stream *Stream) WriteInt64(nval int64) {
+ var val uint64
+ if nval < 0 {
+ val = uint64(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint64(nval)
+ }
+ stream.WriteUint64(val)
+}
+
+// WriteInt writes int to the stream.
+func (stream *Stream) WriteInt(val int) {
+ stream.WriteInt64(int64(val))
+}
+
+// WriteUint writes uint to the stream.
+func (stream *Stream) WriteUint(val uint) {
+ stream.WriteUint64(uint64(val))
+}
diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go
new file mode 100644
index 000000000..54c2ba0b3
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_str.go
@@ -0,0 +1,372 @@
+package jsoniter
+
+import (
+ "unicode/utf8"
+)
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML