| @@ -0,0 +1,315 @@ | |||
| // Copyright 2016 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| // Package iam supports the resource-specific operations of Google Cloud | |||
| // IAM (Identity and Access Management) for the Google Cloud Libraries. | |||
| // See https://cloud.google.com/iam for more about IAM. | |||
| // | |||
| // Users of the Google Cloud Libraries will typically not use this package | |||
| // directly. Instead they will begin with some resource that supports IAM, like | |||
| // a pubsub topic, and call its IAM method to get a Handle for that resource. | |||
| package iam | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "time" | |||
| gax "github.com/googleapis/gax-go/v2" | |||
| pb "google.golang.org/genproto/googleapis/iam/v1" | |||
| "google.golang.org/grpc" | |||
| "google.golang.org/grpc/codes" | |||
| "google.golang.org/grpc/metadata" | |||
| ) | |||
// client abstracts the IAMPolicy API to allow multiple implementations.
type client interface {
	// Get returns the current IAM policy for resource.
	Get(ctx context.Context, resource string) (*pb.Policy, error)
	// Set replaces resource's IAM policy with p.
	Set(ctx context.Context, resource string, p *pb.Policy) error
	// Test reports which of perms the caller holds on resource.
	Test(ctx context.Context, resource string, perms []string) ([]string, error)
}
// grpcClient implements client for the standard gRPC-based IAMPolicy service.
type grpcClient struct {
	c pb.IAMPolicyClient // generated stub that the wrapper methods delegate to
}
// withRetry is the gax call option applied to every IAMPolicy RPC in this
// file: retry on DeadlineExceeded and Unavailable with exponential backoff
// starting at 100ms, growing 1.3x per attempt, capped at 60s.
var withRetry = gax.WithRetry(func() gax.Retryer {
	return gax.OnCodes([]codes.Code{
		codes.DeadlineExceeded,
		codes.Unavailable,
	}, gax.Backoff{
		Initial:    100 * time.Millisecond,
		Max:        60 * time.Second,
		Multiplier: 1.3,
	})
})
| func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { | |||
| var proto *pb.Policy | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) | |||
| ctx = insertMetadata(ctx, md) | |||
| err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { | |||
| var err error | |||
| proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource}) | |||
| return err | |||
| }, withRetry) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return proto, nil | |||
| } | |||
| func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) | |||
| ctx = insertMetadata(ctx, md) | |||
| return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { | |||
| _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ | |||
| Resource: resource, | |||
| Policy: p, | |||
| }) | |||
| return err | |||
| }, withRetry) | |||
| } | |||
| func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { | |||
| var res *pb.TestIamPermissionsResponse | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) | |||
| ctx = insertMetadata(ctx, md) | |||
| err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { | |||
| var err error | |||
| res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ | |||
| Resource: resource, | |||
| Permissions: perms, | |||
| }) | |||
| return err | |||
| }, withRetry) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return res.Permissions, nil | |||
| } | |||
// A Handle provides IAM operations for a resource.
type Handle struct {
	c        client // transport implementation (gRPC-based by default)
	resource string // name of the resource the operations target
}
// InternalNewHandle is for use by the Google Cloud Libraries only.
//
// InternalNewHandle returns a Handle for resource.
// The conn parameter refers to a server that must support the IAMPolicy service.
// The returned Handle uses the gRPC client wrapper with the default retry settings.
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
	return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
}
// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleGRPCClient returns a Handle for resource using the given
// grpc service that implements IAM as a mixin.
func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
	return InternalNewHandleClient(&grpcClient{c: c}, resource)
}
| // InternalNewHandleClient is for use by the Google Cloud Libraries only. | |||
| // | |||
| // InternalNewHandleClient returns a Handle for resource using the given | |||
| // client implementation. | |||
| func InternalNewHandleClient(c client, resource string) *Handle { | |||
| return &Handle{ | |||
| c: c, | |||
| resource: resource, | |||
| } | |||
| } | |||
// Policy retrieves the IAM policy for the resource.
func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
	proto, err := h.c.Get(ctx, h.resource)
	if err != nil {
		return nil, err
	}
	// Wrap the raw proto; the Policy methods mutate it in place.
	return &Policy{InternalProto: proto}, nil
}
// SetPolicy replaces the resource's current policy with the supplied Policy.
//
// If policy was created from a prior call to Get, then the modification will
// only succeed if the policy has not changed since the Get.
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
	// The underlying proto (including any etag-style state it carries) is
	// sent as-is; concurrency checking happens server-side.
	return h.c.Set(ctx, h.resource, policy.InternalProto)
}
// TestPermissions returns the subset of permissions that the caller has on the resource.
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
	return h.c.Test(ctx, h.resource, permissions)
}
// A RoleName is a name representing a collection of permissions,
// such as the common role names declared below.
type RoleName string

// Common role names.
const (
	Owner  RoleName = "roles/owner"
	Editor RoleName = "roles/editor"
	Viewer RoleName = "roles/viewer"
)

// Special member names that may appear in bindings alongside ordinary identities.
const (
	// AllUsers is a special member that denotes all users, even unauthenticated ones.
	AllUsers = "allUsers"

	// AllAuthenticatedUsers is a special member that denotes all authenticated users.
	AllAuthenticatedUsers = "allAuthenticatedUsers"
)
// A Policy is a list of Bindings representing roles
// granted to members.
//
// The zero Policy is a valid policy with no bindings.
type Policy struct {
	// TODO(jba): when type aliases are available, put Policy into an internal package
	// and provide an exported alias here.

	// InternalProto is the wire-format policy that all Policy methods read
	// and mutate in place.
	// This field is exported for use by the Google Cloud Libraries only.
	// It may become unexported in a future release.
	InternalProto *pb.Policy
}
| // Members returns the list of members with the supplied role. | |||
| // The return value should not be modified. Use Add and Remove | |||
| // to modify the members of a role. | |||
| func (p *Policy) Members(r RoleName) []string { | |||
| b := p.binding(r) | |||
| if b == nil { | |||
| return nil | |||
| } | |||
| return b.Members | |||
| } | |||
| // HasRole reports whether member has role r. | |||
| func (p *Policy) HasRole(member string, r RoleName) bool { | |||
| return memberIndex(member, p.binding(r)) >= 0 | |||
| } | |||
| // Add adds member member to role r if it is not already present. | |||
| // A new binding is created if there is no binding for the role. | |||
| func (p *Policy) Add(member string, r RoleName) { | |||
| b := p.binding(r) | |||
| if b == nil { | |||
| if p.InternalProto == nil { | |||
| p.InternalProto = &pb.Policy{} | |||
| } | |||
| p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{ | |||
| Role: string(r), | |||
| Members: []string{member}, | |||
| }) | |||
| return | |||
| } | |||
| if memberIndex(member, b) < 0 { | |||
| b.Members = append(b.Members, member) | |||
| return | |||
| } | |||
| } | |||
// Remove removes member from role r if it is present.
func (p *Policy) Remove(member string, r RoleName) {
	bi := p.bindingIndex(r)
	if bi < 0 {
		// No binding for this role: nothing to do.
		return
	}
	bindings := p.InternalProto.Bindings
	b := bindings[bi]
	mi := memberIndex(member, b)
	if mi < 0 {
		// Member not present in the binding: nothing to do.
		return
	}
	// Order doesn't matter for bindings or members, so to remove, move the last item
	// into the removed spot and shrink the slice.
	if len(b.Members) == 1 {
		// Remove binding.
		// Nil out the vacated tail slot so the backing array does not
		// keep the removed *pb.Binding alive.
		last := len(bindings) - 1
		bindings[bi] = bindings[last]
		bindings[last] = nil
		p.InternalProto.Bindings = bindings[:last]
		return
	}
	// Remove member.
	// TODO(jba): worry about multiple copies of m?
	last := len(b.Members) - 1
	b.Members[mi] = b.Members[last]
	b.Members[last] = "" // clear the tail slot so the backing array drops the string
	b.Members = b.Members[:last]
}
| // Roles returns the names of all the roles that appear in the Policy. | |||
| func (p *Policy) Roles() []RoleName { | |||
| if p.InternalProto == nil { | |||
| return nil | |||
| } | |||
| var rns []RoleName | |||
| for _, b := range p.InternalProto.Bindings { | |||
| rns = append(rns, RoleName(b.Role)) | |||
| } | |||
| return rns | |||
| } | |||
// binding returns the Binding for the supplied role, or nil if there isn't one.
func (p *Policy) binding(r RoleName) *pb.Binding {
	i := p.bindingIndex(r)
	if i < 0 {
		return nil
	}
	return p.InternalProto.Bindings[i]
}
| func (p *Policy) bindingIndex(r RoleName) int { | |||
| if p.InternalProto == nil { | |||
| return -1 | |||
| } | |||
| for i, b := range p.InternalProto.Bindings { | |||
| if b.Role == string(r) { | |||
| return i | |||
| } | |||
| } | |||
| return -1 | |||
| } | |||
| // memberIndex returns the index of m in b's Members, or -1 if not found. | |||
| func memberIndex(m string, b *pb.Binding) int { | |||
| if b == nil { | |||
| return -1 | |||
| } | |||
| for i, mm := range b.Members { | |||
| if mm == m { | |||
| return i | |||
| } | |||
| } | |||
| return -1 | |||
| } | |||
// insertMetadata inserts metadata into the given context, merging the given
// MDs with any outgoing metadata already attached to ctx. Values for a key
// present in both are appended, not replaced.
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	// Copy before mutating: the MD attached to ctx may be shared with callers.
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}
| @@ -0,0 +1,108 @@ | |||
| // Copyright 2016 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| // Package optional provides versions of primitive types that can | |||
| // be nil. These are useful in methods that update some of an API object's | |||
| // fields. | |||
| package optional | |||
| import ( | |||
| "fmt" | |||
| "strings" | |||
| "time" | |||
| ) | |||
// Each type below is an alias for interface{}: a nil value means "unset",
// and a non-nil value must hold the corresponding concrete type
// (enforced by the To* accessors, which panic otherwise).
type (
	// Bool is either a bool or nil.
	Bool interface{}
	// String is either a string or nil.
	String interface{}
	// Int is either an int or nil.
	Int interface{}
	// Uint is either a uint or nil.
	Uint interface{}
	// Float64 is either a float64 or nil.
	Float64 interface{}
	// Duration is either a time.Duration or nil.
	Duration interface{}
)
| // ToBool returns its argument as a bool. | |||
| // It panics if its argument is nil or not a bool. | |||
| func ToBool(v Bool) bool { | |||
| x, ok := v.(bool) | |||
| if !ok { | |||
| doPanic("Bool", v) | |||
| } | |||
| return x | |||
| } | |||
| // ToString returns its argument as a string. | |||
| // It panics if its argument is nil or not a string. | |||
| func ToString(v String) string { | |||
| x, ok := v.(string) | |||
| if !ok { | |||
| doPanic("String", v) | |||
| } | |||
| return x | |||
| } | |||
| // ToInt returns its argument as an int. | |||
| // It panics if its argument is nil or not an int. | |||
| func ToInt(v Int) int { | |||
| x, ok := v.(int) | |||
| if !ok { | |||
| doPanic("Int", v) | |||
| } | |||
| return x | |||
| } | |||
| // ToUint returns its argument as a uint. | |||
| // It panics if its argument is nil or not a uint. | |||
| func ToUint(v Uint) uint { | |||
| x, ok := v.(uint) | |||
| if !ok { | |||
| doPanic("Uint", v) | |||
| } | |||
| return x | |||
| } | |||
| // ToFloat64 returns its argument as a float64. | |||
| // It panics if its argument is nil or not a float64. | |||
| func ToFloat64(v Float64) float64 { | |||
| x, ok := v.(float64) | |||
| if !ok { | |||
| doPanic("Float64", v) | |||
| } | |||
| return x | |||
| } | |||
| // ToDuration returns its argument as a time.Duration. | |||
| // It panics if its argument is nil or not a time.Duration. | |||
| func ToDuration(v Duration) time.Duration { | |||
| x, ok := v.(time.Duration) | |||
| if !ok { | |||
| doPanic("Duration", v) | |||
| } | |||
| return x | |||
| } | |||
// doPanic raises a descriptive panic for a failed To* conversion: capType is
// the capitalized optional type name (e.g. "Bool"); v is the offending value.
func doPanic(capType string, v interface{}) {
	msg := fmt.Sprintf("optional.%s value should be %s, got %T",
		capType, strings.ToLower(capType), v)
	panic(msg)
}
| @@ -0,0 +1,19 @@ | |||
#!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Invoked via `go generate`: rewrites the `const Repo = "YYYYMMDD"` line in
# the file named by $GOFILE (set by the go tool) to today's date.
today=$(date +%Y%m%d)

# Quote "$GOFILE" so a path containing spaces is not word-split; $today is
# digits only but quoted for consistency.
sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'"$today"'"/' "$GOFILE"
| @@ -0,0 +1,71 @@ | |||
| // Copyright 2016 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| //go:generate ./update_version.sh | |||
| // Package version contains version information for Google Cloud Client | |||
| // Libraries for Go, as reported in request headers. | |||
| package version | |||
| import ( | |||
| "runtime" | |||
| "strings" | |||
| "unicode" | |||
| ) | |||
// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20190802"

// Go returns the Go runtime version. The returned string
// has no whitespace.
func Go() string {
	return goVersion
}

// goVersion caches the normalized runtime version, computed once at
// package initialization.
var goVersion = goVer(runtime.Version())

// develPrefix is the prefix runtime.Version reports for development
// builds of the Go toolchain.
const develPrefix = "devel +"
// goVer normalizes a Go runtime version string (as returned by
// runtime.Version) into a whitespace-free identifier: release versions
// become dotted triples ("go1.9" -> "1.9.0", "go1.12beta1" ->
// "1.12.0-beta1"), devel builds yield the token following "devel +",
// and anything unrecognized yields "".
func goVer(s string) string {
	const devel = "devel +"
	if strings.HasPrefix(s, devel) {
		v := s[len(devel):]
		if i := strings.IndexFunc(v, unicode.IsSpace); i >= 0 {
			v = v[:i]
		}
		return v
	}
	if !strings.HasPrefix(s, "go1") {
		return ""
	}
	v, prerelease := s[2:], "" // strip "go", keeping the leading "1"
	nonSemver := func(r rune) bool { return !strings.ContainsRune("0123456789.", r) }
	if i := strings.IndexFunc(v, nonSemver); i >= 0 {
		v, prerelease = v[:i], v[i:]
	}
	switch {
	case strings.HasSuffix(v, "."):
		v += "0"
	case strings.Count(v, ".") < 2:
		v += ".0" // pad to major.minor.patch
	}
	if prerelease == "" {
		return v
	}
	return v + "-" + prerelease
}
// notSemverRune reports whether r cannot appear in the numeric core of a
// version string (i.e. r is not a digit or a dot).
func notSemverRune(r rune) bool {
	return strings.IndexRune("0123456789.", r) == -1
}
| @@ -0,0 +1,46 @@ | |||
| ## Cloud Pub/Sub [](https://godoc.org/cloud.google.com/go/pubsub) | |||
- [About Cloud Pub/Sub](https://cloud.google.com/pubsub/)
| - [API documentation](https://cloud.google.com/pubsub/docs) | |||
| - [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub) | |||
| - [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub) | |||
| ### Example Usage | |||
| First create a `pubsub.Client` to use throughout your application: | |||
| [snip]:# (pubsub-1) | |||
| ```go | |||
| client, err := pubsub.NewClient(ctx, "project-id") | |||
| if err != nil { | |||
| log.Fatal(err) | |||
| } | |||
| ``` | |||
| Then use the client to publish and subscribe: | |||
| [snip]:# (pubsub-2) | |||
| ```go | |||
| // Publish "hello world" on topic1. | |||
| topic := client.Topic("topic1") | |||
| res := topic.Publish(ctx, &pubsub.Message{ | |||
| Data: []byte("hello world"), | |||
| }) | |||
| // The publish happens asynchronously. | |||
| // Later, you can get the result from res: | |||
// ...
| msgID, err := res.Get(ctx) | |||
| if err != nil { | |||
| log.Fatal(err) | |||
| } | |||
| // Use a callback to receive messages via subscription1. | |||
| sub := client.Subscription("subscription1") | |||
| err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { | |||
| fmt.Println(m.Data) | |||
| m.Ack() // Acknowledge that we've consumed the message. | |||
| }) | |||
| if err != nil { | |||
| log.Println(err) | |||
| } | |||
| ``` | |||
| @@ -0,0 +1,9 @@ | |||
Auto-generated pubsub v1 clients
================================
| This package includes auto-generated clients for the pubsub v1 API. | |||
| Use the handwritten client (in the parent directory, | |||
| cloud.google.com/go/pubsub) in preference to this. | |||
| This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. | |||
| @@ -0,0 +1,103 @@ | |||
| // Copyright 2019 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // https://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| // Code generated by gapic-generator. DO NOT EDIT. | |||
| // Package pubsub is an auto-generated package for the | |||
| // Google Cloud Pub/Sub API. | |||
| // | |||
| // Provides reliable, many-to-many, asynchronous messaging between | |||
| // applications. | |||
| // | |||
| // Use of Context | |||
| // | |||
| // The ctx passed to NewClient is used for authentication requests and | |||
| // for creating the underlying connection, but is not used for subsequent calls. | |||
| // Individual methods on the client use the ctx given to them. | |||
| // | |||
| // To close the open connection, use the Close() method. | |||
| // | |||
| // For information about setting deadlines, reusing contexts, and more | |||
| // please visit godoc.org/cloud.google.com/go. | |||
| // | |||
| // Use the client at cloud.google.com/go/pubsub in preference to this. | |||
| package pubsub // import "cloud.google.com/go/pubsub/apiv1" | |||
| import ( | |||
| "context" | |||
| "runtime" | |||
| "strings" | |||
| "unicode" | |||
| "google.golang.org/grpc/metadata" | |||
| ) | |||
// insertMetadata merges the given MDs into any outgoing metadata already
// attached to ctx and returns the resulting context. Values for a key
// present in both are appended, not replaced.
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	// Copy before mutating: the MD attached to ctx may be shared.
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
// A fresh slice is returned on every call, so callers may modify it freely.
func DefaultAuthScopes() []string {
	scopes := make([]string, 0, 2)
	scopes = append(scopes,
		"https://www.googleapis.com/auth/cloud-platform",
		"https://www.googleapis.com/auth/pubsub",
	)
	return scopes
}
// versionGo returns the Go runtime version. The returned string
// has no whitespace, suitable for reporting in header.
//
// Release toolchains are normalized to dotted triples ("go1.12" ->
// "1.12.0", "go1.12beta1" -> "1.12.0-beta1"); devel builds yield the
// token following "devel +"; anything unrecognized yields "UNKNOWN".
func versionGo() string {
	const develPrefix = "devel +"

	s := runtime.Version()
	if strings.HasPrefix(s, develPrefix) {
		// Development toolchain: report the token after "devel +",
		// truncated at the first space.
		s = s[len(develPrefix):]
		if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
			s = s[:p]
		}
		return s
	}

	// notSemverRune reports whether r cannot appear in the numeric core
	// of a version (digits and dots only). Idiomatic ContainsRune
	// replaces the original IndexRune(...) < 0 spelling.
	notSemverRune := func(r rune) bool {
		return !strings.ContainsRune("0123456789.", r)
	}

	if strings.HasPrefix(s, "go1") {
		s = s[2:] // strip "go", keeping the leading "1"
		var prerelease string
		if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
			s, prerelease = s[:p], s[p:]
		}
		if strings.HasSuffix(s, ".") {
			s += "0"
		} else if strings.Count(s, ".") < 2 {
			s += ".0" // pad to major.minor.patch
		}
		if prerelease != "" {
			s += "-" + prerelease
		}
		return s
	}
	return "UNKNOWN"
}
| const versionClient = "20190819" | |||
| @@ -0,0 +1,36 @@ | |||
| // Copyright 2018 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // https://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| package pubsub | |||
| import ( | |||
| "cloud.google.com/go/iam" | |||
| pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" | |||
| ) | |||
// SubscriptionIAM returns an iam.Handle for the given subscription, backed
// by this client's connection.
func (c *PublisherClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle {
	return iam.InternalNewHandle(c.Connection(), subscription.Name)
}
// TopicIAM returns an iam.Handle for the given topic, backed by this
// client's connection.
func (c *PublisherClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle {
	return iam.InternalNewHandle(c.Connection(), topic.Name)
}
// SubscriptionIAM returns an iam.Handle for the given subscription, backed
// by this client's connection.
func (c *SubscriberClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle {
	return iam.InternalNewHandle(c.Connection(), subscription.Name)
}
// TopicIAM returns an iam.Handle for the given topic, backed by this
// client's connection.
func (c *SubscriberClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle {
	return iam.InternalNewHandle(c.Connection(), topic.Name)
}
| @@ -0,0 +1,95 @@ | |||
| // Copyright 2018 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // https://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| package pubsub | |||
// PublisherProjectPath returns the path for the project resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s", project)
// instead.
func PublisherProjectPath(project string) string {
	return "projects/" + project
}
// PublisherTopicPath returns the path for the topic resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s/topics/%s", project, topic)
// instead.
func PublisherTopicPath(project, topic string) string {
	return "projects/" + project + "/topics/" + topic
}
// SubscriberProjectPath returns the path for the project resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s", project)
// instead.
func SubscriberProjectPath(project string) string {
	return "projects/" + project
}
// SubscriberSnapshotPath returns the path for the snapshot resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s/snapshots/%s", project, snapshot)
// instead.
func SubscriberSnapshotPath(project, snapshot string) string {
	return "projects/" + project + "/snapshots/" + snapshot
}
// SubscriberSubscriptionPath returns the path for the subscription resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s/subscriptions/%s", project, subscription)
// instead.
func SubscriberSubscriptionPath(project, subscription string) string {
	return "projects/" + project + "/subscriptions/" + subscription
}
// SubscriberTopicPath returns the path for the topic resource.
//
// Deprecated: Use
//   fmt.Sprintf("projects/%s/topics/%s", project, topic)
// instead.
func SubscriberTopicPath(project, topic string) string {
	return "projects/" + project + "/topics/" + topic
}
| @@ -0,0 +1,417 @@ | |||
| // Copyright 2019 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // https://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| // Code generated by gapic-generator. DO NOT EDIT. | |||
| package pubsub | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "math" | |||
| "net/url" | |||
| "time" | |||
| "github.com/golang/protobuf/proto" | |||
| gax "github.com/googleapis/gax-go/v2" | |||
| "google.golang.org/api/iterator" | |||
| "google.golang.org/api/option" | |||
| "google.golang.org/api/transport" | |||
| pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" | |||
| "google.golang.org/grpc" | |||
| "google.golang.org/grpc/codes" | |||
| "google.golang.org/grpc/metadata" | |||
| ) | |||
// PublisherCallOptions contains the retry settings for each method of PublisherClient.
// Each field holds the gax call options applied to the corresponding RPC;
// defaults come from defaultPublisherCallOptions.
type PublisherCallOptions struct {
	CreateTopic            []gax.CallOption
	UpdateTopic            []gax.CallOption
	Publish                []gax.CallOption
	GetTopic               []gax.CallOption
	ListTopics             []gax.CallOption
	ListTopicSubscriptions []gax.CallOption
	DeleteTopic            []gax.CallOption
}
// defaultPublisherClientOptions returns the base dial options for the
// publisher client: the pubsub endpoint, the default auth scopes, and a
// MaxInt32 gRPC receive-size limit.
func defaultPublisherClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("pubsub.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
		option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(math.MaxInt32))),
	}
}
// defaultPublisherCallOptions builds the per-method retry policies for
// PublisherClient. Three classes exist: idempotent methods additionally
// retry on Aborted and Unknown; non-idempotent methods retry only on
// Unavailable; Publish retries on the widest set of codes. All three use
// the same backoff (100ms initial, 1.3x multiplier, 60s cap).
func defaultPublisherCallOptions() *PublisherCallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.Aborted,
					codes.Unavailable,
					codes.Unknown,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
		{"default", "non_idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
		{"messaging", "publish"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.Aborted,
					codes.Canceled,
					codes.DeadlineExceeded,
					codes.Internal,
					codes.ResourceExhausted,
					codes.Unavailable,
					codes.Unknown,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	return &PublisherCallOptions{
		CreateTopic:            retry[[2]string{"default", "non_idempotent"}],
		UpdateTopic:            retry[[2]string{"default", "non_idempotent"}],
		Publish:                retry[[2]string{"messaging", "publish"}],
		GetTopic:               retry[[2]string{"default", "idempotent"}],
		ListTopics:             retry[[2]string{"default", "idempotent"}],
		ListTopicSubscriptions: retry[[2]string{"default", "idempotent"}],
		DeleteTopic:            retry[[2]string{"default", "non_idempotent"}],
	}
}
// PublisherClient is a client for interacting with Google Cloud Pub/Sub API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type PublisherClient struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	publisherClient pubsubpb.PublisherClient

	// The call options for this service; populated with
	// defaultPublisherCallOptions by NewPublisherClient.
	CallOptions *PublisherCallOptions

	// The x-goog-* metadata to be sent with each request.
	// Set by SetGoogleClientInfo.
	xGoogMetadata metadata.MD
}
// NewPublisherClient creates a new publisher client.
//
// The service that an application uses to manipulate topics, and to send
// messages to a topic.
func NewPublisherClient(ctx context.Context, opts ...option.ClientOption) (*PublisherClient, error) {
	// Caller-supplied opts are appended last so they override the defaults.
	conn, err := transport.DialGRPC(ctx, append(defaultPublisherClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &PublisherClient{
		conn:            conn,
		CallOptions:     defaultPublisherCallOptions(),
		publisherClient: pubsubpb.NewPublisherClient(conn),
	}
	// Install the default x-goog-api-client header metadata.
	c.SetGoogleClientInfo()
	return c, nil
}
// Connection returns the client's connection to the API service.
// This is the same connection that Close closes.
func (c *PublisherClient) Connection() *grpc.ClientConn {
	return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *PublisherClient) Close() error {
	return c.conn.Close()
}
| // SetGoogleClientInfo sets the name and version of the application in | |||
| // the `x-goog-api-client` header passed on each request. Intended for | |||
| // use by Google-written clients. | |||
| func (c *PublisherClient) SetGoogleClientInfo(keyval ...string) { | |||
| kv := append([]string{"gl-go", versionGo()}, keyval...) | |||
| kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) | |||
| c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) | |||
| } | |||
| // CreateTopic creates the given topic with the given name. See the | |||
| // <a href="https://cloud.google.com/pubsub/docs/admin#resource_names"> | |||
| // resource name rules</a>. | |||
| func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.CreateTopic[0:len(c.CallOptions.CreateTopic):len(c.CallOptions.CreateTopic)], opts...) | |||
| var resp *pubsubpb.Topic | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| resp, err = c.publisherClient.CreateTopic(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // UpdateTopic updates an existing topic. Note that certain properties of a | |||
| // topic are not modifiable. | |||
| func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic.name", url.QueryEscape(req.GetTopic().GetName()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.UpdateTopic[0:len(c.CallOptions.UpdateTopic):len(c.CallOptions.UpdateTopic)], opts...) | |||
| var resp *pubsubpb.Topic | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| resp, err = c.publisherClient.UpdateTopic(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // Publish adds one or more messages to the topic. Returns NOT_FOUND if the topic | |||
| // does not exist. | |||
| func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.Publish[0:len(c.CallOptions.Publish):len(c.CallOptions.Publish)], opts...) | |||
| var resp *pubsubpb.PublishResponse | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| resp, err = c.publisherClient.Publish(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // GetTopic gets the configuration of a topic. | |||
| func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.GetTopic[0:len(c.CallOptions.GetTopic):len(c.CallOptions.GetTopic)], opts...) | |||
| var resp *pubsubpb.Topic | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| resp, err = c.publisherClient.GetTopic(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
// ListTopics lists matching topics.
//
// The returned iterator fetches pages lazily; errors from the underlying RPC
// surface from the iterator's Next method.
func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject())))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full slice expression so appending caller opts cannot mutate the defaults.
	opts = append(c.CallOptions.ListTopics[0:len(c.CallOptions.ListTopics):len(c.CallOptions.ListTopics)], opts...)
	it := &TopicIterator{}
	// Clone the request: InternalFetch mutates PageToken/PageSize on each page.
	req = proto.Clone(req).(*pubsubpb.ListTopicsRequest)
	it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Topic, string, error) {
		var resp *pubsubpb.ListTopicsResponse
		req.PageToken = pageToken
		// PageSize is int32 on the wire; clamp larger requests.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.publisherClient.ListTopics(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.Topics, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator package's paging contract,
	// accumulating items into the iterator's buffer.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	// Seed paging state from the (cloned) request so callers may preset
	// PageSize/PageToken on req.
	it.pageInfo.MaxSize = int(req.PageSize)
	it.pageInfo.Token = req.PageToken
	return it
}
// ListTopicSubscriptions lists the names of the subscriptions on this topic.
//
// The returned iterator fetches pages lazily; errors from the underlying RPC
// surface from the iterator's Next method.
func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic())))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full slice expression so appending caller opts cannot mutate the defaults.
	opts = append(c.CallOptions.ListTopicSubscriptions[0:len(c.CallOptions.ListTopicSubscriptions):len(c.CallOptions.ListTopicSubscriptions)], opts...)
	it := &StringIterator{}
	// Clone the request: InternalFetch mutates PageToken/PageSize on each page.
	req = proto.Clone(req).(*pubsubpb.ListTopicSubscriptionsRequest)
	it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) {
		var resp *pubsubpb.ListTopicSubscriptionsResponse
		req.PageToken = pageToken
		// PageSize is int32 on the wire; clamp larger requests.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.publisherClient.ListTopicSubscriptions(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.Subscriptions, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator package's paging contract,
	// accumulating items into the iterator's buffer.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	// Seed paging state from the (cloned) request so callers may preset
	// PageSize/PageToken on req.
	it.pageInfo.MaxSize = int(req.PageSize)
	it.pageInfo.Token = req.PageToken
	return it
}
| // DeleteTopic deletes the topic with the given name. Returns NOT_FOUND if the topic | |||
| // does not exist. After a topic is deleted, a new topic may be created with | |||
| // the same name; this is an entirely new topic with none of the old | |||
| // configuration or subscriptions. Existing subscriptions to this topic are | |||
| // not deleted, but their topic field is set to _deleted-topic_. | |||
| func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.DeleteTopic[0:len(c.CallOptions.DeleteTopic):len(c.CallOptions.DeleteTopic)], opts...) | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| _, err = c.publisherClient.DeleteTopic(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| return err | |||
| } | |||
// StringIterator manages a stream of string.
type StringIterator struct {
	// items buffers results fetched but not yet returned by Next.
	items    []string
	pageInfo *iterator.PageInfo
	// nextFunc is supplied by iterator.NewPageInfo; it fills items as needed.
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *StringIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}
| // Next returns the next result. Its second return value is iterator.Done if there are no more | |||
| // results. Once Next returns Done, all subsequent calls will return Done. | |||
| func (it *StringIterator) Next() (string, error) { | |||
| var item string | |||
| if err := it.nextFunc(); err != nil { | |||
| return item, err | |||
| } | |||
| item = it.items[0] | |||
| it.items = it.items[1:] | |||
| return item, nil | |||
| } | |||
// bufLen reports how many fetched-but-unreturned items are buffered.
func (it *StringIterator) bufLen() int {
	return len(it.items)
}
| func (it *StringIterator) takeBuf() interface{} { | |||
| b := it.items | |||
| it.items = nil | |||
| return b | |||
| } | |||
// TopicIterator manages a stream of *pubsubpb.Topic.
type TopicIterator struct {
	// items buffers results fetched but not yet returned by Next.
	items    []*pubsubpb.Topic
	pageInfo *iterator.PageInfo
	// nextFunc is supplied by iterator.NewPageInfo; it fills items as needed.
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Topic, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TopicIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}
| // Next returns the next result. Its second return value is iterator.Done if there are no more | |||
| // results. Once Next returns Done, all subsequent calls will return Done. | |||
| func (it *TopicIterator) Next() (*pubsubpb.Topic, error) { | |||
| var item *pubsubpb.Topic | |||
| if err := it.nextFunc(); err != nil { | |||
| return item, err | |||
| } | |||
| item = it.items[0] | |||
| it.items = it.items[1:] | |||
| return item, nil | |||
| } | |||
// bufLen reports how many fetched-but-unreturned items are buffered.
func (it *TopicIterator) bufLen() int {
	return len(it.items)
}
| func (it *TopicIterator) takeBuf() interface{} { | |||
| b := it.items | |||
| it.items = nil | |||
| return b | |||
| } | |||
| @@ -0,0 +1,635 @@ | |||
| // Copyright 2019 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // https://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| // Code generated by gapic-generator. DO NOT EDIT. | |||
| package pubsub | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "math" | |||
| "net/url" | |||
| "time" | |||
| "github.com/golang/protobuf/proto" | |||
| gax "github.com/googleapis/gax-go/v2" | |||
| "google.golang.org/api/iterator" | |||
| "google.golang.org/api/option" | |||
| "google.golang.org/api/transport" | |||
| pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" | |||
| "google.golang.org/grpc" | |||
| "google.golang.org/grpc/codes" | |||
| "google.golang.org/grpc/metadata" | |||
| ) | |||
// SubscriberCallOptions contains the retry settings for each method of SubscriberClient.
// Defaults are produced by defaultSubscriberCallOptions; callers may replace
// any entry before issuing calls.
type SubscriberCallOptions struct {
	CreateSubscription []gax.CallOption
	GetSubscription    []gax.CallOption
	UpdateSubscription []gax.CallOption
	ListSubscriptions  []gax.CallOption
	DeleteSubscription []gax.CallOption
	ModifyAckDeadline  []gax.CallOption
	Acknowledge        []gax.CallOption
	Pull               []gax.CallOption
	StreamingPull      []gax.CallOption
	ModifyPushConfig   []gax.CallOption
	ListSnapshots      []gax.CallOption
	CreateSnapshot     []gax.CallOption
	UpdateSnapshot     []gax.CallOption
	DeleteSnapshot     []gax.CallOption
	Seek               []gax.CallOption
}
| func defaultSubscriberClientOptions() []option.ClientOption { | |||
| return []option.ClientOption{ | |||
| option.WithEndpoint("pubsub.googleapis.com:443"), | |||
| option.WithScopes(DefaultAuthScopes()...), | |||
| option.WithGRPCDialOption(grpc.WithDefaultCallOptions( | |||
| grpc.MaxCallRecvMsgSize(math.MaxInt32))), | |||
| } | |||
| } | |||
| func defaultSubscriberCallOptions() *SubscriberCallOptions { | |||
| retry := map[[2]string][]gax.CallOption{ | |||
| {"default", "idempotent"}: { | |||
| gax.WithRetry(func() gax.Retryer { | |||
| return gax.OnCodes([]codes.Code{ | |||
| codes.Aborted, | |||
| codes.Unavailable, | |||
| codes.Unknown, | |||
| }, gax.Backoff{ | |||
| Initial: 100 * time.Millisecond, | |||
| Max: 60000 * time.Millisecond, | |||
| Multiplier: 1.3, | |||
| }) | |||
| }), | |||
| }, | |||
| {"default", "non_idempotent"}: { | |||
| gax.WithRetry(func() gax.Retryer { | |||
| return gax.OnCodes([]codes.Code{ | |||
| codes.Unavailable, | |||
| }, gax.Backoff{ | |||
| Initial: 100 * time.Millisecond, | |||
| Max: 60000 * time.Millisecond, | |||
| Multiplier: 1.3, | |||
| }) | |||
| }), | |||
| }, | |||
| {"messaging", "idempotent"}: { | |||
| gax.WithRetry(func() gax.Retryer { | |||
| return gax.OnCodes([]codes.Code{ | |||
| codes.Aborted, | |||
| codes.Unavailable, | |||
| codes.Unknown, | |||
| }, gax.Backoff{ | |||
| Initial: 100 * time.Millisecond, | |||
| Max: 60000 * time.Millisecond, | |||
| Multiplier: 1.3, | |||
| }) | |||
| }), | |||
| }, | |||
| {"messaging", "non_idempotent"}: { | |||
| gax.WithRetry(func() gax.Retryer { | |||
| return gax.OnCodes([]codes.Code{ | |||
| codes.Unavailable, | |||
| }, gax.Backoff{ | |||
| Initial: 100 * time.Millisecond, | |||
| Max: 60000 * time.Millisecond, | |||
| Multiplier: 1.3, | |||
| }) | |||
| }), | |||
| }, | |||
| } | |||
| return &SubscriberCallOptions{ | |||
| CreateSubscription: retry[[2]string{"default", "idempotent"}], | |||
| GetSubscription: retry[[2]string{"default", "idempotent"}], | |||
| UpdateSubscription: retry[[2]string{"default", "non_idempotent"}], | |||
| ListSubscriptions: retry[[2]string{"default", "idempotent"}], | |||
| DeleteSubscription: retry[[2]string{"default", "non_idempotent"}], | |||
| ModifyAckDeadline: retry[[2]string{"default", "non_idempotent"}], | |||
| Acknowledge: retry[[2]string{"messaging", "non_idempotent"}], | |||
| Pull: retry[[2]string{"messaging", "idempotent"}], | |||
| StreamingPull: retry[[2]string{"streaming_messaging", "none"}], | |||
| ModifyPushConfig: retry[[2]string{"default", "non_idempotent"}], | |||
| ListSnapshots: retry[[2]string{"default", "idempotent"}], | |||
| CreateSnapshot: retry[[2]string{"default", "non_idempotent"}], | |||
| UpdateSnapshot: retry[[2]string{"default", "non_idempotent"}], | |||
| DeleteSnapshot: retry[[2]string{"default", "non_idempotent"}], | |||
| Seek: retry[[2]string{"default", "idempotent"}], | |||
| } | |||
| } | |||
// SubscriberClient is a client for interacting with Google Cloud Pub/Sub API.
//
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
type SubscriberClient struct {
	// The connection to the service. Shared by all RPCs; closed by Close.
	conn *grpc.ClientConn

	// The gRPC API client. Generated stub bound to conn.
	subscriberClient pubsubpb.SubscriberClient

	// The call options for this service. Per-method retry/backoff defaults;
	// exported so callers may adjust them before issuing calls.
	CallOptions *SubscriberCallOptions

	// The x-goog-* metadata to be sent with each request.
	// Populated by SetGoogleClientInfo.
	xGoogMetadata metadata.MD
}
| // NewSubscriberClient creates a new subscriber client. | |||
| // | |||
| // The service that an application uses to manipulate subscriptions and to | |||
| // consume messages from a subscription via the Pull method or by | |||
| // establishing a bi-directional stream using the StreamingPull method. | |||
| func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*SubscriberClient, error) { | |||
| conn, err := transport.DialGRPC(ctx, append(defaultSubscriberClientOptions(), opts...)...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| c := &SubscriberClient{ | |||
| conn: conn, | |||
| CallOptions: defaultSubscriberCallOptions(), | |||
| subscriberClient: pubsubpb.NewSubscriberClient(conn), | |||
| } | |||
| c.SetGoogleClientInfo() | |||
| return c, nil | |||
| } | |||
// Connection returns the client's connection to the API service.
// This is the same connection that Close closes.
func (c *SubscriberClient) Connection() *grpc.ClientConn {
	return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *SubscriberClient) Close() error {
	return c.conn.Close()
}
| // SetGoogleClientInfo sets the name and version of the application in | |||
| // the `x-goog-api-client` header passed on each request. Intended for | |||
| // use by Google-written clients. | |||
| func (c *SubscriberClient) SetGoogleClientInfo(keyval ...string) { | |||
| kv := append([]string{"gl-go", versionGo()}, keyval...) | |||
| kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) | |||
| c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) | |||
| } | |||
| // CreateSubscription creates a subscription to a given topic. See the | |||
| // <a href="https://cloud.google.com/pubsub/docs/admin#resource_names"> | |||
| // resource name rules</a>. | |||
| // If the subscription already exists, returns ALREADY_EXISTS. | |||
| // If the corresponding topic doesn't exist, returns NOT_FOUND. | |||
| // | |||
| // If the name is not provided in the request, the server will assign a random | |||
| // name for this subscription on the same project as the topic, conforming | |||
| // to the | |||
| // resource name | |||
| // format (at https://cloud.google.com/pubsub/docs/admin#resource_names). The | |||
| // generated name is populated in the returned Subscription object. Note that | |||
| // for REST API requests, you must specify a name in the request. | |||
| func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.CreateSubscription[0:len(c.CallOptions.CreateSubscription):len(c.CallOptions.CreateSubscription)], opts...) | |||
| var resp *pubsubpb.Subscription | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| resp, err = c.subscriberClient.CreateSubscription(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // GetSubscription gets the configuration details of a subscription. | |||
| func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.GetSubscription[0:len(c.CallOptions.GetSubscription):len(c.CallOptions.GetSubscription)], opts...) | |||
| var resp *pubsubpb.Subscription | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| resp, err = c.subscriberClient.GetSubscription(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // UpdateSubscription updates an existing subscription. Note that certain properties of a | |||
| // subscription, such as its topic, are not modifiable. | |||
| func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription.name", url.QueryEscape(req.GetSubscription().GetName()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.UpdateSubscription[0:len(c.CallOptions.UpdateSubscription):len(c.CallOptions.UpdateSubscription)], opts...) | |||
| var resp *pubsubpb.Subscription | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| resp, err = c.subscriberClient.UpdateSubscription(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
// ListSubscriptions lists matching subscriptions.
//
// The returned iterator fetches pages lazily; errors from the underlying RPC
// surface from the iterator's Next method.
func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator {
	md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject())))
	ctx = insertMetadata(ctx, c.xGoogMetadata, md)
	// Full slice expression so appending caller opts cannot mutate the defaults.
	opts = append(c.CallOptions.ListSubscriptions[0:len(c.CallOptions.ListSubscriptions):len(c.CallOptions.ListSubscriptions)], opts...)
	it := &SubscriptionIterator{}
	// Clone the request: InternalFetch mutates PageToken/PageSize on each page.
	req = proto.Clone(req).(*pubsubpb.ListSubscriptionsRequest)
	it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Subscription, string, error) {
		var resp *pubsubpb.ListSubscriptionsResponse
		req.PageToken = pageToken
		// PageSize is int32 on the wire; clamp larger requests.
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.subscriberClient.ListSubscriptions(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.Subscriptions, resp.NextPageToken, nil
	}
	// fetch adapts InternalFetch to the iterator package's paging contract,
	// accumulating items into the iterator's buffer.
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	// Seed paging state from the (cloned) request so callers may preset
	// PageSize/PageToken on req.
	it.pageInfo.MaxSize = int(req.PageSize)
	it.pageInfo.Token = req.PageToken
	return it
}
| // DeleteSubscription deletes an existing subscription. All messages retained in the subscription | |||
| // are immediately dropped. Calls to Pull after deletion will return | |||
| // NOT_FOUND. After a subscription is deleted, a new one may be created with | |||
| // the same name, but the new one has no association with the old | |||
| // subscription or its topic unless the same topic is specified. | |||
| func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.DeleteSubscription[0:len(c.CallOptions.DeleteSubscription):len(c.CallOptions.DeleteSubscription)], opts...) | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| _, err = c.subscriberClient.DeleteSubscription(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| return err | |||
| } | |||
| // ModifyAckDeadline modifies the ack deadline for a specific message. This method is useful | |||
| // to indicate that more time is needed to process a message by the | |||
| // subscriber, or to make the message available for redelivery if the | |||
| // processing was interrupted. Note that this does not modify the | |||
| // subscription-level ackDeadlineSeconds used for subsequent messages. | |||
| func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.ModifyAckDeadline[0:len(c.CallOptions.ModifyAckDeadline):len(c.CallOptions.ModifyAckDeadline)], opts...) | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| _, err = c.subscriberClient.ModifyAckDeadline(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| return err | |||
| } | |||
| // Acknowledge acknowledges the messages associated with the ack_ids in the | |||
| // AcknowledgeRequest. The Pub/Sub system can remove the relevant messages | |||
| // from the subscription. | |||
| // | |||
| // Acknowledging a message whose ack deadline has expired may succeed, | |||
| // but such a message may be redelivered later. Acknowledging a message more | |||
| // than once will not result in an error. | |||
| func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.Acknowledge[0:len(c.CallOptions.Acknowledge):len(c.CallOptions.Acknowledge)], opts...) | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| _, err = c.subscriberClient.Acknowledge(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| return err | |||
| } | |||
| // Pull pulls messages from the server. The server may return UNAVAILABLE if | |||
| // there are too many concurrent pull requests pending for the given | |||
| // subscription. | |||
| func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.Pull[0:len(c.CallOptions.Pull):len(c.CallOptions.Pull)], opts...) | |||
| var resp *pubsubpb.PullResponse | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| resp, err = c.subscriberClient.Pull(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // StreamingPull establishes a stream with the server, which sends messages down to the | |||
| // client. The client streams acknowledgements and ack deadline modifications | |||
| // back to the server. The server will close the stream and return the status | |||
| // on any error. The server may close the stream with status UNAVAILABLE to | |||
| // reassign server-side resources, in which case, the client should | |||
| // re-establish the stream. Flow control can be achieved by configuring the | |||
| // underlying RPC channel. | |||
| func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) { | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata) | |||
| opts = append(c.CallOptions.StreamingPull[0:len(c.CallOptions.StreamingPull):len(c.CallOptions.StreamingPull)], opts...) | |||
| var resp pubsubpb.Subscriber_StreamingPullClient | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| resp, err = c.subscriberClient.StreamingPull(ctx, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // ModifyPushConfig modifies the PushConfig for a specified subscription. | |||
| // | |||
| // This may be used to change a push subscription to a pull one (signified by | |||
| // an empty PushConfig) or vice versa, or change the endpoint URL and other | |||
| // attributes of a push subscription. Messages will accumulate for delivery | |||
| // continuously through the call regardless of changes to the PushConfig. | |||
| func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.ModifyPushConfig[0:len(c.CallOptions.ModifyPushConfig):len(c.CallOptions.ModifyPushConfig)], opts...) | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| _, err = c.subscriberClient.ModifyPushConfig(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| return err | |||
| } | |||
| // ListSnapshots lists the existing snapshots. Snapshots are used in | |||
| // <a href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a> | |||
| // operations, which allow | |||
| // you to manage message acknowledgments in bulk. That is, you can set the | |||
| // acknowledgment state of messages in an existing subscription to the state | |||
| // captured by a snapshot. | |||
| func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.ListSnapshots[0:len(c.CallOptions.ListSnapshots):len(c.CallOptions.ListSnapshots)], opts...) | |||
| it := &SnapshotIterator{} | |||
| req = proto.Clone(req).(*pubsubpb.ListSnapshotsRequest) | |||
| it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Snapshot, string, error) { | |||
| var resp *pubsubpb.ListSnapshotsResponse | |||
| req.PageToken = pageToken | |||
| if pageSize > math.MaxInt32 { | |||
| req.PageSize = math.MaxInt32 | |||
| } else { | |||
| req.PageSize = int32(pageSize) | |||
| } | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| resp, err = c.subscriberClient.ListSnapshots(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| if err != nil { | |||
| return nil, "", err | |||
| } | |||
| return resp.Snapshots, resp.NextPageToken, nil | |||
| } | |||
| fetch := func(pageSize int, pageToken string) (string, error) { | |||
| items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| it.items = append(it.items, items...) | |||
| return nextPageToken, nil | |||
| } | |||
| it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) | |||
| it.pageInfo.MaxSize = int(req.PageSize) | |||
| it.pageInfo.Token = req.PageToken | |||
| return it | |||
| } | |||
| // CreateSnapshot creates a snapshot from the requested subscription. Snapshots are used in | |||
| // <a href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a> | |||
| // operations, which allow | |||
| // you to manage message acknowledgments in bulk. That is, you can set the | |||
| // acknowledgment state of messages in an existing subscription to the state | |||
| // captured by a snapshot. | |||
| // <br><br>If the snapshot already exists, returns ALREADY_EXISTS. | |||
| // If the requested subscription doesn't exist, returns NOT_FOUND. | |||
| // If the backlog in the subscription is too old -- and the resulting snapshot | |||
| // would expire in less than 1 hour -- then FAILED_PRECONDITION is returned. | |||
| // See also the Snapshot.expire_time field. If the name is not provided in | |||
| // the request, the server will assign a random | |||
| // name for this snapshot on the same project as the subscription, conforming | |||
| // to the | |||
| // resource name | |||
| // format (at https://cloud.google.com/pubsub/docs/admin#resource_names). The | |||
| // generated name is populated in the returned Snapshot object. Note that for | |||
| // REST API requests, you must specify a name in the request. | |||
| func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.CreateSnapshot[0:len(c.CallOptions.CreateSnapshot):len(c.CallOptions.CreateSnapshot)], opts...) | |||
| var resp *pubsubpb.Snapshot | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| resp, err = c.subscriberClient.CreateSnapshot(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // UpdateSnapshot updates an existing snapshot. Snapshots are used in | |||
| // <a href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a> | |||
| // operations, which allow | |||
| // you to manage message acknowledgments in bulk. That is, you can set the | |||
| // acknowledgment state of messages in an existing subscription to the state | |||
| // captured by a snapshot. | |||
| func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snapshot.name", url.QueryEscape(req.GetSnapshot().GetName()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.UpdateSnapshot[0:len(c.CallOptions.UpdateSnapshot):len(c.CallOptions.UpdateSnapshot)], opts...) | |||
| var resp *pubsubpb.Snapshot | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| resp, err = c.subscriberClient.UpdateSnapshot(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // DeleteSnapshot removes an existing snapshot. Snapshots are used in | |||
| // <a href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a> | |||
| // operations, which allow | |||
| // you to manage message acknowledgments in bulk. That is, you can set the | |||
| // acknowledgment state of messages in an existing subscription to the state | |||
| // captured by a snapshot.<br><br> | |||
| // When the snapshot is deleted, all messages retained in the snapshot | |||
| // are immediately dropped. After a snapshot is deleted, a new one may be | |||
| // created with the same name, but the new one has no association with the old | |||
| // snapshot or its subscription, unless the same subscription is specified. | |||
| func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snapshot", url.QueryEscape(req.GetSnapshot()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.DeleteSnapshot[0:len(c.CallOptions.DeleteSnapshot):len(c.CallOptions.DeleteSnapshot)], opts...) | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| _, err = c.subscriberClient.DeleteSnapshot(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| return err | |||
| } | |||
| // Seek seeks an existing subscription to a point in time or to a given snapshot, | |||
| // whichever is provided in the request. Snapshots are used in | |||
| // <a href="https://cloud.google.com/pubsub/docs/replay-overview">Seek</a> | |||
| // operations, which allow | |||
| // you to manage message acknowledgments in bulk. That is, you can set the | |||
| // acknowledgment state of messages in an existing subscription to the state | |||
| // captured by a snapshot. Note that both the subscription and the snapshot | |||
| // must be on the same topic. | |||
| func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) { | |||
| md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) | |||
| ctx = insertMetadata(ctx, c.xGoogMetadata, md) | |||
| opts = append(c.CallOptions.Seek[0:len(c.CallOptions.Seek):len(c.CallOptions.Seek)], opts...) | |||
| var resp *pubsubpb.SeekResponse | |||
| err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { | |||
| var err error | |||
| resp, err = c.subscriberClient.Seek(ctx, req, settings.GRPC...) | |||
| return err | |||
| }, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
// SnapshotIterator manages a stream of *pubsubpb.Snapshot.
type SnapshotIterator struct {
	// items buffers results fetched from the service but not yet returned by Next.
	items []*pubsubpb.Snapshot
	// pageInfo tracks page size and token; exposed to callers via PageInfo.
	pageInfo *iterator.PageInfo
	// nextFunc is supplied by iterator.NewPageInfo; Next calls it to ensure
	// items is non-empty before popping.
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Snapshot, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
// The returned value is shared with the iterator; mutating its fields affects iteration.
func (it *SnapshotIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}
| // Next returns the next result. Its second return value is iterator.Done if there are no more | |||
| // results. Once Next returns Done, all subsequent calls will return Done. | |||
| func (it *SnapshotIterator) Next() (*pubsubpb.Snapshot, error) { | |||
| var item *pubsubpb.Snapshot | |||
| if err := it.nextFunc(); err != nil { | |||
| return item, err | |||
| } | |||
| item = it.items[0] | |||
| it.items = it.items[1:] | |||
| return item, nil | |||
| } | |||
// bufLen reports how many fetched items are buffered but not yet returned by
// Next; it is handed to iterator.NewPageInfo.
func (it *SnapshotIterator) bufLen() int {
	return len(it.items)
}
| func (it *SnapshotIterator) takeBuf() interface{} { | |||
| b := it.items | |||
| it.items = nil | |||
| return b | |||
| } | |||
// SubscriptionIterator manages a stream of *pubsubpb.Subscription.
type SubscriptionIterator struct {
	// items buffers results fetched from the service but not yet returned by Next.
	items []*pubsubpb.Subscription
	// pageInfo tracks page size and token; exposed to callers via PageInfo.
	pageInfo *iterator.PageInfo
	// nextFunc is supplied by iterator.NewPageInfo; Next calls it to ensure
	// items is non-empty before popping.
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Subscription, nextPageToken string, err error)
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
// The returned value is shared with the iterator; mutating its fields affects iteration.
func (it *SubscriptionIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}
| // Next returns the next result. Its second return value is iterator.Done if there are no more | |||
| // results. Once Next returns Done, all subsequent calls will return Done. | |||
| func (it *SubscriptionIterator) Next() (*pubsubpb.Subscription, error) { | |||
| var item *pubsubpb.Subscription | |||
| if err := it.nextFunc(); err != nil { | |||
| return item, err | |||
| } | |||
| item = it.items[0] | |||
| it.items = it.items[1:] | |||
| return item, nil | |||
| } | |||
// bufLen reports how many fetched items are buffered but not yet returned by
// Next; it is handed to iterator.NewPageInfo.
func (it *SubscriptionIterator) bufLen() int {
	return len(it.items)
}
| func (it *SubscriptionIterator) takeBuf() interface{} { | |||
| b := it.items | |||
| it.items = nil | |||
| return b | |||
| } | |||
| @@ -0,0 +1,72 @@ | |||
| // Copyright 2018 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| // +build psdebug | |||
| package pubsub | |||
| import ( | |||
| "sync" | |||
| "time" | |||
| ) | |||
var (
	// dmu guards msgTraces and ackIDToMsgID.
	dmu sync.Mutex
	// msgTraces maps a message ID to the ordered list of events recorded for it.
	msgTraces = map[string][]Event{}
	// ackIDToMsgID maps an ack ID back to the ID of the message it acknowledges.
	ackIDToMsgID = map[string]string{}
)
// Event is a single recorded occurrence in a message's lifecycle: "recv",
// "ack", "modack" or "nack" (see addRecv, addAcks and addModAcks).
type Event struct {
	Desc string    // short description of the event
	At   time.Time // when the event was recorded
}
| func MessageEvents(msgID string) []Event { | |||
| dmu.Lock() | |||
| defer dmu.Unlock() | |||
| return msgTraces[msgID] | |||
| } | |||
| func addRecv(msgID, ackID string, t time.Time) { | |||
| dmu.Lock() | |||
| defer dmu.Unlock() | |||
| ackIDToMsgID[ackID] = msgID | |||
| addEvent(msgID, "recv", t) | |||
| } | |||
| func addAcks(ackIDs []string) { | |||
| dmu.Lock() | |||
| defer dmu.Unlock() | |||
| now := time.Now() | |||
| for _, id := range ackIDs { | |||
| addEvent(ackIDToMsgID[id], "ack", now) | |||
| } | |||
| } | |||
| func addModAcks(ackIDs []string, deadlineSecs int32) { | |||
| dmu.Lock() | |||
| defer dmu.Unlock() | |||
| desc := "modack" | |||
| if deadlineSecs == 0 { | |||
| desc = "nack" | |||
| } | |||
| now := time.Now() | |||
| for _, id := range ackIDs { | |||
| addEvent(ackIDToMsgID[id], desc, now) | |||
| } | |||
| } | |||
| func addEvent(msgID, desc string, t time.Time) { | |||
| msgTraces[msgID] = append(msgTraces[msgID], Event{desc, t}) | |||
| } | |||
| @@ -0,0 +1,140 @@ | |||
| // Copyright 2016 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| /* | |||
| Package pubsub provides an easy way to publish and receive Google Cloud Pub/Sub | |||
| messages, hiding the details of the underlying server RPCs. Google Cloud | |||
| Pub/Sub is a many-to-many, asynchronous messaging system that decouples senders | |||
| and receivers. | |||
| More information about Google Cloud Pub/Sub is available at | |||
| https://cloud.google.com/pubsub/docs | |||
| See https://godoc.org/cloud.google.com/go for authentication, timeouts, | |||
| connection pooling and similar aspects of this package. | |||
| Publishing | |||
| Google Cloud Pub/Sub messages are published to topics. Topics may be created | |||
| using the pubsub package like so: | |||
| topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name") | |||
| Messages may then be published to a topic: | |||
| res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")}) | |||
| Publish queues the message for publishing and returns immediately. When enough | |||
| messages have accumulated, or enough time has elapsed, the batch of messages is | |||
| sent to the Pub/Sub service. | |||
| Publish returns a PublishResult, which behaves like a future: its Get method | |||
| blocks until the message has been sent to the service. | |||
| The first time you call Publish on a topic, goroutines are started in the | |||
| background. To clean up these goroutines, call Stop: | |||
| topic.Stop() | |||
| Receiving | |||
| To receive messages published to a topic, clients create subscriptions | |||
| to the topic. There may be more than one subscription per topic; each message | |||
| that is published to the topic will be delivered to all of its subscriptions. | |||
Subscriptions may be created like so:
| sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name", | |||
| pubsub.SubscriptionConfig{Topic: topic}) | |||
| Messages are then consumed from a subscription via callback. | |||
| err := sub.Receive(context.Background(), func(ctx context.Context, m *Message) { | |||
| log.Printf("Got message: %s", m.Data) | |||
| m.Ack() | |||
| }) | |||
| if err != nil { | |||
| // Handle error. | |||
| } | |||
| The callback is invoked concurrently by multiple goroutines, maximizing | |||
| throughput. To terminate a call to Receive, cancel its context. | |||
| Once client code has processed the message, it must call Message.Ack or | |||
Message.Nack, otherwise the message will eventually be redelivered. If the
| client cannot or doesn't want to process the message, it can call Message.Nack | |||
| to speed redelivery. For more information and configuration options, see | |||
| "Deadlines" below. | |||
| Note: It is possible for Messages to be redelivered, even if Message.Ack has | |||
| been called. Client code must be robust to multiple deliveries of messages. | |||
Note: This uses pubsub's streaming pull feature. This feature has properties that
| may be surprising. Please take a look at https://cloud.google.com/pubsub/docs/pull#streamingpull | |||
| for more details on how streaming pull behaves compared to the synchronous | |||
| pull method. | |||
| Deadlines | |||
| The default pubsub deadlines are suitable for most use cases, but may be | |||
| overridden. This section describes the tradeoffs that should be considered | |||
| when overriding the defaults. | |||
| Behind the scenes, each message returned by the Pub/Sub server has an | |||
| associated lease, known as an "ACK deadline". Unless a message is | |||
| acknowledged within the ACK deadline, or the client requests that | |||
| the ACK deadline be extended, the message will become eligible for redelivery. | |||
| As a convenience, the pubsub client will automatically extend deadlines until | |||
| either: | |||
| * Message.Ack or Message.Nack is called, or | |||
| * The "MaxExtension" period elapses from the time the message is fetched from the server. | |||
| ACK deadlines are extended periodically by the client. The initial ACK | |||
| deadline given to messages is 10s. The period between extensions, as well as the | |||
| length of the extension, automatically adjust depending on the time it takes to ack | |||
| messages, up to 10m. This has the effect that subscribers that process messages | |||
| quickly have their message ack deadlines extended for a short amount, whereas | |||
| subscribers that process message slowly have their message ack deadlines extended | |||
| for a large amount. The net effect is fewer RPCs sent from the client library. | |||
| For example, consider a subscriber that takes 3 minutes to process each message. | |||
| Since the library has already recorded several 3 minute "time to ack"s in a | |||
| percentile distribution, future message extensions are sent with a value of 3 | |||
| minutes, every 3 minutes. Suppose the application crashes 5 seconds after the | |||
| library sends such an extension: the Pub/Sub server would wait the remaining | |||
| 2m55s before re-sending the messages out to other subscribers. | |||
| Please note that the client library does not use the subscription's AckDeadline | |||
| by default. To enforce the subscription AckDeadline, set MaxExtension to the | |||
| subscription's AckDeadline: | |||
| cfg, err := sub.Config(ctx) | |||
| if err != nil { | |||
| // TODO: handle err | |||
| } | |||
| sub.ReceiveSettings.MaxExtension = cfg.AckDeadline | |||
| Slow Message Processing | |||
| For use cases where message processing exceeds 30 minutes, we recommend using | |||
| the base client in a pull model, since long-lived streams are periodically killed | |||
| by firewalls. See the example at https://godoc.org/cloud.google.com/go/pubsub/apiv1#example-SubscriberClient-Pull-LengthyClientProcessing | |||
| */ | |||
| package pubsub // import "cloud.google.com/go/pubsub" | |||
| @@ -0,0 +1,122 @@ | |||
| // Copyright 2017 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| package pubsub | |||
| import ( | |||
| "context" | |||
| "sync/atomic" | |||
| "golang.org/x/sync/semaphore" | |||
| ) | |||
// flowController implements flow control for Subscription.Receive.
type flowController struct {
	maxCount int
	maxSize  int // max total size of messages
	// semCount and semSize enforce the maximum number and total byte size of
	// outstanding messages. A nil semaphore means that dimension is unlimited
	// (see newFlowController).
	semCount, semSize *semaphore.Weighted
	// Number of calls to acquire - number of calls to release. This can go
	// negative if semCount == nil and a large acquire is followed by multiple
	// small releases.
	// Atomic.
	countRemaining int64
}
| // newFlowController creates a new flowController that ensures no more than | |||
| // maxCount messages or maxSize bytes are outstanding at once. If maxCount or | |||
| // maxSize is < 1, then an unlimited number of messages or bytes is permitted, | |||
| // respectively. | |||
| func newFlowController(maxCount, maxSize int) *flowController { | |||
| fc := &flowController{ | |||
| maxCount: maxCount, | |||
| maxSize: maxSize, | |||
| semCount: nil, | |||
| semSize: nil, | |||
| } | |||
| if maxCount > 0 { | |||
| fc.semCount = semaphore.NewWeighted(int64(maxCount)) | |||
| } | |||
| if maxSize > 0 { | |||
| fc.semSize = semaphore.NewWeighted(int64(maxSize)) | |||
| } | |||
| return fc | |||
| } | |||
// acquire blocks until one message of size bytes can proceed or ctx is done.
// It returns nil in the first case, or ctx.Err() in the second.
//
// acquire allows large messages to proceed by treating a size greater than maxSize
// as if it were equal to maxSize.
func (f *flowController) acquire(ctx context.Context, size int) error {
	// Take the count semaphore first, then the size semaphore; tryAcquire uses
	// the same order, keeping the two paths consistent.
	if f.semCount != nil {
		if err := f.semCount.Acquire(ctx, 1); err != nil {
			return err
		}
	}
	if f.semSize != nil {
		if err := f.semSize.Acquire(ctx, f.bound(size)); err != nil {
			// Roll back the count token so a failed acquire leaves no residue.
			if f.semCount != nil {
				f.semCount.Release(1)
			}
			return err
		}
	}
	atomic.AddInt64(&f.countRemaining, 1)
	return nil
}
// tryAcquire returns false if acquire would block. Otherwise, it behaves like
// acquire and returns true.
//
// tryAcquire allows large messages to proceed by treating a size greater than
// maxSize as if it were equal to maxSize.
func (f *flowController) tryAcquire(size int) bool {
	// Mirror acquire: count semaphore first, then size semaphore.
	if f.semCount != nil {
		if !f.semCount.TryAcquire(1) {
			return false
		}
	}
	if f.semSize != nil {
		if !f.semSize.TryAcquire(f.bound(size)) {
			// Give back the count token so a failed attempt has no effect.
			if f.semCount != nil {
				f.semCount.Release(1)
			}
			return false
		}
	}
	atomic.AddInt64(&f.countRemaining, 1)
	return true
}
| // release notes that one message of size bytes is no longer outstanding. | |||
| func (f *flowController) release(size int) { | |||
| atomic.AddInt64(&f.countRemaining, -1) | |||
| if f.semCount != nil { | |||
| f.semCount.Release(1) | |||
| } | |||
| if f.semSize != nil { | |||
| f.semSize.Release(f.bound(size)) | |||
| } | |||
| } | |||
| func (f *flowController) bound(size int) int64 { | |||
| if size > f.maxSize { | |||
| return int64(f.maxSize) | |||
| } | |||
| return int64(size) | |||
| } | |||
// count reports the number of outstanding acquisitions: successful calls to
// acquire/tryAcquire minus calls to release. See countRemaining for why this
// may briefly be negative when semCount is nil.
func (f *flowController) count() int {
	return int(atomic.LoadInt64(&f.countRemaining))
}
| @@ -0,0 +1,79 @@ | |||
| // Copyright 2017 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| package distribution | |||
| import ( | |||
| "log" | |||
| "math" | |||
| "sort" | |||
| "sync" | |||
| "sync/atomic" | |||
| ) | |||
// D is a distribution. Methods of D can be called concurrently by multiple
// goroutines.
type D struct {
	buckets []uint64
	// sumsReuse is the scratch space that is reused
	// to store sums during invocations of Percentile.
	// After an invocation of New(n):
	//   len(buckets) == len(sumsReuse) == n
	sumsReuse []uint64
	mu        sync.Mutex
}

// New creates a new distribution capable of holding values from 0 to n-1.
func New(n int) *D {
	d := &D{
		buckets:   make([]uint64, n),
		sumsReuse: make([]uint64, n),
	}
	return d
}

// Record records value v to the distribution.
// To help with distributions with long tails, if v is larger than the maximum value,
// Record records the maximum value instead.
// If v is negative, Record panics.
func (d *D) Record(v int) {
	switch {
	case v < 0:
		log.Panicf("Record: value out of range: %d", v)
	case v >= len(d.buckets):
		// Clamp long-tail values into the last bucket.
		v = len(d.buckets) - 1
	}
	atomic.AddUint64(&d.buckets[v], 1)
}

// Percentile computes the p-th percentile of the distribution where
// p is between 0 and 1. This method may be called by multiple goroutines.
func (d *D) Percentile(p float64) int {
	// NOTE: This implementation uses the nearest-rank method.
	// https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method
	if p < 0 || p > 1 {
		log.Panicf("Percentile: percentile out of range: %f", p)
	}
	// The mutex serializes use of the shared sumsReuse scratch space; bucket
	// reads still use atomics because Record may run concurrently.
	d.mu.Lock()
	defer d.mu.Unlock()
	var running uint64
	for i := range d.sumsReuse {
		running += atomic.LoadUint64(&d.buckets[i])
		d.sumsReuse[i] = running
	}
	target := uint64(math.Ceil(float64(running) * p))
	return sort.Search(len(d.sumsReuse), func(i int) bool { return d.sumsReuse[i] >= target })
}
| @@ -0,0 +1,527 @@ | |||
| // Copyright 2016 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| package pubsub | |||
| import ( | |||
| "context" | |||
| "io" | |||
| "sync" | |||
| "time" | |||
| vkit "cloud.google.com/go/pubsub/apiv1" | |||
| "cloud.google.com/go/pubsub/internal/distribution" | |||
| "github.com/golang/protobuf/proto" | |||
| gax "github.com/googleapis/gax-go/v2" | |||
| pb "google.golang.org/genproto/googleapis/pubsub/v1" | |||
| "google.golang.org/grpc" | |||
| "google.golang.org/grpc/codes" | |||
| "google.golang.org/grpc/status" | |||
| ) | |||
// gracePeriod is how far ahead of a message's actual ack deadline the client
// extends the deadline via modack. Between receipt and ack (i.e. while the
// message is being processed) we extend the deadline, but not right at the
// moment it expires — we send the extension this much time early.
const gracePeriod = 5 * time.Second
// messageIterator pulls messages for a single subscription and tracks the
// acks, nacks and deadline extensions that must be sent back to the server.
type messageIterator struct {
	ctx     context.Context
	cancel  func() // the function that will cancel ctx; called in stop
	po      *pullOptions
	ps      *pullStream // nil in synchronous mode (see newMessageIterator)
	subc    *vkit.SubscriberClient
	subName string // full name of the subscription being pulled

	kaTick     <-chan time.Time // keep-alive (deadline extensions)
	ackTicker  *time.Ticker     // message acks
	nackTicker *time.Ticker     // message nacks (more frequent than acks)
	pingTicker *time.Ticker     // sends to the stream to keep it open
	failed     chan struct{}    // closed on stream error
	drained    chan struct{}    // closed when stopped && no more pending messages
	wg         sync.WaitGroup   // tracks the background goroutine started in newMessageIterator

	// mu guards the mutable state below (the pending maps and err).
	mu          sync.Mutex
	ackTimeDist *distribution.D // dist uses seconds

	// keepAliveDeadlines is a map of id to expiration time. This map is used in conjunction with
	// subscription.ReceiveSettings.MaxExtension to record the maximum amount of time (the
	// deadline, more specifically) we're willing to extend a message's ack deadline. As each
	// message arrives, we'll record now+MaxExtension in this table; whenever we have a chance
	// to update ack deadlines (via modack), we'll consult this table and only include IDs
	// that are not beyond their deadline.
	keepAliveDeadlines map[string]time.Time
	pendingAcks        map[string]bool // ack IDs with an ack requested but not yet sent
	pendingNacks       map[string]bool // ack IDs with a nack requested but not yet sent
	pendingModAcks     map[string]bool // ack IDs whose ack deadline is to be modified
	err                error           // error from stream failure
}
| // newMessageIterator starts and returns a new messageIterator. | |||
| // subName is the full name of the subscription to pull messages from. | |||
| // Stop must be called on the messageIterator when it is no longer needed. | |||
| // The iterator always uses the background context for acking messages and extending message deadlines. | |||
| func newMessageIterator(subc *vkit.SubscriberClient, subName string, po *pullOptions) *messageIterator { | |||
| var ps *pullStream | |||
| if !po.synchronous { | |||
| ps = newPullStream(context.Background(), subc.StreamingPull, subName) | |||
| } | |||
| // The period will update each tick based on the distribution of acks. We'll start by arbitrarily sending | |||
| // the first keepAlive halfway towards the minimum ack deadline. | |||
| keepAlivePeriod := minAckDeadline / 2 | |||
| // Ack promptly so users don't lose work if client crashes. | |||
| ackTicker := time.NewTicker(100 * time.Millisecond) | |||
| nackTicker := time.NewTicker(100 * time.Millisecond) | |||
| pingTicker := time.NewTicker(30 * time.Second) | |||
| cctx, cancel := context.WithCancel(context.Background()) | |||
| it := &messageIterator{ | |||
| ctx: cctx, | |||
| cancel: cancel, | |||
| ps: ps, | |||
| po: po, | |||
| subc: subc, | |||
| subName: subName, | |||
| kaTick: time.After(keepAlivePeriod), | |||
| ackTicker: ackTicker, | |||
| nackTicker: nackTicker, | |||
| pingTicker: pingTicker, | |||
| failed: make(chan struct{}), | |||
| drained: make(chan struct{}), | |||
| ackTimeDist: distribution.New(int(maxAckDeadline/time.Second) + 1), | |||
| keepAliveDeadlines: map[string]time.Time{}, | |||
| pendingAcks: map[string]bool{}, | |||
| pendingNacks: map[string]bool{}, | |||
| pendingModAcks: map[string]bool{}, | |||
| } | |||
| it.wg.Add(1) | |||
| go it.sender() | |||
| return it | |||
| } | |||
// Subscription.receive will call stop on its messageIterator when finished with it.
// Stop will block until Done has been called on all Messages that have been
// returned by Next, or until the context with which the messageIterator was created
// is cancelled or exceeds its deadline.
func (it *messageIterator) stop() {
	// Cancel first so receive() stops pulling new messages.
	it.cancel()
	it.mu.Lock()
	// If no messages are outstanding this closes the drained channel now;
	// otherwise done() will close it when the last message finishes.
	it.checkDrained()
	it.mu.Unlock()
	// Wait for the sender goroutine (the only wg participant) to flush its
	// final acks/nacks and exit.
	it.wg.Wait()
}
// checkDrained closes the drained channel if the iterator has been stopped and all
// pending messages have either been n/acked or expired.
//
// Called with the lock held.
func (it *messageIterator) checkDrained() {
	// If drained is already closed, return — closing twice would panic.
	select {
	case <-it.drained:
		return
	default:
	}
	// Drain only after stop() has cancelled the context AND no messages are
	// still awaiting an ack/nack (keepAliveDeadlines tracks outstanding ones).
	select {
	case <-it.ctx.Done():
		if len(it.keepAliveDeadlines) == 0 {
			close(it.drained)
		}
	default:
	}
}
| // Called when a message is acked/nacked. | |||
| func (it *messageIterator) done(ackID string, ack bool, receiveTime time.Time) { | |||
| it.ackTimeDist.Record(int(time.Since(receiveTime) / time.Second)) | |||
| it.mu.Lock() | |||
| defer it.mu.Unlock() | |||
| delete(it.keepAliveDeadlines, ackID) | |||
| if ack { | |||
| it.pendingAcks[ackID] = true | |||
| } else { | |||
| it.pendingNacks[ackID] = true | |||
| } | |||
| it.checkDrained() | |||
| } | |||
| // fail is called when a stream method returns a permanent error. | |||
| // fail returns it.err. This may be err, or it may be the error | |||
| // set by an earlier call to fail. | |||
| func (it *messageIterator) fail(err error) error { | |||
| it.mu.Lock() | |||
| defer it.mu.Unlock() | |||
| if it.err == nil { | |||
| it.err = err | |||
| close(it.failed) | |||
| } | |||
| return it.err | |||
| } | |||
// receive makes a call to the stream's Recv method, or the Pull RPC, and returns
// its messages.
// maxToPull is the maximum number of messages for the Pull RPC.
func (it *messageIterator) receive(maxToPull int32) ([]*Message, error) {
	// A previously recorded stream failure is permanent.
	it.mu.Lock()
	ierr := it.err
	it.mu.Unlock()
	if ierr != nil {
		return nil, ierr
	}
	// Stop retrieving messages if the iterator's Stop method was called.
	select {
	case <-it.ctx.Done():
		it.wg.Wait()
		return nil, io.EOF
	default:
	}
	// Fetch the next batch: unary Pull in synchronous mode, stream Recv otherwise.
	var rmsgs []*pb.ReceivedMessage
	var err error
	if it.po.synchronous {
		rmsgs, err = it.pullMessages(maxToPull)
	} else {
		rmsgs, err = it.recvMessages()
	}
	// Any error here is fatal.
	if err != nil {
		return nil, it.fail(err)
	}
	msgs, err := convertMessages(rmsgs)
	if err != nil {
		return nil, it.fail(err)
	}
	// We received some messages. Remember them so we can keep them alive. Also,
	// do a receipt mod-ack when streaming.
	maxExt := time.Now().Add(it.po.maxExtension)
	ackIDs := map[string]bool{}
	it.mu.Lock()
	now := time.Now()
	for _, m := range msgs {
		m.receiveTime = now
		addRecv(m.ID, m.ackID, now)
		m.doneFunc = it.done
		// Record the latest time we're willing to keep extending this message.
		it.keepAliveDeadlines[m.ackID] = maxExt
		// Don't change the mod-ack if the message is going to be nacked. This is
		// possible if there are retries.
		if !it.pendingNacks[m.ackID] {
			ackIDs[m.ackID] = true
		}
	}
	deadline := it.ackDeadline()
	it.mu.Unlock()
	// Receipt mod-ack: extend deadlines right away (to the adaptive deadline)
	// so messages aren't redelivered while they're being processed.
	if len(ackIDs) > 0 {
		if !it.sendModAck(ackIDs, deadline) {
			return nil, it.err
		}
	}
	return msgs, nil
}
| // Get messages using the Pull RPC. | |||
| // This may block indefinitely. It may also return zero messages, after some time waiting. | |||
| func (it *messageIterator) pullMessages(maxToPull int32) ([]*pb.ReceivedMessage, error) { | |||
| // Use it.ctx as the RPC context, so that if the iterator is stopped, the call | |||
| // will return immediately. | |||
| res, err := it.subc.Pull(it.ctx, &pb.PullRequest{ | |||
| Subscription: it.subName, | |||
| MaxMessages: maxToPull, | |||
| }, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) | |||
| switch { | |||
| case err == context.Canceled: | |||
| return nil, nil | |||
| case err != nil: | |||
| return nil, err | |||
| default: | |||
| return res.ReceivedMessages, nil | |||
| } | |||
| } | |||
| func (it *messageIterator) recvMessages() ([]*pb.ReceivedMessage, error) { | |||
| res, err := it.ps.Recv() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return res.ReceivedMessages, nil | |||
| } | |||
// sender runs in a goroutine and handles all sends to the stream.
// It wakes on ticker/timer events (or drain/failure), collects the pending
// ack/nack/modack sets while holding the lock, then issues the RPCs with the
// lock released. It exits when the stream fails or the iterator is drained.
func (it *messageIterator) sender() {
	defer it.wg.Done()
	defer it.ackTicker.Stop()
	defer it.nackTicker.Stop()
	defer it.pingTicker.Stop()
	defer func() {
		if it.ps != nil {
			it.ps.CloseSend()
		}
	}()
	done := false
	for !done {
		// Decide which kinds of sends this iteration will perform.
		sendAcks := false
		sendNacks := false
		sendModAcks := false
		sendPing := false
		dl := it.ackDeadline()
		// NOTE: every case below except <-it.failed acquires the lock and
		// leaves it held for the swap code after the select.
		select {
		case <-it.failed:
			// Stream failed: nothing to do, so stop immediately.
			return
		case <-it.drained:
			// All outstanding messages have been marked done:
			// nothing left to do except make the final calls.
			it.mu.Lock()
			sendAcks = (len(it.pendingAcks) > 0)
			sendNacks = (len(it.pendingNacks) > 0)
			// No point in sending modacks.
			done = true
		case <-it.kaTick:
			it.mu.Lock()
			it.handleKeepAlives()
			sendModAcks = (len(it.pendingModAcks) > 0)
			// Schedule the next keep-alive tick: aim for gracePeriod before
			// the current deadline would expire.
			nextTick := dl - gracePeriod
			if nextTick <= 0 {
				// If the deadline is <= gracePeriod, let's tick again halfway to
				// the deadline.
				nextTick = dl / 2
			}
			it.kaTick = time.After(nextTick)
		case <-it.nackTicker.C:
			it.mu.Lock()
			sendNacks = (len(it.pendingNacks) > 0)
		case <-it.ackTicker.C:
			it.mu.Lock()
			sendAcks = (len(it.pendingAcks) > 0)
		case <-it.pingTicker.C:
			it.mu.Lock()
			// Ping only if we are processing messages via streaming.
			sendPing = !it.po.synchronous && (len(it.keepAliveDeadlines) > 0)
		}
		// Lock is held here. Swap out the pending maps so the RPCs below can
		// run without the lock.
		var acks, nacks, modAcks map[string]bool
		if sendAcks {
			acks = it.pendingAcks
			it.pendingAcks = map[string]bool{}
		}
		if sendNacks {
			nacks = it.pendingNacks
			it.pendingNacks = map[string]bool{}
		}
		if sendModAcks {
			modAcks = it.pendingModAcks
			it.pendingModAcks = map[string]bool{}
		}
		it.mu.Unlock()
		// Make Ack and ModAck RPCs. A false return means a fatal error was
		// recorded via it.fail, so stop the goroutine.
		if sendAcks {
			if !it.sendAck(acks) {
				return
			}
		}
		if sendNacks {
			// Nack indicated by modifying the deadline to zero.
			if !it.sendModAck(nacks, 0) {
				return
			}
		}
		if sendModAcks {
			if !it.sendModAck(modAcks, dl) {
				return
			}
		}
		if sendPing {
			it.pingStream()
		}
	}
}
| // handleKeepAlives modifies the pending request to include deadline extensions | |||
| // for live messages. It also purges expired messages. | |||
| // | |||
| // Called with the lock held. | |||
| func (it *messageIterator) handleKeepAlives() { | |||
| now := time.Now() | |||
| for id, expiry := range it.keepAliveDeadlines { | |||
| if expiry.Before(now) { | |||
| // This delete will not result in skipping any map items, as implied by | |||
| // the spec at https://golang.org/ref/spec#For_statements, "For | |||
| // statements with range clause", note 3, and stated explicitly at | |||
| // https://groups.google.com/forum/#!msg/golang-nuts/UciASUb03Js/pzSq5iVFAQAJ. | |||
| delete(it.keepAliveDeadlines, id) | |||
| } else { | |||
| // This will not conflict with a nack, because nacking removes the ID from keepAliveDeadlines. | |||
| it.pendingModAcks[id] = true | |||
| } | |||
| } | |||
| it.checkDrained() | |||
| } | |||
| func (it *messageIterator) sendAck(m map[string]bool) bool { | |||
| // Account for the Subscription field. | |||
| overhead := calcFieldSizeString(it.subName) | |||
| return it.sendAckIDRPC(m, maxPayload-overhead, func(ids []string) error { | |||
| recordStat(it.ctx, AckCount, int64(len(ids))) | |||
| addAcks(ids) | |||
| // Use context.Background() as the call's context, not it.ctx. We don't | |||
| // want to cancel this RPC when the iterator is stopped. | |||
| return it.subc.Acknowledge(context.Background(), &pb.AcknowledgeRequest{ | |||
| Subscription: it.subName, | |||
| AckIds: ids, | |||
| }) | |||
| }) | |||
| } | |||
// sendModAck sets the ack deadline of every ID in m to deadline, splitting
// into multiple RPCs as needed; a deadline of 0 nacks the messages.
// It reports false if a fatal error occurred.
//
// The receipt mod-ack amount is derived from a percentile distribution based
// on the time it takes to process messages. The percentile chosen is the 99%th
// percentile in order to capture the highest amount of time necessary without
// considering 1% outliers.
func (it *messageIterator) sendModAck(m map[string]bool, deadline time.Duration) bool {
	deadlineSec := int32(deadline / time.Second)
	// Account for the Subscription and AckDeadlineSeconds fields.
	overhead := calcFieldSizeString(it.subName) + calcFieldSizeInt(int(deadlineSec))
	return it.sendAckIDRPC(m, maxPayload-overhead, func(ids []string) error {
		if deadline == 0 {
			recordStat(it.ctx, NackCount, int64(len(ids)))
		} else {
			recordStat(it.ctx, ModAckCount, int64(len(ids)))
		}
		addModAcks(ids, deadlineSec)
		// Retry this RPC on Unavailable for a short amount of time, then give up
		// without returning a fatal error. The utility of this RPC is by nature
		// transient (since the deadline is relative to the current time) and it
		// isn't crucial for correctness (since expired messages will just be
		// resent).
		cctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		bo := gax.Backoff{
			Initial:    100 * time.Millisecond,
			Max:        time.Second,
			Multiplier: 2,
		}
		for {
			err := it.subc.ModifyAckDeadline(cctx, &pb.ModifyAckDeadlineRequest{
				Subscription:       it.subName,
				AckDeadlineSeconds: deadlineSec,
				AckIds:             ids,
			})
			switch status.Code(err) {
			case codes.Unavailable:
				if err := gax.Sleep(cctx, bo.Pause()); err == nil {
					continue
				}
				// Treat sleep timeout like RPC timeout.
				fallthrough
			case codes.DeadlineExceeded:
				// Timeout. Not a fatal error, but note that it happened.
				recordStat(it.ctx, ModAckTimeoutCount, 1)
				return nil
			default:
				// Any other error is fatal. Note status.Code(nil) is codes.OK,
				// so success also lands here and returns nil.
				return err
			}
		}
	})
}
| func (it *messageIterator) sendAckIDRPC(ackIDSet map[string]bool, maxSize int, call func([]string) error) bool { | |||
| ackIDs := make([]string, 0, len(ackIDSet)) | |||
| for k := range ackIDSet { | |||
| ackIDs = append(ackIDs, k) | |||
| } | |||
| var toSend []string | |||
| for len(ackIDs) > 0 { | |||
| toSend, ackIDs = splitRequestIDs(ackIDs, maxSize) | |||
| if err := call(toSend); err != nil { | |||
| // The underlying client handles retries, so any error is fatal to the | |||
| // iterator. | |||
| it.fail(err) | |||
| return false | |||
| } | |||
| } | |||
| return true | |||
| } | |||
// Send a message to the stream to keep it open. The stream will close if there's no
// traffic on it for a while. By keeping it open, we delay the start of the
// expiration timer on messages that are buffered by gRPC or elsewhere in the
// network. This matters if it takes a long time to process messages relative to the
// default ack deadline, and if the messages are small enough so that many can fit
// into the buffer.
func (it *messageIterator) pingStream() {
	// An empty request carries no acks or modacks; it serves purely as traffic.
	// Ignore error; if the stream is broken, this doesn't matter anyway.
	_ = it.ps.Send(&pb.StreamingPullRequest{})
}
| // calcFieldSizeString returns the number of bytes string fields | |||
| // will take up in an encoded proto message. | |||
| func calcFieldSizeString(fields ...string) int { | |||
| overhead := 0 | |||
| for _, field := range fields { | |||
| overhead += 1 + len(field) + proto.SizeVarint(uint64(len(field))) | |||
| } | |||
| return overhead | |||
| } | |||
| // calcFieldSizeInt returns the number of bytes int fields | |||
| // will take up in an encoded proto message. | |||
| func calcFieldSizeInt(fields ...int) int { | |||
| overhead := 0 | |||
| for _, field := range fields { | |||
| overhead += 1 + proto.SizeVarint(uint64(field)) | |||
| } | |||
| return overhead | |||
| } | |||
| // splitRequestIDs takes a slice of ackIDs and returns two slices such that the first | |||
| // ackID slice can be used in a request where the payload does not exceed maxSize. | |||
| func splitRequestIDs(ids []string, maxSize int) (prefix, remainder []string) { | |||
| size := 0 | |||
| i := 0 | |||
| // TODO(hongalex): Use binary search to find split index, since ackIDs are | |||
| // fairly constant. | |||
| for size < maxSize && i < len(ids) { | |||
| size += calcFieldSizeString(ids[i]) | |||
| i++ | |||
| } | |||
| if size > maxSize { | |||
| i-- | |||
| } | |||
| return ids[:i], ids[i:] | |||
| } | |||
| // The deadline to ack is derived from a percentile distribution based | |||
| // on the time it takes to process messages. The percentile chosen is the 99%th | |||
| // percentile - that is, processing times up to the 99%th longest processing | |||
| // times should be safe. The highest 1% may expire. This number was chosen | |||
| // as a way to cover most users' usecases without losing the value of | |||
| // expiration. | |||
| func (it *messageIterator) ackDeadline() time.Duration { | |||
| pt := time.Duration(it.ackTimeDist.Percentile(.99)) * time.Second | |||
| if pt > maxAckDeadline { | |||
| return maxAckDeadline | |||
| } | |||
| if pt < minAckDeadline { | |||
| return minAckDeadline | |||
| } | |||
| return pt | |||
| } | |||
| @@ -0,0 +1,100 @@ | |||
| // Copyright 2016 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| package pubsub | |||
| import ( | |||
| "time" | |||
| "github.com/golang/protobuf/ptypes" | |||
| pb "google.golang.org/genproto/googleapis/pubsub/v1" | |||
| ) | |||
// Message represents a Pub/Sub message.
type Message struct {
	// ID identifies this message.
	// This ID is assigned by the server and is populated for Messages obtained from a subscription.
	// This field is read-only.
	ID string
	// Data is the actual data in the message.
	Data []byte
	// Attributes represents the key-value pairs the current message
	// is labelled with.
	Attributes map[string]string
	// ackID is the identifier to acknowledge this message.
	ackID string
	// The time at which the message was published.
	// This is populated by the server for Messages obtained from a subscription.
	// This field is read-only.
	PublishTime time.Time
	// receiveTime is the time the message was received by the client.
	receiveTime time.Time
	// size is the approximate size of the message's data and attributes.
	size int
	// calledDone guards doneFunc so that Ack/Nack take effect at most once.
	calledDone bool
	// The done method of the iterator that created this Message.
	// Arguments: ackID, ack (true) or nack (false), and the receive time.
	doneFunc func(string, bool, time.Time)
}
| func toMessage(resp *pb.ReceivedMessage) (*Message, error) { | |||
| if resp.Message == nil { | |||
| return &Message{ackID: resp.AckId}, nil | |||
| } | |||
| pubTime, err := ptypes.Timestamp(resp.Message.PublishTime) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return &Message{ | |||
| ackID: resp.AckId, | |||
| Data: resp.Message.Data, | |||
| Attributes: resp.Message.Attributes, | |||
| ID: resp.Message.MessageId, | |||
| PublishTime: pubTime, | |||
| }, nil | |||
| } | |||
// Ack indicates successful processing of a Message passed to the Subscriber.Receive callback.
// It should not be called on any other Message value.
// If message acknowledgement fails, the Message will be redelivered.
// Client code must call Ack or Nack when finished for each received Message.
// Calls to Ack or Nack have no effect after the first call.
func (m *Message) Ack() {
	// Routes to the creating iterator's done handler; effective at most once.
	m.done(true)
}
// Nack indicates that the client will not or cannot process a Message passed to the Subscriber.Receive callback.
// It should not be called on any other Message value.
// Nack will result in the Message being redelivered more quickly than if it were allowed to expire.
// Client code must call Ack or Nack when finished for each received Message.
// Calls to Ack or Nack have no effect after the first call.
func (m *Message) Nack() {
	// Routes to the creating iterator's done handler; effective at most once.
	m.done(false)
}
| func (m *Message) done(ack bool) { | |||
| if m.calledDone { | |||
| return | |||
| } | |||
| m.calledDone = true | |||
| m.doneFunc(m.ackID, ack, m.receiveTime) | |||
| } | |||
| @@ -0,0 +1,25 @@ | |||
| // Copyright 2018 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| // +build !psdebug | |||
| package pubsub | |||
| import "time" | |||
// addRecv records receipt of a message. No-op here; this file is compiled
// only when the psdebug build tag is absent (see the build constraint above).
func addRecv(string, string, time.Time) {}

// addAcks records a batch of acked IDs. No-op in non-psdebug builds.
func addAcks([]string) {}

// addModAcks records a batch of modacked IDs and their new deadline (seconds).
// No-op in non-psdebug builds.
func addModAcks([]string, int32) {}
| @@ -0,0 +1,108 @@ | |||
| // Copyright 2014 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| package pubsub // import "cloud.google.com/go/pubsub" | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "os" | |||
| "runtime" | |||
| "time" | |||
| "cloud.google.com/go/internal/version" | |||
| vkit "cloud.google.com/go/pubsub/apiv1" | |||
| "google.golang.org/api/option" | |||
| "google.golang.org/grpc" | |||
| "google.golang.org/grpc/keepalive" | |||
| ) | |||
const (
	// ScopePubSub grants permissions to view and manage Pub/Sub
	// topics and subscriptions.
	ScopePubSub = "https://www.googleapis.com/auth/pubsub"
	// ScopeCloudPlatform grants permissions to view and manage your data
	// across Google Cloud Platform services.
	ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform"
	// maxAckDeadline is the upper bound applied when clamping computed ack
	// deadlines (see messageIterator.ackDeadline) and sizes the ack-time
	// distribution in newMessageIterator.
	maxAckDeadline = 10 * time.Minute
)
// Client is a Google Pub/Sub client scoped to a single project.
//
// Clients should be reused rather than being created as needed.
// A Client may be shared by multiple goroutines.
type Client struct {
	projectID string
	pubc      *vkit.PublisherClient  // low-level publisher API client; owns the gRPC connection
	subc      *vkit.SubscriberClient // low-level subscriber API client; shares pubc's connection
}
| // NewClient creates a new PubSub client. | |||
| func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (c *Client, err error) { | |||
| var o []option.ClientOption | |||
| // Environment variables for gcloud emulator: | |||
| // https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/ | |||
| if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" { | |||
| conn, err := grpc.Dial(addr, grpc.WithInsecure()) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("grpc.Dial: %v", err) | |||
| } | |||
| o = []option.ClientOption{option.WithGRPCConn(conn)} | |||
| } else { | |||
| o = []option.ClientOption{ | |||
| // Create multiple connections to increase throughput. | |||
| option.WithGRPCConnectionPool(runtime.GOMAXPROCS(0)), | |||
| option.WithGRPCDialOption(grpc.WithKeepaliveParams(keepalive.ClientParameters{ | |||
| Time: 5 * time.Minute, | |||
| })), | |||
| } | |||
| o = append(o, openCensusOptions()...) | |||
| } | |||
| o = append(o, opts...) | |||
| pubc, err := vkit.NewPublisherClient(ctx, o...) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("pubsub: %v", err) | |||
| } | |||
| subc, err := vkit.NewSubscriberClient(ctx, option.WithGRPCConn(pubc.Connection())) | |||
| if err != nil { | |||
| // Should never happen, since we are passing in the connection. | |||
| // If it does, we cannot close, because the user may have passed in their | |||
| // own connection originally. | |||
| return nil, fmt.Errorf("pubsub: %v", err) | |||
| } | |||
| pubc.SetGoogleClientInfo("gccl", version.Repo) | |||
| return &Client{ | |||
| projectID: projectID, | |||
| pubc: pubc, | |||
| subc: subc, | |||
| }, nil | |||
| } | |||
| // Close releases any resources held by the client, | |||
| // such as memory and goroutines. | |||
| // | |||
| // If the client is available for the lifetime of the program, then Close need not be | |||
| // called at exit. | |||
| func (c *Client) Close() error { | |||
| // Return the first error, because the first call closes the connection. | |||
| err := c.pubc.Close() | |||
| _ = c.subc.Close() | |||
| return err | |||
| } | |||
| func (c *Client) fullyQualifiedProjectName() string { | |||
| return fmt.Sprintf("projects/%s", c.projectID) | |||
| } | |||
| @@ -0,0 +1,192 @@ | |||
| // Copyright 2018 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| package pubsub | |||
| import ( | |||
| "context" | |||
| "io" | |||
| "sync" | |||
| "time" | |||
| gax "github.com/googleapis/gax-go/v2" | |||
| pb "google.golang.org/genproto/googleapis/pubsub/v1" | |||
| "google.golang.org/grpc" | |||
| ) | |||
// A pullStream supports the methods of a StreamingPullClient, but re-opens
// the stream on a retryable error.
type pullStream struct {
	ctx context.Context
	// open establishes a new stream and sends the initial request on it.
	open func() (pb.Subscriber_StreamingPullClient, error)
	mu sync.Mutex
	// spc is the current stream. A fresh pointer is allocated on each
	// reopen so stale callers can be detected (see get).
	spc *pb.Subscriber_StreamingPullClient
	err error // permanent error
}
// streamingPullFunc is the signature of SubscriberClient.StreamingPull,
// factored out as a type so tests can substitute a fake stream opener.
type streamingPullFunc func(context.Context, ...gax.CallOption) (pb.Subscriber_StreamingPullClient, error)
| func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName string) *pullStream { | |||
| ctx = withSubscriptionKey(ctx, subName) | |||
| return &pullStream{ | |||
| ctx: ctx, | |||
| open: func() (pb.Subscriber_StreamingPullClient, error) { | |||
| spc, err := streamingPull(ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) | |||
| if err == nil { | |||
| recordStat(ctx, StreamRequestCount, 1) | |||
| err = spc.Send(&pb.StreamingPullRequest{ | |||
| Subscription: subName, | |||
| // We modack messages when we receive them, so this value doesn't matter too much. | |||
| StreamAckDeadlineSeconds: 60, | |||
| }) | |||
| } | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return spc, nil | |||
| }, | |||
| } | |||
| } | |||
// get returns either a valid *StreamingPullClient (SPC), or a permanent error.
// If the argument is nil, this is the first call for an RPC, and the current
// SPC will be returned (or a new one will be opened). Otherwise, this call is a
// request to re-open the stream because of a retryable error, and the argument
// is a pointer to the SPC that returned the error.
func (s *pullStream) get(spc *pb.Subscriber_StreamingPullClient) (*pb.Subscriber_StreamingPullClient, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	// A stored error is permanent.
	if s.err != nil {
		return nil, s.err
	}
	// If the context is done, so are we.
	s.err = s.ctx.Err()
	if s.err != nil {
		return nil, s.err
	}
	// If the current and argument SPCs differ, return the current one. This subsumes two cases:
	// 1. We have an SPC and the caller is getting the stream for the first time.
	// 2. The caller wants to retry, but they have an older SPC; we've already retried.
	if spc != s.spc {
		return s.spc, nil
	}
	// Either this is the very first call on this stream (s.spc == nil), or we have a valid
	// retry request. Either way, open a new stream.
	// The lock is held here for a long time, but it doesn't matter because no callers could get
	// anything done anyway.
	// A fresh pointer is allocated so that callers still holding the old one
	// are recognized as stale by the identity check above on their next retry.
	s.spc = new(pb.Subscriber_StreamingPullClient)
	*s.spc, s.err = s.openWithRetry() // Any error from openWithRetry is permanent.
	return s.spc, s.err
}
| func (s *pullStream) openWithRetry() (pb.Subscriber_StreamingPullClient, error) { | |||
| r := defaultRetryer{} | |||
| for { | |||
| recordStat(s.ctx, StreamOpenCount, 1) | |||
| spc, err := s.open() | |||
| bo, shouldRetry := r.Retry(err) | |||
| if err != nil && shouldRetry { | |||
| recordStat(s.ctx, StreamRetryCount, 1) | |||
| if err := gax.Sleep(s.ctx, bo); err != nil { | |||
| return nil, err | |||
| } | |||
| continue | |||
| } | |||
| return spc, err | |||
| } | |||
| } | |||
// call runs f against the current stream, transparently re-opening the
// stream and retrying on retryable errors (classified by the retryer in
// opts, or the default retryer). A non-retryable error is stored as the
// stream's permanent error and returned.
func (s *pullStream) call(f func(pb.Subscriber_StreamingPullClient) error, opts ...gax.CallOption) error {
	// Resolve a retryer from the call options; fall back to the default.
	var settings gax.CallSettings
	for _, opt := range opts {
		opt.Resolve(&settings)
	}
	var r gax.Retryer = &defaultRetryer{}
	if settings.Retry != nil {
		r = settings.Retry()
	}
	var (
		spc *pb.Subscriber_StreamingPullClient
		err error
	)
	for {
		// Passing the previous spc back in asks get to re-open only if no
		// other caller has already replaced it (see get's identity check).
		spc, err = s.get(spc)
		if err != nil {
			return err
		}
		start := time.Now()
		err = f(*spc)
		if err != nil {
			bo, shouldRetry := r.Retry(err)
			if shouldRetry {
				recordStat(s.ctx, StreamRetryCount, 1)
				if time.Since(start) < 30*time.Second { // don't sleep if we've been blocked for a while
					if err := gax.Sleep(s.ctx, bo); err != nil {
						return err
					}
				}
				continue
			}
			// Non-retryable: record as the stream's permanent error.
			s.mu.Lock()
			s.err = err
			s.mu.Unlock()
		}
		return err
	}
}
| func (s *pullStream) Send(req *pb.StreamingPullRequest) error { | |||
| return s.call(func(spc pb.Subscriber_StreamingPullClient) error { | |||
| recordStat(s.ctx, AckCount, int64(len(req.AckIds))) | |||
| zeroes := 0 | |||
| for _, mds := range req.ModifyDeadlineSeconds { | |||
| if mds == 0 { | |||
| zeroes++ | |||
| } | |||
| } | |||
| recordStat(s.ctx, NackCount, int64(zeroes)) | |||
| recordStat(s.ctx, ModAckCount, int64(len(req.ModifyDeadlineSeconds)-zeroes)) | |||
| recordStat(s.ctx, StreamRequestCount, 1) | |||
| return spc.Send(req) | |||
| }) | |||
| } | |||
| func (s *pullStream) Recv() (*pb.StreamingPullResponse, error) { | |||
| var res *pb.StreamingPullResponse | |||
| err := s.call(func(spc pb.Subscriber_StreamingPullClient) error { | |||
| var err error | |||
| recordStat(s.ctx, StreamResponseCount, 1) | |||
| res, err = spc.Recv() | |||
| if err == nil { | |||
| recordStat(s.ctx, PullCount, int64(len(res.ReceivedMessages))) | |||
| } | |||
| return err | |||
| }, gax.WithRetry(func() gax.Retryer { return &streamingPullRetryer{defaultRetryer: &defaultRetryer{}} })) | |||
| return res, err | |||
| } | |||
| func (s *pullStream) CloseSend() error { | |||
| err := s.call(func(spc pb.Subscriber_StreamingPullClient) error { | |||
| return spc.CloseSend() | |||
| }) | |||
| s.mu.Lock() | |||
| s.err = io.EOF // should not be retried | |||
| s.mu.Unlock() | |||
| return err | |||
| } | |||
| @@ -0,0 +1,100 @@ | |||
| // Copyright 2016 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| package pubsub | |||
| import ( | |||
| "fmt" | |||
| "math" | |||
| "strings" | |||
| "time" | |||
| gax "github.com/googleapis/gax-go/v2" | |||
| pb "google.golang.org/genproto/googleapis/pubsub/v1" | |||
| "google.golang.org/grpc/codes" | |||
| "google.golang.org/grpc/status" | |||
| ) | |||
| // maxPayload is the maximum number of bytes to devote to the | |||
| // encoded AcknowledgementRequest / ModifyAckDeadline proto message. | |||
| // | |||
// With gRPC there is no way for the client to know the server's max message size (it is
// configurable on the server). We know from experience that it is 512K.
const (
	// maxPayload bounds the encoded size of a single acknowledge /
	// modify-ack-deadline request proto (see the comment above).
	maxPayload = 512 * 1024
	// maxSendRecvBytes caps gRPC send/receive message sizes — presumably
	// applied via call options elsewhere in the package; confirm at usage sites.
	maxSendRecvBytes = 20 * 1024 * 1024 // 20M
)
| func convertMessages(rms []*pb.ReceivedMessage) ([]*Message, error) { | |||
| msgs := make([]*Message, 0, len(rms)) | |||
| for i, m := range rms { | |||
| msg, err := toMessage(m) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m) | |||
| } | |||
| msgs = append(msgs, msg) | |||
| } | |||
| return msgs, nil | |||
| } | |||
| func trunc32(i int64) int32 { | |||
| if i > math.MaxInt32 { | |||
| i = math.MaxInt32 | |||
| } | |||
| return int32(i) | |||
| } | |||
// defaultRetryer implements gax.Retryer, deciding from an RPC error whether
// to retry and how long to pause before the next attempt.
type defaultRetryer struct {
	bo gax.Backoff // backoff state; Pause() yields the next delay
}
| // Logic originally from | |||
| // https://github.com/GoogleCloudPlatform/google-cloud-java/blob/master/google-cloud-clients/google-cloud-pubsub/src/main/java/com/google/cloud/pubsub/v1/StatusUtil.java | |||
| func (r *defaultRetryer) Retry(err error) (pause time.Duration, shouldRetry bool) { | |||
| s, ok := status.FromError(err) | |||
| if !ok { // includes io.EOF, normal stream close, which causes us to reopen | |||
| return r.bo.Pause(), true | |||
| } | |||
| switch s.Code() { | |||
| case codes.DeadlineExceeded, codes.Internal, codes.ResourceExhausted, codes.Aborted: | |||
| return r.bo.Pause(), true | |||
| case codes.Unavailable: | |||
| c := strings.Contains(s.Message(), "Server shutdownNow invoked") | |||
| if !c { | |||
| return r.bo.Pause(), true | |||
| } | |||
| return 0, false | |||
| default: | |||
| return 0, false | |||
| } | |||
| } | |||
// streamingPullRetryer wraps another retryer, removing ResourceExhausted
// from the set of retryable codes (see Retry below).
type streamingPullRetryer struct {
	defaultRetryer gax.Retryer // delegate used for all other error codes
}
| // Does not retry ResourceExhausted. See: https://github.com/GoogleCloudPlatform/google-cloud-go/issues/1166#issuecomment-443744705 | |||
| func (r *streamingPullRetryer) Retry(err error) (pause time.Duration, shouldRetry bool) { | |||
| s, ok := status.FromError(err) | |||
| if !ok { // call defaultRetryer so that its backoff can be used | |||
| return r.defaultRetryer.Retry(err) | |||
| } | |||
| switch s.Code() { | |||
| case codes.ResourceExhausted: | |||
| return 0, false | |||
| default: | |||
| return r.defaultRetryer.Retry(err) | |||
| } | |||
| } | |||
| @@ -0,0 +1,160 @@ | |||
| // Copyright 2017 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| package pubsub | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "strings" | |||
| "time" | |||
| "github.com/golang/protobuf/ptypes" | |||
| pb "google.golang.org/genproto/googleapis/pubsub/v1" | |||
| ) | |||
// Snapshot is a reference to a PubSub snapshot.
type Snapshot struct {
	// c is the parent client, used to issue RPCs for this snapshot.
	c *Client
	// The fully qualified identifier for the snapshot, in the format "projects/<projid>/snapshots/<snap>"
	name string
}
| // ID returns the unique identifier of the snapshot within its project. | |||
| func (s *Snapshot) ID() string { | |||
| slash := strings.LastIndex(s.name, "/") | |||
| if slash == -1 { | |||
| // name is not a fully-qualified name. | |||
| panic("bad snapshot name") | |||
| } | |||
| return s.name[slash+1:] | |||
| } | |||
// SnapshotConfig contains the details of a Snapshot.
type SnapshotConfig struct {
	*Snapshot
	// Topic is the topic the snapshot retains messages from (taken from the
	// server's snapshot record; see toSnapshotConfig).
	Topic *Topic
	// Expiration is the snapshot's expiration time as reported by the server.
	Expiration time.Time
}
| // Snapshot creates a reference to a snapshot. | |||
| func (c *Client) Snapshot(id string) *Snapshot { | |||
| return &Snapshot{ | |||
| c: c, | |||
| name: fmt.Sprintf("projects/%s/snapshots/%s", c.projectID, id), | |||
| } | |||
| } | |||
| // Snapshots returns an iterator which returns snapshots for this project. | |||
| func (c *Client) Snapshots(ctx context.Context) *SnapshotConfigIterator { | |||
| it := c.subc.ListSnapshots(ctx, &pb.ListSnapshotsRequest{ | |||
| Project: c.fullyQualifiedProjectName(), | |||
| }) | |||
| next := func() (*SnapshotConfig, error) { | |||
| snap, err := it.Next() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return toSnapshotConfig(snap, c) | |||
| } | |||
| return &SnapshotConfigIterator{next: next} | |||
| } | |||
// SnapshotConfigIterator is an iterator that returns a series of snapshots.
type SnapshotConfigIterator struct {
	// next produces the next SnapshotConfig; installed by Client.Snapshots.
	next func() (*SnapshotConfig, error)
}
// Next returns the next SnapshotConfig. Its second return value is iterator.Done if there are no more results.
// Once Next returns iterator.Done, all subsequent calls will return iterator.Done.
func (snaps *SnapshotConfigIterator) Next() (*SnapshotConfig, error) {
	// Delegate to the fetch function installed by Client.Snapshots.
	return snaps.next()
}
| // Delete deletes a snapshot. | |||
| func (s *Snapshot) Delete(ctx context.Context) error { | |||
| return s.c.subc.DeleteSnapshot(ctx, &pb.DeleteSnapshotRequest{Snapshot: s.name}) | |||
| } | |||
| // SeekToTime seeks the subscription to a point in time. | |||
| // | |||
| // Messages retained in the subscription that were published before this | |||
| // time are marked as acknowledged, and messages retained in the | |||
| // subscription that were published after this time are marked as | |||
| // unacknowledged. Note that this operation affects only those messages | |||
| // retained in the subscription (configured by SnapshotConfig). For example, | |||
| // if `time` corresponds to a point before the message retention | |||
| // window (or to a point before the system's notion of the subscription | |||
| // creation time), only retained messages will be marked as unacknowledged, | |||
| // and already-expunged messages will not be restored. | |||
| func (s *Subscription) SeekToTime(ctx context.Context, t time.Time) error { | |||
| ts, err := ptypes.TimestampProto(t) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| _, err = s.c.subc.Seek(ctx, &pb.SeekRequest{ | |||
| Subscription: s.name, | |||
| Target: &pb.SeekRequest_Time{Time: ts}, | |||
| }) | |||
| return err | |||
| } | |||
| // CreateSnapshot creates a new snapshot from this subscription. | |||
| // The snapshot will be for the topic this subscription is subscribed to. | |||
| // If the name is empty string, a unique name is assigned. | |||
| // | |||
| // The created snapshot is guaranteed to retain: | |||
| // (a) The existing backlog on the subscription. More precisely, this is | |||
| // defined as the messages in the subscription's backlog that are | |||
| // unacknowledged when Snapshot returns without error. | |||
| // (b) Any messages published to the subscription's topic following | |||
| // Snapshot returning without error. | |||
| func (s *Subscription) CreateSnapshot(ctx context.Context, name string) (*SnapshotConfig, error) { | |||
| if name != "" { | |||
| name = fmt.Sprintf("projects/%s/snapshots/%s", strings.Split(s.name, "/")[1], name) | |||
| } | |||
| snap, err := s.c.subc.CreateSnapshot(ctx, &pb.CreateSnapshotRequest{ | |||
| Name: name, | |||
| Subscription: s.name, | |||
| }) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return toSnapshotConfig(snap, s.c) | |||
| } | |||
| // SeekToSnapshot seeks the subscription to a snapshot. | |||
| // | |||
| // The snapshot need not be created from this subscription, | |||
| // but it must be for the topic this subscription is subscribed to. | |||
| func (s *Subscription) SeekToSnapshot(ctx context.Context, snap *Snapshot) error { | |||
| _, err := s.c.subc.Seek(ctx, &pb.SeekRequest{ | |||
| Subscription: s.name, | |||
| Target: &pb.SeekRequest_Snapshot{Snapshot: snap.name}, | |||
| }) | |||
| return err | |||
| } | |||
| func toSnapshotConfig(snap *pb.Snapshot, c *Client) (*SnapshotConfig, error) { | |||
| exp, err := ptypes.Timestamp(snap.ExpireTime) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return &SnapshotConfig{ | |||
| Snapshot: &Snapshot{c: c, name: snap.Name}, | |||
| Topic: newTopic(c, snap.Topic), | |||
| Expiration: exp, | |||
| }, nil | |||
| } | |||
| @@ -0,0 +1,741 @@ | |||
| // Copyright 2016 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| package pubsub | |||
| import ( | |||
| "context" | |||
| "errors" | |||
| "fmt" | |||
| "io" | |||
| "strings" | |||
| "sync" | |||
| "time" | |||
| "cloud.google.com/go/iam" | |||
| "cloud.google.com/go/internal/optional" | |||
| "github.com/golang/protobuf/ptypes" | |||
| durpb "github.com/golang/protobuf/ptypes/duration" | |||
| gax "github.com/googleapis/gax-go/v2" | |||
| "golang.org/x/sync/errgroup" | |||
| pb "google.golang.org/genproto/googleapis/pubsub/v1" | |||
| fmpb "google.golang.org/genproto/protobuf/field_mask" | |||
| "google.golang.org/grpc/codes" | |||
| "google.golang.org/grpc/status" | |||
| ) | |||
// Subscription is a reference to a PubSub subscription.
type Subscription struct {
	// c is the parent client, used to issue RPCs for this subscription.
	c *Client
	// The fully qualified identifier for the subscription, in the format "projects/<projid>/subscriptions/<name>"
	name string
	// Settings for pulling messages. Configure these before calling Receive.
	ReceiveSettings ReceiveSettings
	// mu guards receiveActive.
	mu sync.Mutex
	// receiveActive reports whether a Receive call is currently in progress.
	receiveActive bool
}
// Subscription creates a reference to a subscription in the client's own
// project. No RPC is performed.
func (c *Client) Subscription(id string) *Subscription {
	return c.SubscriptionInProject(id, c.projectID)
}
| // SubscriptionInProject creates a reference to a subscription in a given project. | |||
| func (c *Client) SubscriptionInProject(id, projectID string) *Subscription { | |||
| return &Subscription{ | |||
| c: c, | |||
| name: fmt.Sprintf("projects/%s/subscriptions/%s", projectID, id), | |||
| } | |||
| } | |||
// String returns the globally unique printable name of the subscription.
func (s *Subscription) String() string {
	// name is already the fully qualified "projects/<projid>/subscriptions/<name>" form.
	return s.name
}
| // ID returns the unique identifier of the subscription within its project. | |||
| func (s *Subscription) ID() string { | |||
| slash := strings.LastIndex(s.name, "/") | |||
| if slash == -1 { | |||
| // name is not a fully-qualified name. | |||
| panic("bad subscription name") | |||
| } | |||
| return s.name[slash+1:] | |||
| } | |||
| // Subscriptions returns an iterator which returns all of the subscriptions for the client's project. | |||
| func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator { | |||
| it := c.subc.ListSubscriptions(ctx, &pb.ListSubscriptionsRequest{ | |||
| Project: c.fullyQualifiedProjectName(), | |||
| }) | |||
| return &SubscriptionIterator{ | |||
| c: c, | |||
| next: func() (string, error) { | |||
| sub, err := it.Next() | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| return sub.Name, nil | |||
| }, | |||
| } | |||
| } | |||
// SubscriptionIterator is an iterator that returns a series of subscriptions.
type SubscriptionIterator struct {
	// c is used to construct the Subscription values returned by Next.
	c *Client
	// next returns the fully qualified name of the next subscription.
	next func() (string, error)
}
| // Next returns the next subscription. If there are no more subscriptions, iterator.Done will be returned. | |||
| func (subs *SubscriptionIterator) Next() (*Subscription, error) { | |||
| subName, err := subs.next() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return &Subscription{c: subs.c, name: subName}, nil | |||
| } | |||
// PushConfig contains configuration for subscriptions that operate in push mode.
// It is set via SubscriptionConfig.PushConfig.
type PushConfig struct {
	// A URL locating the endpoint to which messages should be pushed.
	Endpoint string

	// Endpoint configuration attributes. See https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions#pushconfig for more details.
	Attributes map[string]string

	// AuthenticationMethod is used by push endpoints to verify the source
	// of push requests.
	// It can be used with push endpoints that are private by default to
	// allow requests only from the Cloud Pub/Sub system, for example.
	// This field is optional and should be set only by users interested in
	// authenticated push.
	//
	// It is EXPERIMENTAL and a part of a closed alpha that may not be
	// accessible to all users. This field is subject to change or removal
	// without notice.
	AuthenticationMethod AuthenticationMethod
}
| func (pc *PushConfig) toProto() *pb.PushConfig { | |||
| if pc == nil { | |||
| return nil | |||
| } | |||
| pbCfg := &pb.PushConfig{ | |||
| Attributes: pc.Attributes, | |||
| PushEndpoint: pc.Endpoint, | |||
| } | |||
| if authMethod := pc.AuthenticationMethod; authMethod != nil { | |||
| switch am := authMethod.(type) { | |||
| case *OIDCToken: | |||
| pbCfg.AuthenticationMethod = am.toProto() | |||
| default: // TODO: add others here when GAIC adds more definitions. | |||
| } | |||
| } | |||
| return pbCfg | |||
| } | |||
// AuthenticationMethod is used by push endpoints to verify the source of push requests.
// This interface defines fields that are part of a closed alpha that may not be accessible
// to all users.
type AuthenticationMethod interface {
	// isAuthMethod is an unexported marker method restricting
	// implementations to this package.
	isAuthMethod() bool
}
// OIDCToken allows PushConfigs to be authenticated using
// the OpenID Connect protocol https://openid.net/connect/
// It implements AuthenticationMethod.
type OIDCToken struct {
	// Audience to be used when generating OIDC token. The audience claim
	// identifies the recipients that the JWT is intended for. The audience
	// value is a single case-sensitive string. Having multiple values (array)
	// for the audience field is not supported. More info about the OIDC JWT
	// token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3
	// Note: if not specified, the Push endpoint URL will be used.
	Audience string

	// The service account email to be used for generating the OpenID Connect token.
	// The caller of:
	//  * CreateSubscription
	//  * UpdateSubscription
	//  * ModifyPushConfig
	// calls must have the iam.serviceAccounts.actAs permission for the service account.
	// See https://cloud.google.com/iam/docs/understanding-roles#service-accounts-roles.
	ServiceAccountEmail string
}
// Compile-time check that *OIDCToken satisfies AuthenticationMethod.
var _ AuthenticationMethod = (*OIDCToken)(nil)

// isAuthMethod marks *OIDCToken as a valid AuthenticationMethod.
func (oidcToken *OIDCToken) isAuthMethod() bool { return true }
| func (oidcToken *OIDCToken) toProto() *pb.PushConfig_OidcToken_ { | |||
| if oidcToken == nil { | |||
| return nil | |||
| } | |||
| return &pb.PushConfig_OidcToken_{ | |||
| OidcToken: &pb.PushConfig_OidcToken{ | |||
| Audience: oidcToken.Audience, | |||
| ServiceAccountEmail: oidcToken.ServiceAccountEmail, | |||
| }, | |||
| } | |||
| } | |||
// SubscriptionConfig describes the configuration of a subscription.
type SubscriptionConfig struct {
	// Topic is the topic from which the subscription receives messages
	// (see CreateSubscription).
	Topic *Topic

	// PushConfig, if its fields are set, configures push delivery.
	PushConfig PushConfig

	// The default maximum time after a subscriber receives a message before
	// the subscriber should acknowledge the message. Note: messages which are
	// obtained via Subscription.Receive need not be acknowledged within this
	// deadline, as the deadline will be automatically extended.
	AckDeadline time.Duration

	// Whether to retain acknowledged messages. If true, acknowledged messages
	// will not be expunged until they fall out of the RetentionDuration window.
	RetainAckedMessages bool

	// How long to retain messages in backlog, from the time of publish. If
	// RetainAckedMessages is true, this duration affects the retention of
	// acknowledged messages, otherwise only unacknowledged messages are retained.
	// Defaults to 7 days. Cannot be longer than 7 days or shorter than 10 minutes.
	RetentionDuration time.Duration

	// Expiration policy specifies the conditions for a subscription's expiration.
	// A subscription is considered active as long as any connected subscriber is
	// successfully consuming messages from the subscription or is issuing
	// operations on the subscription. If `expiration_policy` is not set, a
	// *default policy* with `ttl` of 31 days will be used. The minimum allowed
	// value for `expiration_policy.ttl` is 1 day.
	//
	// Use time.Duration(0) to indicate that the subscription should never expire.
	//
	// It is EXPERIMENTAL and subject to change or removal without notice.
	ExpirationPolicy optional.Duration

	// The set of labels for the subscription.
	Labels map[string]string
}
| func (cfg *SubscriptionConfig) toProto(name string) *pb.Subscription { | |||
| var pbPushConfig *pb.PushConfig | |||
| if cfg.PushConfig.Endpoint != "" || len(cfg.PushConfig.Attributes) != 0 || cfg.PushConfig.AuthenticationMethod != nil { | |||
| pbPushConfig = cfg.PushConfig.toProto() | |||
| } | |||
| var retentionDuration *durpb.Duration | |||
| if cfg.RetentionDuration != 0 { | |||
| retentionDuration = ptypes.DurationProto(cfg.RetentionDuration) | |||
| } | |||
| return &pb.Subscription{ | |||
| Name: name, | |||
| Topic: cfg.Topic.name, | |||
| PushConfig: pbPushConfig, | |||
| AckDeadlineSeconds: trunc32(int64(cfg.AckDeadline.Seconds())), | |||
| RetainAckedMessages: cfg.RetainAckedMessages, | |||
| MessageRetentionDuration: retentionDuration, | |||
| Labels: cfg.Labels, | |||
| ExpirationPolicy: expirationPolicyToProto(cfg.ExpirationPolicy), | |||
| } | |||
| } | |||
| func protoToSubscriptionConfig(pbSub *pb.Subscription, c *Client) (SubscriptionConfig, error) { | |||
| rd := time.Hour * 24 * 7 | |||
| var err error | |||
| if pbSub.MessageRetentionDuration != nil { | |||
| rd, err = ptypes.Duration(pbSub.MessageRetentionDuration) | |||
| if err != nil { | |||
| return SubscriptionConfig{}, err | |||
| } | |||
| } | |||
| var expirationPolicy time.Duration | |||
| if ttl := pbSub.ExpirationPolicy.GetTtl(); ttl != nil { | |||
| expirationPolicy, err = ptypes.Duration(ttl) | |||
| if err != nil { | |||
| return SubscriptionConfig{}, err | |||
| } | |||
| } | |||
| subC := SubscriptionConfig{ | |||
| Topic: newTopic(c, pbSub.Topic), | |||
| AckDeadline: time.Second * time.Duration(pbSub.AckDeadlineSeconds), | |||
| RetainAckedMessages: pbSub.RetainAckedMessages, | |||
| RetentionDuration: rd, | |||
| Labels: pbSub.Labels, | |||
| ExpirationPolicy: expirationPolicy, | |||
| } | |||
| pc := protoToPushConfig(pbSub.PushConfig) | |||
| if pc != nil { | |||
| subC.PushConfig = *pc | |||
| } | |||
| return subC, nil | |||
| } | |||
| func protoToPushConfig(pbPc *pb.PushConfig) *PushConfig { | |||
| if pbPc == nil { | |||
| return nil | |||
| } | |||
| pc := &PushConfig{ | |||
| Endpoint: pbPc.PushEndpoint, | |||
| Attributes: pbPc.Attributes, | |||
| } | |||
| if am := pbPc.AuthenticationMethod; am != nil { | |||
| if oidcToken, ok := am.(*pb.PushConfig_OidcToken_); ok && oidcToken != nil && oidcToken.OidcToken != nil { | |||
| pc.AuthenticationMethod = &OIDCToken{ | |||
| Audience: oidcToken.OidcToken.GetAudience(), | |||
| ServiceAccountEmail: oidcToken.OidcToken.GetServiceAccountEmail(), | |||
| } | |||
| } | |||
| } | |||
| return pc | |||
| } | |||
// ReceiveSettings configure the Receive method.
// A zero ReceiveSettings will result in values equivalent to DefaultReceiveSettings
// (defaulting is applied elsewhere; see DefaultReceiveSettings below).
type ReceiveSettings struct {
	// MaxExtension is the maximum period for which the Subscription should
	// automatically extend the ack deadline for each message.
	//
	// The Subscription will automatically extend the ack deadline of all
	// fetched Messages up to the duration specified. Automatic deadline
	// extension beyond the initial receipt may be disabled by specifying a
	// duration less than 0.
	MaxExtension time.Duration

	// MaxOutstandingMessages is the maximum number of unprocessed messages
	// (unacknowledged but not yet expired). If MaxOutstandingMessages is 0, it
	// will be treated as if it were DefaultReceiveSettings.MaxOutstandingMessages.
	// If the value is negative, then there will be no limit on the number of
	// unprocessed messages.
	MaxOutstandingMessages int

	// MaxOutstandingBytes is the maximum size of unprocessed messages
	// (unacknowledged but not yet expired). If MaxOutstandingBytes is 0, it will
	// be treated as if it were DefaultReceiveSettings.MaxOutstandingBytes. If
	// the value is negative, then there will be no limit on the number of bytes
	// for unprocessed messages.
	MaxOutstandingBytes int

	// NumGoroutines is the number of goroutines Receive will spawn to pull
	// messages concurrently. If NumGoroutines is less than 1, it will be treated
	// as if it were DefaultReceiveSettings.NumGoroutines.
	//
	// NumGoroutines does not limit the number of messages that can be processed
	// concurrently. Even with one goroutine, many messages might be processed at
	// once, because that goroutine may continually receive messages and invoke the
	// function passed to Receive on them. To limit the number of messages being
	// processed concurrently, set MaxOutstandingMessages.
	NumGoroutines int

	// If Synchronous is true, then no more than MaxOutstandingMessages will be in
	// memory at one time. (In contrast, when Synchronous is false, more than
	// MaxOutstandingMessages may have been received from the service and in memory
	// before being processed.) MaxOutstandingBytes still refers to the total bytes
	// processed, rather than in memory. NumGoroutines is ignored.
	// The default is false.
	Synchronous bool
}
| // For synchronous receive, the time to wait if we are already processing | |||
| // MaxOutstandingMessages. There is no point calling Pull and asking for zero | |||
| // messages, so we pause to allow some message-processing callbacks to finish. | |||
| // | |||
| // The wait time is large enough to avoid consuming significant CPU, but | |||
| // small enough to provide decent throughput. Users who want better | |||
| // throughput should not be using synchronous mode. | |||
| // | |||
| // Waiting might seem like polling, so it's natural to think we could do better by | |||
| // noticing when a callback is finished and immediately calling Pull. But if | |||
| // callbacks finish in quick succession, this will result in frequent Pull RPCs that | |||
| // request a single message, which wastes network bandwidth. Better to wait for a few | |||
| // callbacks to finish, so we make fewer RPCs fetching more messages. | |||
| // | |||
| // This value is unexported so the user doesn't have another knob to think about. Note that | |||
| // it is the same value as the one used for nackTicker, so it matches this client's | |||
| // idea of a duration that is short, but not so short that we perform excessive RPCs. | |||
// synchronousWaitTime is the pause between synchronous Pull attempts when
// MaxOutstandingMessages callbacks are already in flight (see the comment above).
const synchronousWaitTime = 100 * time.Millisecond

// minAckDeadline is presumably the floor applied to ack deadlines elsewhere
// in the package — confirm at usage sites.
// This is a var so that tests can change it.
var minAckDeadline = 10 * time.Second

// DefaultReceiveSettings holds the default values for ReceiveSettings.
var DefaultReceiveSettings = ReceiveSettings{
	MaxExtension:           10 * time.Minute,
	MaxOutstandingMessages: 1000,
	MaxOutstandingBytes:    1e9, // 1G
	NumGoroutines:          1,
}
| // Delete deletes the subscription. | |||
| func (s *Subscription) Delete(ctx context.Context) error { | |||
| return s.c.subc.DeleteSubscription(ctx, &pb.DeleteSubscriptionRequest{Subscription: s.name}) | |||
| } | |||
| // Exists reports whether the subscription exists on the server. | |||
| func (s *Subscription) Exists(ctx context.Context) (bool, error) { | |||
| _, err := s.c.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: s.name}) | |||
| if err == nil { | |||
| return true, nil | |||
| } | |||
| if status.Code(err) == codes.NotFound { | |||
| return false, nil | |||
| } | |||
| return false, err | |||
| } | |||
| // Config fetches the current configuration for the subscription. | |||
| func (s *Subscription) Config(ctx context.Context) (SubscriptionConfig, error) { | |||
| pbSub, err := s.c.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: s.name}) | |||
| if err != nil { | |||
| return SubscriptionConfig{}, err | |||
| } | |||
| cfg, err := protoToSubscriptionConfig(pbSub, s.c) | |||
| if err != nil { | |||
| return SubscriptionConfig{}, err | |||
| } | |||
| return cfg, nil | |||
| } | |||
// SubscriptionConfigToUpdate describes how to update a subscription.
// Zero-valued fields are left unchanged (see updateRequest).
type SubscriptionConfigToUpdate struct {
	// If non-nil, the push config is changed.
	PushConfig *PushConfig

	// If non-zero, the ack deadline is changed.
	AckDeadline time.Duration

	// If set, RetainAckedMessages is changed.
	RetainAckedMessages optional.Bool

	// If non-zero, RetentionDuration is changed.
	RetentionDuration time.Duration

	// If non-zero, Expiration is changed.
	ExpirationPolicy optional.Duration

	// If non-nil, the current set of labels is completely
	// replaced by the new set.
	// This field has beta status. It is not subject to the stability guarantee
	// and may change.
	Labels map[string]string
}
| // Update changes an existing subscription according to the fields set in cfg. | |||
| // It returns the new SubscriptionConfig. | |||
| // | |||
| // Update returns an error if no fields were modified. | |||
| func (s *Subscription) Update(ctx context.Context, cfg SubscriptionConfigToUpdate) (SubscriptionConfig, error) { | |||
| req := s.updateRequest(&cfg) | |||
| if err := cfg.validate(); err != nil { | |||
| return SubscriptionConfig{}, fmt.Errorf("pubsub: UpdateSubscription %v", err) | |||
| } | |||
| if len(req.UpdateMask.Paths) == 0 { | |||
| return SubscriptionConfig{}, errors.New("pubsub: UpdateSubscription call with nothing to update") | |||
| } | |||
| rpsub, err := s.c.subc.UpdateSubscription(ctx, req) | |||
| if err != nil { | |||
| return SubscriptionConfig{}, err | |||
| } | |||
| return protoToSubscriptionConfig(rpsub, s.c) | |||
| } | |||
| func (s *Subscription) updateRequest(cfg *SubscriptionConfigToUpdate) *pb.UpdateSubscriptionRequest { | |||
| psub := &pb.Subscription{Name: s.name} | |||
| var paths []string | |||
| if cfg.PushConfig != nil { | |||
| psub.PushConfig = cfg.PushConfig.toProto() | |||
| paths = append(paths, "push_config") | |||
| } | |||
| if cfg.AckDeadline != 0 { | |||
| psub.AckDeadlineSeconds = trunc32(int64(cfg.AckDeadline.Seconds())) | |||
| paths = append(paths, "ack_deadline_seconds") | |||
| } | |||
| if cfg.RetainAckedMessages != nil { | |||
| psub.RetainAckedMessages = optional.ToBool(cfg.RetainAckedMessages) | |||
| paths = append(paths, "retain_acked_messages") | |||
| } | |||
| if cfg.RetentionDuration != 0 { | |||
| psub.MessageRetentionDuration = ptypes.DurationProto(cfg.RetentionDuration) | |||
| paths = append(paths, "message_retention_duration") | |||
| } | |||
| if cfg.ExpirationPolicy != nil { | |||
| psub.ExpirationPolicy = expirationPolicyToProto(cfg.ExpirationPolicy) | |||
| paths = append(paths, "expiration_policy") | |||
| } | |||
| if cfg.Labels != nil { | |||
| psub.Labels = cfg.Labels | |||
| paths = append(paths, "labels") | |||
| } | |||
| return &pb.UpdateSubscriptionRequest{ | |||
| Subscription: psub, | |||
| UpdateMask: &fmpb.FieldMask{Paths: paths}, | |||
| } | |||
| } | |||
// Bounds and defaults for subscription expiration policies; used by validate
// below.
const (
	// The minimum expiration policy duration is 1 day as per:
	// https://github.com/googleapis/googleapis/blob/51145ff7812d2bb44c1219d0b76dac92a8bd94b2/google/pubsub/v1/pubsub.proto#L606-L607
	minExpirationPolicy = 24 * time.Hour

	// If an expiration policy is not specified, the default of 31 days is used as per:
	// https://github.com/googleapis/googleapis/blob/51145ff7812d2bb44c1219d0b76dac92a8bd94b2/google/pubsub/v1/pubsub.proto#L605-L606
	defaultExpirationPolicy = 31 * 24 * time.Hour
)
| func (cfg *SubscriptionConfigToUpdate) validate() error { | |||
| if cfg == nil || cfg.ExpirationPolicy == nil { | |||
| return nil | |||
| } | |||
| policy, min := optional.ToDuration(cfg.ExpirationPolicy), minExpirationPolicy | |||
| if policy == 0 || policy >= min { | |||
| return nil | |||
| } | |||
| return fmt.Errorf("invalid expiration policy(%q) < minimum(%q)", policy, min) | |||
| } | |||
| func expirationPolicyToProto(expirationPolicy optional.Duration) *pb.ExpirationPolicy { | |||
| if expirationPolicy == nil { | |||
| return nil | |||
| } | |||
| dur := optional.ToDuration(expirationPolicy) | |||
| var ttl *durpb.Duration | |||
| // As per: | |||
| // https://godoc.org/google.golang.org/genproto/googleapis/pubsub/v1#ExpirationPolicy.Ttl | |||
| // if ExpirationPolicy.Ttl is set to nil, the expirationPolicy is toggled to NEVER expire. | |||
| if dur != 0 { | |||
| ttl = ptypes.DurationProto(dur) | |||
| } | |||
| return &pb.ExpirationPolicy{ | |||
| Ttl: ttl, | |||
| } | |||
| } | |||
// IAM returns the subscription's IAM handle.
func (s *Subscription) IAM() *iam.Handle {
	// The handle reuses the subscriber client's gRPC connection.
	return iam.InternalNewHandle(s.c.subc.Connection(), s.name)
}
| // CreateSubscription creates a new subscription on a topic. | |||
| // | |||
| // id is the name of the subscription to create. It must start with a letter, | |||
| // and contain only letters ([A-Za-z]), numbers ([0-9]), dashes (-), | |||
| // underscores (_), periods (.), tildes (~), plus (+) or percent signs (%). It | |||
| // must be between 3 and 255 characters in length, and must not start with | |||
| // "goog". | |||
| // | |||
| // cfg.Topic is the topic from which the subscription should receive messages. It | |||
| // need not belong to the same project as the subscription. This field is required. | |||
| // | |||
| // cfg.AckDeadline is the maximum time after a subscriber receives a message before | |||
| // the subscriber should acknowledge the message. It must be between 10 and 600 | |||
| // seconds (inclusive), and is rounded down to the nearest second. If the | |||
| // provided ackDeadline is 0, then the default value of 10 seconds is used. | |||
| // Note: messages which are obtained via Subscription.Receive need not be | |||
| // acknowledged within this deadline, as the deadline will be automatically | |||
| // extended. | |||
| // | |||
| // cfg.PushConfig may be set to configure this subscription for push delivery. | |||
| // | |||
| // If the subscription already exists an error will be returned. | |||
| func (c *Client) CreateSubscription(ctx context.Context, id string, cfg SubscriptionConfig) (*Subscription, error) { | |||
| if cfg.Topic == nil { | |||
| return nil, errors.New("pubsub: require non-nil Topic") | |||
| } | |||
| if cfg.AckDeadline == 0 { | |||
| cfg.AckDeadline = 10 * time.Second | |||
| } | |||
| if d := cfg.AckDeadline; d < 10*time.Second || d > 600*time.Second { | |||
| return nil, fmt.Errorf("ack deadline must be between 10 and 600 seconds; got: %v", d) | |||
| } | |||
| sub := c.Subscription(id) | |||
| _, err := c.subc.CreateSubscription(ctx, cfg.toProto(sub.name)) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return sub, nil | |||
| } | |||
// errReceiveInProgress is returned by Subscription.Receive when a second
// Receive is started while another is still active on the same Subscription.
var errReceiveInProgress = errors.New("pubsub: Receive already in progress for this subscription")
// Receive calls f with the outstanding messages from the subscription.
// It blocks until ctx is done, or the service returns a non-retryable error.
//
// The standard way to terminate a Receive is to cancel its context:
//
//	cctx, cancel := context.WithCancel(ctx)
//	err := sub.Receive(cctx, callback)
//	// Call cancel from callback, or another goroutine.
//
// If the service returns a non-retryable error, Receive returns that error after
// all of the outstanding calls to f have returned. If ctx is done, Receive
// returns nil after all of the outstanding calls to f have returned and
// all messages have been acknowledged or have expired.
//
// Receive calls f concurrently from multiple goroutines. It is encouraged to
// process messages synchronously in f, even if that processing is relatively
// time-consuming; Receive will spawn new goroutines for incoming messages,
// limited by MaxOutstandingMessages and MaxOutstandingBytes in ReceiveSettings.
//
// The context passed to f will be canceled when ctx is Done or there is a
// fatal service error.
//
// Receive will send an ack deadline extension on message receipt, then
// automatically extend the ack deadline of all fetched Messages up to the
// period specified by s.ReceiveSettings.MaxExtension.
//
// Each Subscription may have only one invocation of Receive active at a time.
func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Message)) error {
	// Enforce the one-active-Receive-per-Subscription invariant under s.mu.
	s.mu.Lock()
	if s.receiveActive {
		s.mu.Unlock()
		return errReceiveInProgress
	}
	s.receiveActive = true
	s.mu.Unlock()
	// Clear the flag on the way out so a later Receive is allowed.
	defer func() { s.mu.Lock(); s.receiveActive = false; s.mu.Unlock() }()
	// Fill in defaults for any zero-valued ReceiveSettings fields.
	maxCount := s.ReceiveSettings.MaxOutstandingMessages
	if maxCount == 0 {
		maxCount = DefaultReceiveSettings.MaxOutstandingMessages
	}
	maxBytes := s.ReceiveSettings.MaxOutstandingBytes
	if maxBytes == 0 {
		maxBytes = DefaultReceiveSettings.MaxOutstandingBytes
	}
	maxExt := s.ReceiveSettings.MaxExtension
	if maxExt == 0 {
		maxExt = DefaultReceiveSettings.MaxExtension
	} else if maxExt < 0 {
		// If MaxExtension is negative, disable automatic extension.
		maxExt = 0
	}
	// Synchronous mode forces a single puller goroutine; otherwise use the
	// configured count, falling back to the package default.
	var numGoroutines int
	switch {
	case s.ReceiveSettings.Synchronous:
		numGoroutines = 1
	case s.ReceiveSettings.NumGoroutines >= 1:
		numGoroutines = s.ReceiveSettings.NumGoroutines
	default:
		numGoroutines = DefaultReceiveSettings.NumGoroutines
	}
	// TODO(jba): add tests that verify that ReceiveSettings are correctly processed.
	po := &pullOptions{
		maxExtension: maxExt,
		maxPrefetch:  trunc32(int64(maxCount)),
		synchronous:  s.ReceiveSettings.Synchronous,
	}
	// The flow controller caps outstanding messages/bytes across all pullers.
	fc := newFlowController(maxCount, maxBytes)
	// Wait for all goroutines started by Receive to return, so instead of an
	// obscure goroutine leak we have an obvious blocked call to Receive.
	group, gctx := errgroup.WithContext(ctx)
	for i := 0; i < numGoroutines; i++ {
		group.Go(func() error {
			return s.receive(gctx, po, fc, f)
		})
	}
	// Wait returns the first non-nil error from any puller (or nil).
	return group.Wait()
}
// receive is one puller goroutine's loop: it repeatedly fetches messages from
// the iterator, reserves flow-control capacity for each, and dispatches each
// message to f on its own goroutine. It returns when the iterator reports EOF
// (clean shutdown) or a fatal error.
func (s *Subscription) receive(ctx context.Context, po *pullOptions, fc *flowController, f func(context.Context, *Message)) error {
	// Cancel a sub-context when we return, to kick the context-aware callbacks
	// and the goroutine below.
	ctx2, cancel := context.WithCancel(ctx)
	// The iterator does not use the context passed to Receive. If it did, canceling
	// that context would immediately stop the iterator without waiting for unacked
	// messages.
	iter := newMessageIterator(s.c.subc, s.name, po)
	// We cannot use errgroup from Receive here. Receive might already be calling group.Wait,
	// and group.Wait cannot be called concurrently with group.Go. We give each receive() its
	// own WaitGroup instead.
	// Since wg.Add is only called from the main goroutine, wg.Wait is guaranteed
	// to be called after all Adds.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		<-ctx2.Done()
		// Call stop when Receive's context is done.
		// Stop will block until all outstanding messages have been acknowledged
		// or there was a fatal service error.
		iter.stop()
		wg.Done()
	}()
	// Deferred in this order so cancel() runs first, which unblocks the
	// goroutine above, which in turn lets wg.Wait() return.
	defer wg.Wait()
	defer cancel()
	for {
		var maxToPull int32 // maximum number of messages to pull
		if po.synchronous {
			if po.maxPrefetch < 0 {
				// If there is no limit on the number of messages to pull, use a reasonable default.
				maxToPull = 1000
			} else {
				// Limit the number of messages in memory to MaxOutstandingMessages
				// (here, po.maxPrefetch). For each message currently in memory, we have
				// called fc.acquire but not fc.release: this is fc.count(). The next
				// call to Pull should fetch no more than the difference between these
				// values.
				maxToPull = po.maxPrefetch - int32(fc.count())
				if maxToPull <= 0 {
					// Wait for some callbacks to finish.
					if err := gax.Sleep(ctx, synchronousWaitTime); err != nil {
						// Return nil if the context is done, not err.
						return nil
					}
					continue
				}
			}
		}
		msgs, err := iter.receive(maxToPull)
		// io.EOF signals a clean iterator shutdown, not a failure.
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		for i, msg := range msgs {
			msg := msg
			// TODO(jba): call acquire closer to when the message is allocated.
			if err := fc.acquire(ctx, len(msg.Data)); err != nil {
				// TODO(jba): test that these "orphaned" messages are nacked immediately when ctx is done.
				for _, m := range msgs[i:] {
					m.Nack()
				}
				// Return nil if the context is done, not err.
				return nil
			}
			// Wrap doneFunc so flow-control capacity is released after the
			// original completion logic runs. Capture msgLen now because the
			// callback may run after msg.Data is no longer referenced here.
			old := msg.doneFunc
			msgLen := len(msg.Data)
			msg.doneFunc = func(ackID string, ack bool, receiveTime time.Time) {
				defer fc.release(msgLen)
				old(ackID, ack, receiveTime)
			}
			// Dispatch the user callback on its own goroutine, tracked by wg
			// so receive() does not return while callbacks are in flight.
			wg.Add(1)
			go func() {
				defer wg.Done()
				f(ctx2, msg)
			}()
		}
	}
}
// pullOptions carries the per-Receive configuration handed to the message
// iterator.
type pullOptions struct {
	// maxExtension is the period for which ack deadlines are automatically
	// extended; 0 disables extension.
	maxExtension time.Duration
	// maxPrefetch bounds how many messages may be requested at once;
	// a negative value means no limit.
	maxPrefetch int32
	// If true, use unary Pull instead of StreamingPull, and never pull more
	// than maxPrefetch messages.
	synchronous bool
}
| @@ -0,0 +1,550 @@ | |||
| // Copyright 2016 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| package pubsub | |||
| import ( | |||
| "context" | |||
| "errors" | |||
| "fmt" | |||
| "log" | |||
| "runtime" | |||
| "strings" | |||
| "sync" | |||
| "time" | |||
| "cloud.google.com/go/iam" | |||
| "github.com/golang/protobuf/proto" | |||
| gax "github.com/googleapis/gax-go/v2" | |||
| "go.opencensus.io/stats" | |||
| "go.opencensus.io/tag" | |||
| "google.golang.org/api/support/bundler" | |||
| pb "google.golang.org/genproto/googleapis/pubsub/v1" | |||
| fmpb "google.golang.org/genproto/protobuf/field_mask" | |||
| "google.golang.org/grpc" | |||
| "google.golang.org/grpc/codes" | |||
| "google.golang.org/grpc/status" | |||
| ) | |||
const (
	// MaxPublishRequestCount is the maximum number of messages that can be in
	// a single publish request, as defined by the PubSub service.
	MaxPublishRequestCount = 1000
	// MaxPublishRequestBytes is the maximum size of a single publish request
	// in bytes, as defined by the PubSub service.
	// Note: 1e7 is an untyped constant (10,000,000 bytes, roughly 10 MB).
	MaxPublishRequestBytes = 1e7
)
// ErrOversizedMessage indicates that a message's size exceeds MaxPublishRequestBytes.
// It aliases the bundler's oversized-item error so callers can compare with ==.
var ErrOversizedMessage = bundler.ErrOversizedItem
// Topic is a reference to a PubSub topic.
//
// The methods of Topic are safe for use by multiple goroutines.
type Topic struct {
	c *Client
	// The fully qualified identifier for the topic, in the format "projects/<projid>/topics/<name>"
	name string
	// Settings for publishing messages. All changes must be made before the
	// first call to Publish. The default is DefaultPublishSettings.
	PublishSettings PublishSettings
	// mu guards stopped and bundler.
	mu sync.RWMutex
	// stopped is set by Stop; once true, Publish fails immediately.
	stopped bool
	// bundler batches outgoing messages; created lazily by initBundler on
	// the first Publish.
	bundler *bundler.Bundler
}
// PublishSettings control the bundling of published messages.
// Zero-valued fields fall back to DefaultPublishSettings where noted.
type PublishSettings struct {
	// Publish a non-empty batch after this delay has passed.
	DelayThreshold time.Duration
	// Publish a batch when it has this many messages. The maximum is
	// MaxPublishRequestCount.
	CountThreshold int
	// Publish a batch when its size in bytes reaches this value.
	ByteThreshold int
	// The number of goroutines that invoke the Publish RPC concurrently.
	//
	// Defaults to a multiple of GOMAXPROCS.
	NumGoroutines int
	// The maximum time that the client will attempt to publish a bundle of messages.
	Timeout time.Duration
	// The maximum number of bytes that the Bundler will keep in memory before
	// returning ErrOverflow.
	//
	// Defaults to DefaultPublishSettings.BufferedByteLimit.
	BufferedByteLimit int
}
// DefaultPublishSettings holds the default values for topics' PublishSettings.
// These are applied field-by-field when the corresponding PublishSettings
// field is zero (see initBundler).
var DefaultPublishSettings = PublishSettings{
	DelayThreshold: 1 * time.Millisecond,
	CountThreshold: 100,
	ByteThreshold:  1e6,
	Timeout:        60 * time.Second,
	// By default, limit the bundler to 10 times the max message size. The number 10 is
	// chosen as a reasonable amount of messages in the worst case whilst still
	// capping the number to a low enough value to not OOM users.
	BufferedByteLimit: 10 * MaxPublishRequestBytes,
}
| // CreateTopic creates a new topic. | |||
| // | |||
| // The specified topic ID must start with a letter, and contain only letters | |||
| // ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.), | |||
| // tildes (~), plus (+) or percent signs (%). It must be between 3 and 255 | |||
| // characters in length, and must not start with "goog". For more information, | |||
| // see: https://cloud.google.com/pubsub/docs/admin#resource_names | |||
| // | |||
| // If the topic already exists an error will be returned. | |||
| func (c *Client) CreateTopic(ctx context.Context, topicID string) (*Topic, error) { | |||
| t := c.Topic(topicID) | |||
| _, err := c.pubc.CreateTopic(ctx, &pb.Topic{Name: t.name}) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return t, nil | |||
| } | |||
| // CreateTopicWithConfig creates a topic from TopicConfig. | |||
| // | |||
| // The specified topic ID must start with a letter, and contain only letters | |||
| // ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.), | |||
| // tildes (~), plus (+) or percent signs (%). It must be between 3 and 255 | |||
| // characters in length, and must not start with "goog". For more information, | |||
| // see: https://cloud.google.com/pubsub/docs/admin#resource_names. | |||
| // | |||
| // If the topic already exists, an error will be returned. | |||
| func (c *Client) CreateTopicWithConfig(ctx context.Context, topicID string, tc *TopicConfig) (*Topic, error) { | |||
| t := c.Topic(topicID) | |||
| _, err := c.pubc.CreateTopic(ctx, &pb.Topic{ | |||
| Name: t.name, | |||
| Labels: tc.Labels, | |||
| MessageStoragePolicy: messageStoragePolicyToProto(&tc.MessageStoragePolicy), | |||
| KmsKeyName: tc.KMSKeyName, | |||
| }) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return t, nil | |||
| } | |||
// Topic creates a reference to a topic in the client's project.
//
// If a Topic's Publish method is called, it has background goroutines
// associated with it. Clean them up by calling Topic.Stop.
//
// Avoid creating many Topic instances if you use them to publish.
func (c *Client) Topic(id string) *Topic {
	// Delegates to TopicInProject using the client's own project ID.
	return c.TopicInProject(id, c.projectID)
}
| // TopicInProject creates a reference to a topic in the given project. | |||
| // | |||
| // If a Topic's Publish method is called, it has background goroutines | |||
| // associated with it. Clean them up by calling Topic.Stop. | |||
| // | |||
| // Avoid creating many Topic instances if you use them to publish. | |||
| func (c *Client) TopicInProject(id, projectID string) *Topic { | |||
| return newTopic(c, fmt.Sprintf("projects/%s/topics/%s", projectID, id)) | |||
| } | |||
| func newTopic(c *Client, name string) *Topic { | |||
| return &Topic{ | |||
| c: c, | |||
| name: name, | |||
| PublishSettings: DefaultPublishSettings, | |||
| } | |||
| } | |||
// TopicConfig describes the configuration of a topic.
type TopicConfig struct {
	// The set of labels for the topic.
	Labels map[string]string
	// The topic's message storage policy.
	MessageStoragePolicy MessageStoragePolicy
	// The name of the Cloud KMS key to be used to protect access to messages
	// published to this topic, in the format
	// "projects/P/locations/L/keyRings/R/cryptoKeys/K".
	// Empty means no customer-managed encryption key is configured.
	KMSKeyName string
}
// TopicConfigToUpdate describes how to update a topic.
// Only non-nil fields are sent to the service (see Topic.updateRequest).
type TopicConfigToUpdate struct {
	// If non-nil, the current set of labels is completely
	// replaced by the new set.
	Labels map[string]string
	// If non-nil, the existing policy (containing the list of regions)
	// is completely replaced by the new policy.
	//
	// Use the zero value &MessageStoragePolicy{} to reset the topic back to
	// using the organization's Resource Location Restriction policy.
	//
	// If nil, the policy remains unchanged.
	//
	// This field has beta status. It is not subject to the stability guarantee
	// and may change.
	MessageStoragePolicy *MessageStoragePolicy
}
| func protoToTopicConfig(pbt *pb.Topic) TopicConfig { | |||
| return TopicConfig{ | |||
| Labels: pbt.Labels, | |||
| MessageStoragePolicy: protoToMessageStoragePolicy(pbt.MessageStoragePolicy), | |||
| KMSKeyName: pbt.KmsKeyName, | |||
| } | |||
| } | |||
// MessageStoragePolicy constrains how messages published to the topic may be stored. It
// is determined when the topic is created based on the policy configured at
// the project level.
type MessageStoragePolicy struct {
	// AllowedPersistenceRegions is the list of GCP regions where messages that are published
	// to the topic may be persisted in storage. Messages published by publishers running in
	// non-allowed GCP regions (or running outside of GCP altogether) will be
	// routed for storage in one of the allowed regions.
	//
	// If empty, it indicates a misconfiguration at the project or organization level, which
	// will result in all Publish operations failing. This field cannot be empty in updates.
	//
	// If nil, then the policy is not defined on a topic level. When used in updates, it resets
	// the regions back to the organization level Resource Location Restriction policy.
	//
	// For more information, see
	// https://cloud.google.com/pubsub/docs/resource-location-restriction#pubsub-storage-locations.
	AllowedPersistenceRegions []string
}
| func protoToMessageStoragePolicy(msp *pb.MessageStoragePolicy) MessageStoragePolicy { | |||
| if msp == nil { | |||
| return MessageStoragePolicy{} | |||
| } | |||
| return MessageStoragePolicy{AllowedPersistenceRegions: msp.AllowedPersistenceRegions} | |||
| } | |||
| func messageStoragePolicyToProto(msp *MessageStoragePolicy) *pb.MessageStoragePolicy { | |||
| if msp == nil || msp.AllowedPersistenceRegions == nil { | |||
| return nil | |||
| } | |||
| return &pb.MessageStoragePolicy{AllowedPersistenceRegions: msp.AllowedPersistenceRegions} | |||
| } | |||
| // Config returns the TopicConfig for the topic. | |||
| func (t *Topic) Config(ctx context.Context) (TopicConfig, error) { | |||
| pbt, err := t.c.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: t.name}) | |||
| if err != nil { | |||
| return TopicConfig{}, err | |||
| } | |||
| return protoToTopicConfig(pbt), nil | |||
| } | |||
| // Update changes an existing topic according to the fields set in cfg. It returns | |||
| // the new TopicConfig. | |||
| func (t *Topic) Update(ctx context.Context, cfg TopicConfigToUpdate) (TopicConfig, error) { | |||
| req := t.updateRequest(cfg) | |||
| if len(req.UpdateMask.Paths) == 0 { | |||
| return TopicConfig{}, errors.New("pubsub: UpdateTopic call with nothing to update") | |||
| } | |||
| rpt, err := t.c.pubc.UpdateTopic(ctx, req) | |||
| if err != nil { | |||
| return TopicConfig{}, err | |||
| } | |||
| return protoToTopicConfig(rpt), nil | |||
| } | |||
| func (t *Topic) updateRequest(cfg TopicConfigToUpdate) *pb.UpdateTopicRequest { | |||
| pt := &pb.Topic{Name: t.name} | |||
| var paths []string | |||
| if cfg.Labels != nil { | |||
| pt.Labels = cfg.Labels | |||
| paths = append(paths, "labels") | |||
| } | |||
| if cfg.MessageStoragePolicy != nil { | |||
| pt.MessageStoragePolicy = messageStoragePolicyToProto(cfg.MessageStoragePolicy) | |||
| paths = append(paths, "message_storage_policy") | |||
| } | |||
| return &pb.UpdateTopicRequest{ | |||
| Topic: pt, | |||
| UpdateMask: &fmpb.FieldMask{Paths: paths}, | |||
| } | |||
| } | |||
| // Topics returns an iterator which returns all of the topics for the client's project. | |||
| func (c *Client) Topics(ctx context.Context) *TopicIterator { | |||
| it := c.pubc.ListTopics(ctx, &pb.ListTopicsRequest{Project: c.fullyQualifiedProjectName()}) | |||
| return &TopicIterator{ | |||
| c: c, | |||
| next: func() (string, error) { | |||
| topic, err := it.Next() | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| return topic.Name, nil | |||
| }, | |||
| } | |||
| } | |||
// TopicIterator is an iterator that returns a series of topics.
type TopicIterator struct {
	c *Client
	// next yields the next topic's fully-qualified name, or an error
	// (iterator.Done when exhausted).
	next func() (string, error)
}
| // Next returns the next topic. If there are no more topics, iterator.Done will be returned. | |||
| func (tps *TopicIterator) Next() (*Topic, error) { | |||
| topicName, err := tps.next() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return newTopic(tps.c, topicName), nil | |||
| } | |||
| // ID returns the unique identifier of the topic within its project. | |||
| func (t *Topic) ID() string { | |||
| slash := strings.LastIndex(t.name, "/") | |||
| if slash == -1 { | |||
| // name is not a fully-qualified name. | |||
| panic("bad topic name") | |||
| } | |||
| return t.name[slash+1:] | |||
| } | |||
// String returns the printable globally unique name for the topic
// ("projects/<projid>/topics/<name>").
func (t *Topic) String() string {
	return t.name
}
// Delete deletes the topic on the service; the local Topic reference
// remains usable only for its name accessors afterwards.
func (t *Topic) Delete(ctx context.Context) error {
	return t.c.pubc.DeleteTopic(ctx, &pb.DeleteTopicRequest{Topic: t.name})
}
| // Exists reports whether the topic exists on the server. | |||
| func (t *Topic) Exists(ctx context.Context) (bool, error) { | |||
| if t.name == "_deleted-topic_" { | |||
| return false, nil | |||
| } | |||
| _, err := t.c.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: t.name}) | |||
| if err == nil { | |||
| return true, nil | |||
| } | |||
| if status.Code(err) == codes.NotFound { | |||
| return false, nil | |||
| } | |||
| return false, err | |||
| } | |||
// IAM returns the topic's IAM handle, which operates on the topic's
// fully-qualified resource name over the publisher client's connection.
func (t *Topic) IAM() *iam.Handle {
	return iam.InternalNewHandle(t.c.pubc.Connection(), t.name)
}
| // Subscriptions returns an iterator which returns the subscriptions for this topic. | |||
| // | |||
| // Some of the returned subscriptions may belong to a project other than t. | |||
| func (t *Topic) Subscriptions(ctx context.Context) *SubscriptionIterator { | |||
| it := t.c.pubc.ListTopicSubscriptions(ctx, &pb.ListTopicSubscriptionsRequest{ | |||
| Topic: t.name, | |||
| }) | |||
| return &SubscriptionIterator{ | |||
| c: t.c, | |||
| next: it.Next, | |||
| } | |||
| } | |||
// errTopicStopped is the error carried by PublishResults returned from
// Publish calls made after Topic.Stop.
var errTopicStopped = errors.New("pubsub: Stop has been called for this topic")
// Publish publishes msg to the topic asynchronously. Messages are batched and
// sent according to the topic's PublishSettings. Publish never blocks.
//
// Publish returns a non-nil PublishResult which will be ready when the
// message has been sent (or has failed to be sent) to the server.
//
// Publish creates goroutines for batching and sending messages. These goroutines
// need to be stopped by calling t.Stop(). Once stopped, future calls to Publish
// will immediately return a PublishResult with an error.
func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult {
	// Compute the message's wire size up front; the bundler uses it for
	// its byte thresholds.
	// TODO(jba): if this turns out to take significant time, try to approximate it.
	// Or, convert the messages to protos in Publish, instead of in the service.
	msg.size = proto.Size(&pb.PubsubMessage{
		Data:       msg.Data,
		Attributes: msg.Attributes,
	})
	r := &PublishResult{ready: make(chan struct{})}
	// Lazily create the bundler (no-op after the first call or after Stop).
	t.initBundler()
	// Hold the read lock while enqueueing so Stop cannot flip t.stopped and
	// flush concurrently with this Add.
	t.mu.RLock()
	defer t.mu.RUnlock()
	// TODO(aboulhosn) [from bcmills] consider changing the semantics of bundler to perform this logic so we don't have to do it here
	if t.stopped {
		r.set("", errTopicStopped)
		return r
	}
	// TODO(jba) [from bcmills] consider using a shared channel per bundle
	// (requires Bundler API changes; would reduce allocations)
	err := t.bundler.Add(&bundledMessage{msg, r}, msg.size)
	if err != nil {
		// Add fails synchronously only for oversized or overflowing items;
		// surface that through the result instead of sending.
		r.set("", err)
	}
	return r
}
| // Stop sends all remaining published messages and stop goroutines created for handling | |||
| // publishing. Returns once all outstanding messages have been sent or have | |||
| // failed to be sent. | |||
| func (t *Topic) Stop() { | |||
| t.mu.Lock() | |||
| noop := t.stopped || t.bundler == nil | |||
| t.stopped = true | |||
| t.mu.Unlock() | |||
| if noop { | |||
| return | |||
| } | |||
| t.bundler.Flush() | |||
| } | |||
// A PublishResult holds the result from a call to Publish.
type PublishResult struct {
	// ready is closed exactly once, by set, when serverID and err are final.
	ready chan struct{}
	// serverID and err are written before ready is closed and must not be
	// read until it is.
	serverID string
	err      error
}
// Ready returns a channel that is closed when the result is ready.
// When the Ready channel is closed, Get is guaranteed not to block.
func (r *PublishResult) Ready() <-chan struct{} { return r.ready }
| // Get returns the server-generated message ID and/or error result of a Publish call. | |||
| // Get blocks until the Publish call completes or the context is done. | |||
| func (r *PublishResult) Get(ctx context.Context) (serverID string, err error) { | |||
| // If the result is already ready, return it even if the context is done. | |||
| select { | |||
| case <-r.Ready(): | |||
| return r.serverID, r.err | |||
| default: | |||
| } | |||
| select { | |||
| case <-ctx.Done(): | |||
| return "", ctx.Err() | |||
| case <-r.Ready(): | |||
| return r.serverID, r.err | |||
| } | |||
| } | |||
// set records the outcome of the publish and wakes all waiters by closing
// ready. It must be called at most once per PublishResult.
func (r *PublishResult) set(sid string, err error) {
	r.serverID = sid
	r.err = err
	close(r.ready)
}
// bundledMessage pairs a message queued in the bundler with the
// PublishResult its outcome is delivered through.
type bundledMessage struct {
	msg *Message
	res *PublishResult
}
// initBundler lazily creates t.bundler from t.PublishSettings on first use.
// It is a no-op once the bundler exists or the topic is stopped. It uses a
// check-then-recheck pattern: a cheap read-locked check first, then a write
// lock with a second check because the state may change between the two.
func (t *Topic) initBundler() {
	t.mu.RLock()
	noop := t.stopped || t.bundler != nil
	t.mu.RUnlock()
	if noop {
		return
	}
	t.mu.Lock()
	defer t.mu.Unlock()
	// Must re-check, since we released the lock.
	if t.stopped || t.bundler != nil {
		return
	}
	// Capture the timeout now so the handler closure does not read
	// t.PublishSettings later.
	timeout := t.PublishSettings.Timeout
	t.bundler = bundler.NewBundler(&bundledMessage{}, func(items interface{}) {
		// TODO(jba): use a context detached from the one passed to NewClient.
		ctx := context.TODO()
		if timeout != 0 {
			var cancel func()
			ctx, cancel = context.WithTimeout(ctx, timeout)
			defer cancel()
		}
		t.publishMessageBundle(ctx, items.([]*bundledMessage))
	})
	t.bundler.DelayThreshold = t.PublishSettings.DelayThreshold
	// Cap the count threshold at the service's per-request maximum.
	t.bundler.BundleCountThreshold = t.PublishSettings.CountThreshold
	if t.bundler.BundleCountThreshold > MaxPublishRequestCount {
		t.bundler.BundleCountThreshold = MaxPublishRequestCount
	}
	t.bundler.BundleByteThreshold = t.PublishSettings.ByteThreshold
	// Non-positive BufferedByteLimit falls back to the package default.
	bufferedByteLimit := DefaultPublishSettings.BufferedByteLimit
	if t.PublishSettings.BufferedByteLimit > 0 {
		bufferedByteLimit = t.PublishSettings.BufferedByteLimit
	}
	t.bundler.BufferedByteLimit = bufferedByteLimit
	t.bundler.BundleByteLimit = MaxPublishRequestBytes
	// Unless overridden, allow many goroutines per CPU to call the Publish RPC concurrently.
	// The default value was determined via extensive load testing (see the loadtest subdirectory).
	if t.PublishSettings.NumGoroutines > 0 {
		t.bundler.HandlerLimit = t.PublishSettings.NumGoroutines
	} else {
		t.bundler.HandlerLimit = 25 * runtime.GOMAXPROCS(0)
	}
}
// publishMessageBundle sends one batch of bundled messages in a single
// Publish RPC, records OpenCensus latency/count stats, and delivers the
// per-message outcome (server ID or the shared error) to each PublishResult.
func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) {
	// Tag the context for stats; a tagging failure is logged but does not
	// prevent publishing.
	ctx, err := tag.New(ctx, tag.Insert(keyStatus, "OK"), tag.Upsert(keyTopic, t.name))
	if err != nil {
		log.Printf("pubsub: cannot create context with tag in publishMessageBundle: %v", err)
	}
	pbMsgs := make([]*pb.PubsubMessage, len(bms))
	for i, bm := range bms {
		pbMsgs[i] = &pb.PubsubMessage{
			Data:       bm.msg.Data,
			Attributes: bm.msg.Attributes,
		}
		bm.msg = nil // release bm.msg for GC
	}
	start := time.Now()
	res, err := t.c.pubc.Publish(ctx, &pb.PublishRequest{
		Topic:    t.name,
		Messages: pbMsgs,
	}, gax.WithGRPCOptions(grpc.MaxCallSendMsgSize(maxSendRecvBytes)))
	end := time.Now()
	if err != nil {
		// Update context with error tag for OpenCensus,
		// using same stats.Record() call as success case.
		ctx, _ = tag.New(ctx, tag.Upsert(keyStatus, "ERROR"),
			tag.Upsert(keyError, err.Error()))
	}
	stats.Record(ctx,
		PublishLatency.M(float64(end.Sub(start)/time.Millisecond)),
		PublishedMessages.M(int64(len(bms))))
	// Fan the single RPC outcome out to every message's PublishResult:
	// on error, all messages fail; on success, MessageIds is parallel to bms.
	for i, bm := range bms {
		if err != nil {
			bm.res.set("", err)
		} else {
			bm.res.set(res.MessageIds[i], nil)
		}
	}
}
| @@ -0,0 +1,217 @@ | |||
| // Copyright 2018 Google LLC | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| package pubsub | |||
| import ( | |||
| "context" | |||
| "log" | |||
| "sync" | |||
| "go.opencensus.io/plugin/ocgrpc" | |||
| "go.opencensus.io/stats" | |||
| "go.opencensus.io/stats/view" | |||
| "go.opencensus.io/tag" | |||
| "google.golang.org/api/option" | |||
| "google.golang.org/grpc" | |||
| ) | |||
// openCensusOptions returns the client options that install the OpenCensus
// gRPC stats handler on every dialed connection.
func openCensusOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithGRPCDialOption(grpc.WithStatsHandler(&ocgrpc.ClientHandler{})),
	}
}
// The following keys are used to tag requests with a specific topic/subscription ID.
var (
	keyTopic        = tag.MustNewKey("topic")
	keySubscription = tag.MustNewKey("subscription")
)
// In the following, errors are used if status is not "OK".
var (
	keyStatus = tag.MustNewKey("status")
	keyError  = tag.MustNewKey("error")
)
// statsPrefix namespaces all measure names exported by this package.
const statsPrefix = "cloud.google.com/go/pubsub/"
// The following are measures recorded in publish/subscribe flows.
// Views over these measures are created in init and exposed below.
var (
	// PublishedMessages is a measure of the number of messages published, which may include errors.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	PublishedMessages = stats.Int64(statsPrefix+"published_messages", "Number of PubSub message published", stats.UnitDimensionless)

	// PublishLatency is a measure of the number of milliseconds it took to publish a bundle,
	// which may consist of one or more messages.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	PublishLatency = stats.Float64(statsPrefix+"publish_roundtrip_latency", "The latency in milliseconds per publish batch", stats.UnitMilliseconds)

	// PullCount is a measure of the number of messages pulled.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	PullCount = stats.Int64(statsPrefix+"pull_count", "Number of PubSub messages pulled", stats.UnitDimensionless)

	// AckCount is a measure of the number of messages acked.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	AckCount = stats.Int64(statsPrefix+"ack_count", "Number of PubSub messages acked", stats.UnitDimensionless)

	// NackCount is a measure of the number of messages nacked.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	NackCount = stats.Int64(statsPrefix+"nack_count", "Number of PubSub messages nacked", stats.UnitDimensionless)

	// ModAckCount is a measure of the number of messages whose ack-deadline was modified.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	ModAckCount = stats.Int64(statsPrefix+"mod_ack_count", "Number of ack-deadlines modified", stats.UnitDimensionless)

	// ModAckTimeoutCount is a measure of the number ModifyAckDeadline RPCs that timed out.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	ModAckTimeoutCount = stats.Int64(statsPrefix+"mod_ack_timeout_count", "Number of ModifyAckDeadline RPCs that timed out", stats.UnitDimensionless)

	// StreamOpenCount is a measure of the number of times a streaming-pull stream was opened.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	StreamOpenCount = stats.Int64(statsPrefix+"stream_open_count", "Number of calls opening a new streaming pull", stats.UnitDimensionless)

	// StreamRetryCount is a measure of the number of times a streaming-pull operation was retried.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	StreamRetryCount = stats.Int64(statsPrefix+"stream_retry_count", "Number of retries of a stream send or receive", stats.UnitDimensionless)

	// StreamRequestCount is a measure of the number of requests sent on a streaming-pull stream.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	StreamRequestCount = stats.Int64(statsPrefix+"stream_request_count", "Number gRPC StreamingPull request messages sent", stats.UnitDimensionless)

	// StreamResponseCount is a measure of the number of responses received on a streaming-pull stream.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	StreamResponseCount = stats.Int64(statsPrefix+"stream_response_count", "Number of gRPC StreamingPull response messages received", stats.UnitDimensionless)
)
// Views over the measures above. They are populated in init and may be
// registered with view.Register by users who want these stats exported.
var (
	// PublishedMessagesView is a cumulative sum of PublishedMessages.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	PublishedMessagesView *view.View

	// PublishLatencyView is a distribution of PublishLatency.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	PublishLatencyView *view.View

	// PullCountView is a cumulative sum of PullCount.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	PullCountView *view.View

	// AckCountView is a cumulative sum of AckCount.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	AckCountView *view.View

	// NackCountView is a cumulative sum of NackCount.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	NackCountView *view.View

	// ModAckCountView is a cumulative sum of ModAckCount.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	ModAckCountView *view.View

	// ModAckTimeoutCountView is a cumulative sum of ModAckTimeoutCount.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	ModAckTimeoutCountView *view.View

	// StreamOpenCountView is a cumulative sum of StreamOpenCount.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	StreamOpenCountView *view.View

	// StreamRetryCountView is a cumulative sum of StreamRetryCount.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	StreamRetryCountView *view.View

	// StreamRequestCountView is a cumulative sum of StreamRequestCount.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	StreamRequestCountView *view.View

	// StreamResponseCountView is a cumulative sum of StreamResponseCount.
	// It is EXPERIMENTAL and subject to change or removal without notice.
	StreamResponseCountView *view.View
)
| func init() { | |||
| PublishedMessagesView = createCountView(stats.Measure(PublishedMessages), keyTopic, keyStatus, keyError) | |||
| PublishLatencyView = createDistView(PublishLatency, keyTopic, keyStatus, keyError) | |||
| PullCountView = createCountView(PullCount, keySubscription) | |||
| AckCountView = createCountView(AckCount, keySubscription) | |||
| NackCountView = createCountView(NackCount, keySubscription) | |||
| ModAckCountView = createCountView(ModAckCount, keySubscription) | |||
| ModAckTimeoutCountView = createCountView(ModAckTimeoutCount, keySubscription) | |||
| StreamOpenCountView = createCountView(StreamOpenCount, keySubscription) | |||
| StreamRetryCountView = createCountView(StreamRetryCount, keySubscription) | |||
| StreamRequestCountView = createCountView(StreamRequestCount, keySubscription) | |||
| StreamResponseCountView = createCountView(StreamResponseCount, keySubscription) | |||
| DefaultPublishViews = []*view.View{ | |||
| PublishedMessagesView, | |||
| PublishLatencyView, | |||
| } | |||
| DefaultSubscribeViews = []*view.View{ | |||
| PullCountView, | |||
| AckCountView, | |||
| NackCountView, | |||
| ModAckCountView, | |||
| ModAckTimeoutCountView, | |||
| StreamOpenCountView, | |||
| StreamRetryCountView, | |||
| StreamRequestCountView, | |||
| StreamResponseCountView, | |||
| } | |||
| } | |||
// The following slices are the default views related to publish/subscribe
// operations provided by this package; they are populated in init.
// They are EXPERIMENTAL and subject to change or removal without notice.
var (
	// DefaultPublishViews are the default views for the publish flow.
	DefaultPublishViews []*view.View
	// DefaultSubscribeViews are the default views for the subscribe flow.
	DefaultSubscribeViews []*view.View
)
| func createCountView(m stats.Measure, keys ...tag.Key) *view.View { | |||
| return &view.View{ | |||
| Name: m.Name(), | |||
| Description: m.Description(), | |||
| TagKeys: keys, | |||
| Measure: m, | |||
| Aggregation: view.Sum(), | |||
| } | |||
| } | |||
| func createDistView(m stats.Measure, keys ...tag.Key) *view.View { | |||
| return &view.View{ | |||
| Name: m.Name(), | |||
| Description: m.Description(), | |||
| TagKeys: keys, | |||
| Measure: m, | |||
| Aggregation: view.Distribution(0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000), | |||
| } | |||
| } | |||
| var logOnce sync.Once | |||
| // withSubscriptionKey returns a new context modified with the subscriptionKey tag map. | |||
| func withSubscriptionKey(ctx context.Context, subName string) context.Context { | |||
| ctx, err := tag.New(ctx, tag.Upsert(keySubscription, subName)) | |||
| if err != nil { | |||
| logOnce.Do(func() { | |||
| log.Printf("pubsub: error creating tag map for 'subscribe' key: %v", err) | |||
| }) | |||
| } | |||
| return ctx | |||
| } | |||
// recordStat records n occurrences of the measure m, using any tags
// already attached to ctx (e.g. by withSubscriptionKey).
func recordStat(ctx context.Context, m *stats.Int64Measure, n int64) {
	stats.Record(ctx, m.M(n))
}
| @@ -0,0 +1 @@ | |||
| coverage* | |||
| @@ -0,0 +1,17 @@ | |||
| --- | |||
| language: go | |||
| go: | |||
| - 1.11.x | |||
| env: | |||
| - GO111MODULE=on | |||
| services: | |||
| - docker | |||
| script: | |||
| - make test-with-coverage | |||
| after_success: | |||
| - bash <(curl -s https://codecov.io/bash) | |||
| @@ -0,0 +1,354 @@ | |||
| Mozilla Public License, version 2.0 | |||
| 1. Definitions | |||
| 1.1. “Contributor” | |||
| means each individual or legal entity that creates, contributes to the | |||
| creation of, or owns Covered Software. | |||
| 1.2. “Contributor Version” | |||
| means the combination of the Contributions of others (if any) used by a | |||
| Contributor and that particular Contributor’s Contribution. | |||
| 1.3. “Contribution” | |||
| means Covered Software of a particular Contributor. | |||
| 1.4. “Covered Software” | |||
| means Source Code Form to which the initial Contributor has attached the | |||
| notice in Exhibit A, the Executable Form of such Source Code Form, and | |||
| Modifications of such Source Code Form, in each case including portions | |||
| thereof. | |||
| 1.5. “Incompatible With Secondary Licenses” | |||
| means | |||
| a. that the initial Contributor has attached the notice described in | |||
| Exhibit B to the Covered Software; or | |||
| b. that the Covered Software was made available under the terms of version | |||
| 1.1 or earlier of the License, but not also under the terms of a | |||
| Secondary License. | |||
| 1.6. “Executable Form” | |||
| means any form of the work other than Source Code Form. | |||
| 1.7. “Larger Work” | |||
| means a work that combines Covered Software with other material, in a separate | |||
| file or files, that is not Covered Software. | |||
| 1.8. “License” | |||
| means this document. | |||
| 1.9. “Licensable” | |||
| means having the right to grant, to the maximum extent possible, whether at the | |||
| time of the initial grant or subsequently, any and all of the rights conveyed by | |||
| this License. | |||
| 1.10. “Modifications” | |||
| means any of the following: | |||
| a. any file in Source Code Form that results from an addition to, deletion | |||
| from, or modification of the contents of Covered Software; or | |||
| b. any new file in Source Code Form that contains any Covered Software. | |||
| 1.11. “Patent Claims” of a Contributor | |||
| means any patent claim(s), including without limitation, method, process, | |||
| and apparatus claims, in any patent Licensable by such Contributor that | |||
| would be infringed, but for the grant of the License, by the making, | |||
| using, selling, offering for sale, having made, import, or transfer of | |||
| either its Contributions or its Contributor Version. | |||
| 1.12. “Secondary License” | |||
| means either the GNU General Public License, Version 2.0, the GNU Lesser | |||
| General Public License, Version 2.1, the GNU Affero General Public | |||
| License, Version 3.0, or any later versions of those licenses. | |||
| 1.13. “Source Code Form” | |||
| means the form of the work preferred for making modifications. | |||
| 1.14. “You” (or “Your”) | |||
| means an individual or a legal entity exercising rights under this | |||
| License. For legal entities, “You” includes any entity that controls, is | |||
| controlled by, or is under common control with You. For purposes of this | |||
| definition, “control” means (a) the power, direct or indirect, to cause | |||
| the direction or management of such entity, whether by contract or | |||
| otherwise, or (b) ownership of more than fifty percent (50%) of the | |||
| outstanding shares or beneficial ownership of such entity. | |||
| 2. License Grants and Conditions | |||
| 2.1. Grants | |||
| Each Contributor hereby grants You a world-wide, royalty-free, | |||
| non-exclusive license: | |||
| a. under intellectual property rights (other than patent or trademark) | |||
| Licensable by such Contributor to use, reproduce, make available, | |||
| modify, display, perform, distribute, and otherwise exploit its | |||
| Contributions, either on an unmodified basis, with Modifications, or as | |||
| part of a Larger Work; and | |||
| b. under Patent Claims of such Contributor to make, use, sell, offer for | |||
| sale, have made, import, and otherwise transfer either its Contributions | |||
| or its Contributor Version. | |||
| 2.2. Effective Date | |||
| The licenses granted in Section 2.1 with respect to any Contribution become | |||
| effective for each Contribution on the date the Contributor first distributes | |||
| such Contribution. | |||
| 2.3. Limitations on Grant Scope | |||
| The licenses granted in this Section 2 are the only rights granted under this | |||
| License. No additional rights or licenses will be implied from the distribution | |||
| or licensing of Covered Software under this License. Notwithstanding Section | |||
| 2.1(b) above, no patent license is granted by a Contributor: | |||
| a. for any code that a Contributor has removed from Covered Software; or | |||
| b. for infringements caused by: (i) Your and any other third party’s | |||
| modifications of Covered Software, or (ii) the combination of its | |||
| Contributions with other software (except as part of its Contributor | |||
| Version); or | |||
| c. under Patent Claims infringed by Covered Software in the absence of its | |||
| Contributions. | |||
| This License does not grant any rights in the trademarks, service marks, or | |||
| logos of any Contributor (except as may be necessary to comply with the | |||
| notice requirements in Section 3.4). | |||
| 2.4. Subsequent Licenses | |||
| No Contributor makes additional grants as a result of Your choice to | |||
| distribute the Covered Software under a subsequent version of this License | |||
| (see Section 10.2) or under the terms of a Secondary License (if permitted | |||
| under the terms of Section 3.3). | |||
| 2.5. Representation | |||
| Each Contributor represents that the Contributor believes its Contributions | |||
| are its original creation(s) or it has sufficient rights to grant the | |||
| rights to its Contributions conveyed by this License. | |||
| 2.6. Fair Use | |||
| This License is not intended to limit any rights You have under applicable | |||
| copyright doctrines of fair use, fair dealing, or other equivalents. | |||
| 2.7. Conditions | |||
| Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in | |||
| Section 2.1. | |||
| 3. Responsibilities | |||
| 3.1. Distribution of Source Form | |||
| All distribution of Covered Software in Source Code Form, including any | |||
| Modifications that You create or to which You contribute, must be under the | |||
| terms of this License. You must inform recipients that the Source Code Form | |||
| of the Covered Software is governed by the terms of this License, and how | |||
| they can obtain a copy of this License. You may not attempt to alter or | |||
| restrict the recipients’ rights in the Source Code Form. | |||
| 3.2. Distribution of Executable Form | |||
| If You distribute Covered Software in Executable Form then: | |||
| a. such Covered Software must also be made available in Source Code Form, | |||
| as described in Section 3.1, and You must inform recipients of the | |||
| Executable Form how they can obtain a copy of such Source Code Form by | |||
| reasonable means in a timely manner, at a charge no more than the cost | |||
| of distribution to the recipient; and | |||
| b. You may distribute such Executable Form under the terms of this License, | |||
| or sublicense it under different terms, provided that the license for | |||
| the Executable Form does not attempt to limit or alter the recipients’ | |||
| rights in the Source Code Form under this License. | |||
| 3.3. Distribution of a Larger Work | |||
| You may create and distribute a Larger Work under terms of Your choice, | |||
| provided that You also comply with the requirements of this License for the | |||
| Covered Software. If the Larger Work is a combination of Covered Software | |||
| with a work governed by one or more Secondary Licenses, and the Covered | |||
| Software is not Incompatible With Secondary Licenses, this License permits | |||
| You to additionally distribute such Covered Software under the terms of | |||
| such Secondary License(s), so that the recipient of the Larger Work may, at | |||
| their option, further distribute the Covered Software under the terms of | |||
| either this License or such Secondary License(s). | |||
| 3.4. Notices | |||
| You may not remove or alter the substance of any license notices (including | |||
| copyright notices, patent notices, disclaimers of warranty, or limitations | |||
| of liability) contained within the Source Code Form of the Covered | |||
| Software, except that You may alter any license notices to the extent | |||
| required to remedy known factual inaccuracies. | |||
| 3.5. Application of Additional Terms | |||
| You may choose to offer, and to charge a fee for, warranty, support, | |||
| indemnity or liability obligations to one or more recipients of Covered | |||
| Software. However, You may do so only on Your own behalf, and not on behalf | |||
| of any Contributor. You must make it absolutely clear that any such | |||
| warranty, support, indemnity, or liability obligation is offered by You | |||
| alone, and You hereby agree to indemnify every Contributor for any | |||
| liability incurred by such Contributor as a result of warranty, support, | |||
| indemnity or liability terms You offer. You may include additional | |||
| disclaimers of warranty and limitations of liability specific to any | |||
| jurisdiction. | |||
| 4. Inability to Comply Due to Statute or Regulation | |||
| If it is impossible for You to comply with any of the terms of this License | |||
| with respect to some or all of the Covered Software due to statute, judicial | |||
| order, or regulation then You must: (a) comply with the terms of this License | |||
| to the maximum extent possible; and (b) describe the limitations and the code | |||
| they affect. Such description must be placed in a text file included with all | |||
| distributions of the Covered Software under this License. Except to the | |||
| extent prohibited by statute or regulation, such description must be | |||
| sufficiently detailed for a recipient of ordinary skill to be able to | |||
| understand it. | |||
| 5. Termination | |||
| 5.1. The rights granted under this License will terminate automatically if You | |||
| fail to comply with any of its terms. However, if You become compliant, | |||
| then the rights granted under this License from a particular Contributor | |||
| are reinstated (a) provisionally, unless and until such Contributor | |||
| explicitly and finally terminates Your grants, and (b) on an ongoing basis, | |||
| if such Contributor fails to notify You of the non-compliance by some | |||
| reasonable means prior to 60 days after You have come back into compliance. | |||
| Moreover, Your grants from a particular Contributor are reinstated on an | |||
| ongoing basis if such Contributor notifies You of the non-compliance by | |||
| some reasonable means, this is the first time You have received notice of | |||
| non-compliance with this License from such Contributor, and You become | |||
| compliant prior to 30 days after Your receipt of the notice. | |||
| 5.2. If You initiate litigation against any entity by asserting a patent | |||
| infringement claim (excluding declaratory judgment actions, counter-claims, | |||
| and cross-claims) alleging that a Contributor Version directly or | |||
| indirectly infringes any patent, then the rights granted to You by any and | |||
| all Contributors for the Covered Software under Section 2.1 of this License | |||
| shall terminate. | |||
| 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user | |||
| license agreements (excluding distributors and resellers) which have been | |||
| validly granted by You or Your distributors under this License prior to | |||
| termination shall survive termination. | |||
| 6. Disclaimer of Warranty | |||
| Covered Software is provided under this License on an “as is” basis, without | |||
| warranty of any kind, either expressed, implied, or statutory, including, | |||
| without limitation, warranties that the Covered Software is free of defects, | |||
| merchantable, fit for a particular purpose or non-infringing. The entire | |||
| risk as to the quality and performance of the Covered Software is with You. | |||
| Should any Covered Software prove defective in any respect, You (not any | |||
| Contributor) assume the cost of any necessary servicing, repair, or | |||
| correction. This disclaimer of warranty constitutes an essential part of this | |||
| License. No use of any Covered Software is authorized under this License | |||
| except under this disclaimer. | |||
| 7. Limitation of Liability | |||
| Under no circumstances and under no legal theory, whether tort (including | |||
| negligence), contract, or otherwise, shall any Contributor, or anyone who | |||
| distributes Covered Software as permitted above, be liable to You for any | |||
| direct, indirect, special, incidental, or consequential damages of any | |||
| character including, without limitation, damages for lost profits, loss of | |||
| goodwill, work stoppage, computer failure or malfunction, or any and all | |||
| other commercial damages or losses, even if such party shall have been | |||
| informed of the possibility of such damages. This limitation of liability | |||
| shall not apply to liability for death or personal injury resulting from such | |||
| party’s negligence to the extent applicable law prohibits such limitation. | |||
| Some jurisdictions do not allow the exclusion or limitation of incidental or | |||
| consequential damages, so this exclusion and limitation may not apply to You. | |||
| 8. Litigation | |||
| Any litigation relating to this License may be brought only in the courts of | |||
| a jurisdiction where the defendant maintains its principal place of business | |||
| and such litigation shall be governed by laws of that jurisdiction, without | |||
| reference to its conflict-of-law provisions. Nothing in this Section shall | |||
| prevent a party’s ability to bring cross-claims or counter-claims. | |||
| 9. Miscellaneous | |||
| This License represents the complete agreement concerning the subject matter | |||
| hereof. If any provision of this License is held to be unenforceable, such | |||
| provision shall be reformed only to the extent necessary to make it | |||
| enforceable. Any law or regulation which provides that the language of a | |||
| contract shall be construed against the drafter shall not be used to construe | |||
| this License against a Contributor. | |||
| 10. Versions of the License | |||
| 10.1. New Versions | |||
| Mozilla Foundation is the license steward. Except as provided in Section | |||
| 10.3, no one other than the license steward has the right to modify or | |||
| publish new versions of this License. Each version will be given a | |||
| distinguishing version number. | |||
| 10.2. Effect of New Versions | |||
| You may distribute the Covered Software under the terms of the version of | |||
| the License under which You originally received the Covered Software, or | |||
| under the terms of any subsequent version published by the license | |||
| steward. | |||
| 10.3. Modified Versions | |||
| If you create software not governed by this License, and you want to | |||
| create a new license for such software, you may create and use a modified | |||
| version of this License if you rename the license and remove any | |||
| references to the name of the license steward (except to note that such | |||
| modified license differs from this License). | |||
| 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses | |||
| If You choose to distribute Source Code Form that is Incompatible With | |||
| Secondary Licenses under the terms of this version of the License, the | |||
| notice described in Exhibit B of this License must be attached. | |||
| Exhibit A - Source Code Form License Notice | |||
| This Source Code Form is subject to the | |||
| terms of the Mozilla Public License, v. | |||
| 2.0. If a copy of the MPL was not | |||
| distributed with this file, You can | |||
| obtain one at | |||
| http://mozilla.org/MPL/2.0/. | |||
| If it is not possible or desirable to put the notice in a particular file, then | |||
| You may include the notice in a location (such as a LICENSE file in a relevant | |||
| directory) where a recipient would be likely to look for such a notice. | |||
| You may add additional accurate notices of copyright ownership. | |||
| Exhibit B - “Incompatible With Secondary Licenses” Notice | |||
| This Source Code Form is “Incompatible | |||
| With Secondary Licenses”, as defined by | |||
| the Mozilla Public License, v. 2.0. | |||
| @@ -0,0 +1,34 @@ | |||
.PHONY: update-deps install-deps fmt lint golint test test-with-coverage

# TODO: When Go 1.9 is released vendor folder should be ignored automatically
# PACKAGES lists every Go package in the repo, excluding vendored code and mocks.
PACKAGES=`go list ./... | grep -v vendor | grep -v mocks`

# fmt runs go fmt over every package.
fmt:
	for pkg in ${PACKAGES}; do \
		go fmt $$pkg; \
	done;

# lint runs gometalinter with the curated set of enabled linters.
lint:
	gometalinter --exclude=vendor/ --tests --config=gometalinter.json --disable-all -E vet -E gofmt -E misspell -E ineffassign -E goimports -E deadcode ./...

# golint runs golint over every package.
golint:
	for pkg in ${PACKAGES}; do \
		golint $$pkg; \
	done;

# test runs go test for every package; it only fails at the very end so
# that all packages are always tested.
test:
	TEST_FAILED= ; \
	for pkg in ${PACKAGES}; do \
		go test $$pkg || TEST_FAILED=1; \
	done; \
	[ -z "$$TEST_FAILED" ]

# test-with-coverage is like test but accumulates per-package coverage
# profiles into coverage-all.out (suitable for upload to codecov).
test-with-coverage:
	echo "" > coverage.out
	echo "mode: set" > coverage-all.out
	TEST_FAILED= ; \
	for pkg in ${PACKAGES}; do \
		go test -coverprofile=coverage.out -covermode=set $$pkg || TEST_FAILED=1; \
		tail -n +2 coverage.out >> coverage-all.out; \
	done; \
	[ -z "$$TEST_FAILED" ]
#go tool cover -html=coverage-all.out
| @@ -0,0 +1,58 @@ | |||
| ## Logging | |||
| A simple leveled logging library with coloured output. | |||
| [](https://travis-ci.org/RichardKnop/logging) | |||
| [](http://godoc.org/github.com/RichardKnop/logging) | |||
| [](https://codecov.io/gh/RichardKnop/logging) | |||
| --- | |||
| Log levels: | |||
| - `INFO` (blue) | |||
| - `WARNING` (pink) | |||
| - `ERROR` (red) | |||
| - `FATAL` (red) | |||
| Formatters: | |||
| - `DefaultFormatter` | |||
| - `ColouredFormatter` | |||
| Example usage. Create a new package `log` in your app such that: | |||
| ```go | |||
| package log | |||
| import ( | |||
| "github.com/RichardKnop/logging" | |||
| ) | |||
| var ( | |||
| logger = logging.New(nil, nil, new(logging.ColouredFormatter)) | |||
| // INFO ... | |||
| INFO = logger[logging.INFO] | |||
| // WARNING ... | |||
| WARNING = logger[logging.WARNING] | |||
| // ERROR ... | |||
| ERROR = logger[logging.ERROR] | |||
| // FATAL ... | |||
| FATAL = logger[logging.FATAL] | |||
| ) | |||
| ``` | |||
| Then from your app you could do: | |||
| ```go | |||
| package main | |||
| import ( | |||
| "github.com/yourusername/yourapp/log" | |||
| ) | |||
| func main() { | |||
| log.INFO.Print("log message") | |||
| } | |||
| ``` | |||
| @@ -0,0 +1,40 @@ | |||
| package logging | |||
| import ( | |||
| "fmt" | |||
| ) | |||
const (
	// ANSI escape sequences used for colouring terminal output:
	// resetSeq restores the default colour, colourSeq is a format
	// string taking an ANSI colour number.
	resetSeq  = "\033[0m"
	colourSeq = "\033[0;%dm"
)

// colour maps each log level to the ANSI colour escape code used as its prefix.
var colour = map[level]string{
	INFO:    fmt.Sprintf(colourSeq, 94), // blue
	WARNING: fmt.Sprintf(colourSeq, 95), // pink
	ERROR:   fmt.Sprintf(colourSeq, 91), // red
	FATAL:   fmt.Sprintf(colourSeq, 91), // red
}
// ColouredFormatter colours log messages with ANSI escape codes
// and adds filename and line number before the log message.
// See https://en.wikipedia.org/wiki/ANSI_escape_code
type ColouredFormatter struct {
}

// GetPrefix returns the colour escape code for the given level.
func (f *ColouredFormatter) GetPrefix(lvl level) string {
	return colour[lvl]
}

// GetSuffix returns the reset sequence code (the level is ignored).
func (f *ColouredFormatter) GetSuffix(lvl level) string {
	return resetSeq
}

// Format adds filename and line number before the log message.
func (f *ColouredFormatter) Format(lvl level, v ...interface{}) []interface{} {
	return append([]interface{}{header()}, v...)
}
| @@ -0,0 +1,20 @@ | |||
| package logging | |||
// DefaultFormatter adds filename and line number before the log message,
// without any colouring.
type DefaultFormatter struct {
}

// GetPrefix returns "" (no prefix is added).
func (f *DefaultFormatter) GetPrefix(lvl level) string {
	return ""
}

// GetSuffix returns "" (no suffix is added).
func (f *DefaultFormatter) GetSuffix(lvl level) string {
	return ""
}

// Format adds filename and line number before the log message.
func (f *DefaultFormatter) Format(lvl level, v ...interface{}) []interface{} {
	return append([]interface{}{header()}, v...)
}
| @@ -0,0 +1,30 @@ | |||
| package logging | |||
| import ( | |||
| "fmt" | |||
| "path/filepath" | |||
| "runtime" | |||
| ) | |||
const (
	// depth is the number of stack frames header() skips so that the
	// reported file:line is the user's call site, not this package's
	// internal wrapper frames.
	depth = 3
)
// Formatter decorates log output: GetPrefix/GetSuffix produce text placed
// around each message, and Format transforms the argument list before it
// is passed to the underlying logger.
type Formatter interface {
	GetPrefix(lvl level) string
	Format(lvl level, v ...interface{}) []interface{}
	GetSuffix(lvl level) string
}
| // Returns header including filename and line number | |||
| func header() string { | |||
| _, fn, line, ok := runtime.Caller(depth) | |||
| if !ok { | |||
| fn = "???" | |||
| line = 1 | |||
| } | |||
| return fmt.Sprintf("%s:%d ", filepath.Base(fn), line) | |||
| } | |||
| @@ -0,0 +1,7 @@ | |||
| module github.com/RichardKnop/logging | |||
| require ( | |||
| github.com/davecgh/go-spew v1.1.1 // indirect | |||
| github.com/pmezard/go-difflib v1.0.0 // indirect | |||
| github.com/stretchr/testify v1.2.2 | |||
| ) | |||
| @@ -0,0 +1,6 @@ | |||
| github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= | |||
| github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | |||
| github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= | |||
| github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | |||
| github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= | |||
| github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= | |||
| @@ -0,0 +1,9 @@ | |||
| { | |||
| "Linters": | |||
| { | |||
| "vet": | |||
| { | |||
| "Command": "go tool vet" | |||
| } | |||
| } | |||
| } | |||
| @@ -0,0 +1,17 @@ | |||
| package logging | |||
// LoggerInterface will accept stdlib logger and a custom logger.
// There's no standard interface, this is the closest we get, unfortunately.
// It mirrors the print/fatal/panic method set of *log.Logger.
type LoggerInterface interface {
	Print(...interface{})
	Printf(string, ...interface{})
	Println(...interface{})
	Fatal(...interface{})
	Fatalf(string, ...interface{})
	Fatalln(...interface{})
	Panic(...interface{})
	Panicf(string, ...interface{})
	Panicln(...interface{})
}
| @@ -0,0 +1,134 @@ | |||
| package logging | |||
| import ( | |||
| "io" | |||
| "log" | |||
| "os" | |||
| ) | |||
// level identifies the severity of a log message.
type level int

const (
	// DEBUG level
	DEBUG level = iota
	// INFO level
	INFO
	// WARNING level
	WARNING
	// ERROR level
	ERROR
	// FATAL level
	FATAL

	// flag is the standard-library log flag set (date and time) applied
	// to every logger created by New.
	flag = log.Ldate | log.Ltime
)
// prefix maps each log level to the textual tag written before the
// timestamp of every message logged at that level.
var prefix = map[level]string{
	DEBUG:   "DEBUG: ",
	INFO:    "INFO: ",
	WARNING: "WARNING: ",
	ERROR:   "ERROR: ",
	FATAL:   "FATAL: ",
}
// Logger maps each log level to the LoggerInterface that writes at that level.
type Logger map[level]LoggerInterface
| // New returns instance of Logger | |||
| func New(out, errOut io.Writer, f Formatter) Logger { | |||
| // Fall back to stdout if out not set | |||
| if out == nil { | |||
| out = os.Stdout | |||
| } | |||
| // Fall back to stderr if errOut not set | |||
| if errOut == nil { | |||
| errOut = os.Stderr | |||
| } | |||
| // Fall back to DefaultFormatter if f not set | |||
| if f == nil { | |||
| f = new(DefaultFormatter) | |||
| } | |||
| l := make(map[level]LoggerInterface, 5) | |||
| l[DEBUG] = &Wrapper{lvl: DEBUG, formatter: f, logger: log.New(out, f.GetPrefix(DEBUG)+prefix[DEBUG], flag)} | |||
| l[INFO] = &Wrapper{lvl: INFO, formatter: f, logger: log.New(out, f.GetPrefix(INFO)+prefix[INFO], flag)} | |||
| l[WARNING] = &Wrapper{lvl: INFO, formatter: f, logger: log.New(out, f.GetPrefix(WARNING)+prefix[WARNING], flag)} | |||
| l[ERROR] = &Wrapper{lvl: INFO, formatter: f, logger: log.New(errOut, f.GetPrefix(ERROR)+prefix[ERROR], flag)} | |||
| l[FATAL] = &Wrapper{lvl: INFO, formatter: f, logger: log.New(errOut, f.GetPrefix(FATAL)+prefix[FATAL], flag)} | |||
| return Logger(l) | |||
| } | |||
// Wrapper adapts an underlying LoggerInterface, decorating every message
// with a Formatter's per-level prefix and suffix before delegating.
type Wrapper struct {
	lvl level // severity this wrapper logs at
	formatter Formatter // supplies per-level decoration (Format/GetPrefix/GetSuffix)
	logger LoggerInterface // destination logger receiving the decorated output
}
| // Print ... | |||
| func (w *Wrapper) Print(v ...interface{}) { | |||
| v = w.formatter.Format(w.lvl, v...) | |||
| v = append(v, w.formatter.GetSuffix(w.lvl)) | |||
| w.logger.Print(v...) | |||
| } | |||
| // Printf ... | |||
| func (w *Wrapper) Printf(format string, v ...interface{}) { | |||
| suffix := w.formatter.GetSuffix(w.lvl) | |||
| v = w.formatter.Format(w.lvl, v...) | |||
| w.logger.Printf("%s"+format+suffix, v...) | |||
| } | |||
| // Println ... | |||
| func (w *Wrapper) Println(v ...interface{}) { | |||
| v = w.formatter.Format(w.lvl, v...) | |||
| v = append(v, w.formatter.GetSuffix(w.lvl)) | |||
| w.logger.Println(v...) | |||
| } | |||
| // Fatal ... | |||
| func (w *Wrapper) Fatal(v ...interface{}) { | |||
| v = w.formatter.Format(w.lvl, v...) | |||
| v = append(v, w.formatter.GetSuffix(w.lvl)) | |||
| w.logger.Fatal(v...) | |||
| } | |||
| // Fatalf ... | |||
| func (w *Wrapper) Fatalf(format string, v ...interface{}) { | |||
| suffix := w.formatter.GetSuffix(w.lvl) | |||
| v = w.formatter.Format(w.lvl, v...) | |||
| w.logger.Fatalf("%s"+format+suffix, v...) | |||
| } | |||
| // Fatalln ... | |||
| func (w *Wrapper) Fatalln(v ...interface{}) { | |||
| v = w.formatter.Format(w.lvl, v...) | |||
| v = append(v, w.formatter.GetSuffix(w.lvl)) | |||
| w.logger.Fatalln(v...) | |||
| } | |||
| // Panic ... | |||
| func (w *Wrapper) Panic(v ...interface{}) { | |||
| v = w.formatter.Format(w.lvl, v...) | |||
| v = append(v, w.formatter.GetSuffix(w.lvl)) | |||
| w.logger.Fatal(v...) | |||
| } | |||
| // Panicf ... | |||
| func (w *Wrapper) Panicf(format string, v ...interface{}) { | |||
| suffix := w.formatter.GetSuffix(w.lvl) | |||
| v = w.formatter.Format(w.lvl, v...) | |||
| w.logger.Panicf("%s"+format+suffix, v...) | |||
| } | |||
| // Panicln ... | |||
| func (w *Wrapper) Panicln(v ...interface{}) { | |||
| v = w.formatter.Format(w.lvl, v...) | |||
| v = append(v, w.formatter.GetSuffix(w.lvl)) | |||
| w.logger.Panicln(v...) | |||
| } | |||
| @@ -0,0 +1,354 @@ | |||
| Mozilla Public License, version 2.0 | |||
| 1. Definitions | |||
| 1.1. “Contributor” | |||
| means each individual or legal entity that creates, contributes to the | |||
| creation of, or owns Covered Software. | |||
| 1.2. “Contributor Version” | |||
| means the combination of the Contributions of others (if any) used by a | |||
| Contributor and that particular Contributor’s Contribution. | |||
| 1.3. “Contribution” | |||
| means Covered Software of a particular Contributor. | |||
| 1.4. “Covered Software” | |||
| means Source Code Form to which the initial Contributor has attached the | |||
| notice in Exhibit A, the Executable Form of such Source Code Form, and | |||
| Modifications of such Source Code Form, in each case including portions | |||
| thereof. | |||
| 1.5. “Incompatible With Secondary Licenses” | |||
| means | |||
| a. that the initial Contributor has attached the notice described in | |||
| Exhibit B to the Covered Software; or | |||
| b. that the Covered Software was made available under the terms of version | |||
| 1.1 or earlier of the License, but not also under the terms of a | |||
| Secondary License. | |||
| 1.6. “Executable Form” | |||
| means any form of the work other than Source Code Form. | |||
| 1.7. “Larger Work” | |||
| means a work that combines Covered Software with other material, in a separate | |||
| file or files, that is not Covered Software. | |||
| 1.8. “License” | |||
| means this document. | |||
| 1.9. “Licensable” | |||
| means having the right to grant, to the maximum extent possible, whether at the | |||
| time of the initial grant or subsequently, any and all of the rights conveyed by | |||
| this License. | |||
| 1.10. “Modifications” | |||
| means any of the following: | |||
| a. any file in Source Code Form that results from an addition to, deletion | |||
| from, or modification of the contents of Covered Software; or | |||
| b. any new file in Source Code Form that contains any Covered Software. | |||
| 1.11. “Patent Claims” of a Contributor | |||
| means any patent claim(s), including without limitation, method, process, | |||
| and apparatus claims, in any patent Licensable by such Contributor that | |||
| would be infringed, but for the grant of the License, by the making, | |||
| using, selling, offering for sale, having made, import, or transfer of | |||
| either its Contributions or its Contributor Version. | |||
| 1.12. “Secondary License” | |||
| means either the GNU General Public License, Version 2.0, the GNU Lesser | |||
| General Public License, Version 2.1, the GNU Affero General Public | |||
| License, Version 3.0, or any later versions of those licenses. | |||
| 1.13. “Source Code Form” | |||
| means the form of the work preferred for making modifications. | |||
| 1.14. “You” (or “Your”) | |||
| means an individual or a legal entity exercising rights under this | |||
| License. For legal entities, “You” includes any entity that controls, is | |||
| controlled by, or is under common control with You. For purposes of this | |||
| definition, “control” means (a) the power, direct or indirect, to cause | |||
| the direction or management of such entity, whether by contract or | |||
| otherwise, or (b) ownership of more than fifty percent (50%) of the | |||
| outstanding shares or beneficial ownership of such entity. | |||
| 2. License Grants and Conditions | |||
| 2.1. Grants | |||
| Each Contributor hereby grants You a world-wide, royalty-free, | |||
| non-exclusive license: | |||
| a. under intellectual property rights (other than patent or trademark) | |||
| Licensable by such Contributor to use, reproduce, make available, | |||
| modify, display, perform, distribute, and otherwise exploit its | |||
| Contributions, either on an unmodified basis, with Modifications, or as | |||
| part of a Larger Work; and | |||
| b. under Patent Claims of such Contributor to make, use, sell, offer for | |||
| sale, have made, import, and otherwise transfer either its Contributions | |||
| or its Contributor Version. | |||
| 2.2. Effective Date | |||
| The licenses granted in Section 2.1 with respect to any Contribution become | |||
| effective for each Contribution on the date the Contributor first distributes | |||
| such Contribution. | |||
| 2.3. Limitations on Grant Scope | |||
| The licenses granted in this Section 2 are the only rights granted under this | |||
| License. No additional rights or licenses will be implied from the distribution | |||
| or licensing of Covered Software under this License. Notwithstanding Section | |||
| 2.1(b) above, no patent license is granted by a Contributor: | |||
| a. for any code that a Contributor has removed from Covered Software; or | |||
| b. for infringements caused by: (i) Your and any other third party’s | |||
| modifications of Covered Software, or (ii) the combination of its | |||
| Contributions with other software (except as part of its Contributor | |||
| Version); or | |||
| c. under Patent Claims infringed by Covered Software in the absence of its | |||
| Contributions. | |||
| This License does not grant any rights in the trademarks, service marks, or | |||
| logos of any Contributor (except as may be necessary to comply with the | |||
| notice requirements in Section 3.4). | |||
| 2.4. Subsequent Licenses | |||
| No Contributor makes additional grants as a result of Your choice to | |||
| distribute the Covered Software under a subsequent version of this License | |||
| (see Section 10.2) or under the terms of a Secondary License (if permitted | |||
| under the terms of Section 3.3). | |||
| 2.5. Representation | |||
| Each Contributor represents that the Contributor believes its Contributions | |||
| are its original creation(s) or it has sufficient rights to grant the | |||
| rights to its Contributions conveyed by this License. | |||
| 2.6. Fair Use | |||
| This License is not intended to limit any rights You have under applicable | |||
| copyright doctrines of fair use, fair dealing, or other equivalents. | |||
| 2.7. Conditions | |||
| Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in | |||
| Section 2.1. | |||
| 3. Responsibilities | |||
| 3.1. Distribution of Source Form | |||
| All distribution of Covered Software in Source Code Form, including any | |||
| Modifications that You create or to which You contribute, must be under the | |||
| terms of this License. You must inform recipients that the Source Code Form | |||
| of the Covered Software is governed by the terms of this License, and how | |||
| they can obtain a copy of this License. You may not attempt to alter or | |||
| restrict the recipients’ rights in the Source Code Form. | |||
| 3.2. Distribution of Executable Form | |||
| If You distribute Covered Software in Executable Form then: | |||
| a. such Covered Software must also be made available in Source Code Form, | |||
| as described in Section 3.1, and You must inform recipients of the | |||
| Executable Form how they can obtain a copy of such Source Code Form by | |||
| reasonable means in a timely manner, at a charge no more than the cost | |||
| of distribution to the recipient; and | |||
| b. You may distribute such Executable Form under the terms of this License, | |||
| or sublicense it under different terms, provided that the license for | |||
| the Executable Form does not attempt to limit or alter the recipients’ | |||
| rights in the Source Code Form under this License. | |||
| 3.3. Distribution of a Larger Work | |||
| You may create and distribute a Larger Work under terms of Your choice, | |||
| provided that You also comply with the requirements of this License for the | |||
| Covered Software. If the Larger Work is a combination of Covered Software | |||
| with a work governed by one or more Secondary Licenses, and the Covered | |||
| Software is not Incompatible With Secondary Licenses, this License permits | |||
| You to additionally distribute such Covered Software under the terms of | |||
| such Secondary License(s), so that the recipient of the Larger Work may, at | |||
| their option, further distribute the Covered Software under the terms of | |||
| either this License or such Secondary License(s). | |||
| 3.4. Notices | |||
| You may not remove or alter the substance of any license notices (including | |||
| copyright notices, patent notices, disclaimers of warranty, or limitations | |||
| of liability) contained within the Source Code Form of the Covered | |||
| Software, except that You may alter any license notices to the extent | |||
| required to remedy known factual inaccuracies. | |||
| 3.5. Application of Additional Terms | |||
| You may choose to offer, and to charge a fee for, warranty, support, | |||
| indemnity or liability obligations to one or more recipients of Covered | |||
| Software. However, You may do so only on Your own behalf, and not on behalf | |||
| of any Contributor. You must make it absolutely clear that any such | |||
| warranty, support, indemnity, or liability obligation is offered by You | |||
| alone, and You hereby agree to indemnify every Contributor for any | |||
| liability incurred by such Contributor as a result of warranty, support, | |||
| indemnity or liability terms You offer. You may include additional | |||
| disclaimers of warranty and limitations of liability specific to any | |||
| jurisdiction. | |||
| 4. Inability to Comply Due to Statute or Regulation | |||
| If it is impossible for You to comply with any of the terms of this License | |||
| with respect to some or all of the Covered Software due to statute, judicial | |||
| order, or regulation then You must: (a) comply with the terms of this License | |||
| to the maximum extent possible; and (b) describe the limitations and the code | |||
| they affect. Such description must be placed in a text file included with all | |||
| distributions of the Covered Software under this License. Except to the | |||
| extent prohibited by statute or regulation, such description must be | |||
| sufficiently detailed for a recipient of ordinary skill to be able to | |||
| understand it. | |||
| 5. Termination | |||
| 5.1. The rights granted under this License will terminate automatically if You | |||
| fail to comply with any of its terms. However, if You become compliant, | |||
| then the rights granted under this License from a particular Contributor | |||
| are reinstated (a) provisionally, unless and until such Contributor | |||
| explicitly and finally terminates Your grants, and (b) on an ongoing basis, | |||
| if such Contributor fails to notify You of the non-compliance by some | |||
| reasonable means prior to 60 days after You have come back into compliance. | |||
| Moreover, Your grants from a particular Contributor are reinstated on an | |||
| ongoing basis if such Contributor notifies You of the non-compliance by | |||
| some reasonable means, this is the first time You have received notice of | |||
| non-compliance with this License from such Contributor, and You become | |||
| compliant prior to 30 days after Your receipt of the notice. | |||
| 5.2. If You initiate litigation against any entity by asserting a patent | |||
| infringement claim (excluding declaratory judgment actions, counter-claims, | |||
| and cross-claims) alleging that a Contributor Version directly or | |||
| indirectly infringes any patent, then the rights granted to You by any and | |||
| all Contributors for the Covered Software under Section 2.1 of this License | |||
| shall terminate. | |||
| 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user | |||
| license agreements (excluding distributors and resellers) which have been | |||
| validly granted by You or Your distributors under this License prior to | |||
| termination shall survive termination. | |||
| 6. Disclaimer of Warranty | |||
| Covered Software is provided under this License on an “as is” basis, without | |||
| warranty of any kind, either expressed, implied, or statutory, including, | |||
| without limitation, warranties that the Covered Software is free of defects, | |||
| merchantable, fit for a particular purpose or non-infringing. The entire | |||
| risk as to the quality and performance of the Covered Software is with You. | |||
| Should any Covered Software prove defective in any respect, You (not any | |||
| Contributor) assume the cost of any necessary servicing, repair, or | |||
| correction. This disclaimer of warranty constitutes an essential part of this | |||
| License. No use of any Covered Software is authorized under this License | |||
| except under this disclaimer. | |||
| 7. Limitation of Liability | |||
| Under no circumstances and under no legal theory, whether tort (including | |||
| negligence), contract, or otherwise, shall any Contributor, or anyone who | |||
| distributes Covered Software as permitted above, be liable to You for any | |||
| direct, indirect, special, incidental, or consequential damages of any | |||
| character including, without limitation, damages for lost profits, loss of | |||
| goodwill, work stoppage, computer failure or malfunction, or any and all | |||
| other commercial damages or losses, even if such party shall have been | |||
| informed of the possibility of such damages. This limitation of liability | |||
| shall not apply to liability for death or personal injury resulting from such | |||
| party’s negligence to the extent applicable law prohibits such limitation. | |||
| Some jurisdictions do not allow the exclusion or limitation of incidental or | |||
| consequential damages, so this exclusion and limitation may not apply to You. | |||
| 8. Litigation | |||
| Any litigation relating to this License may be brought only in the courts of | |||
| a jurisdiction where the defendant maintains its principal place of business | |||
| and such litigation shall be governed by laws of that jurisdiction, without | |||
| reference to its conflict-of-law provisions. Nothing in this Section shall | |||
| prevent a party’s ability to bring cross-claims or counter-claims. | |||
| 9. Miscellaneous | |||
| This License represents the complete agreement concerning the subject matter | |||
| hereof. If any provision of this License is held to be unenforceable, such | |||
| provision shall be reformed only to the extent necessary to make it | |||
| enforceable. Any law or regulation which provides that the language of a | |||
| contract shall be construed against the drafter shall not be used to construe | |||
| this License against a Contributor. | |||
| 10. Versions of the License | |||
| 10.1. New Versions | |||
| Mozilla Foundation is the license steward. Except as provided in Section | |||
| 10.3, no one other than the license steward has the right to modify or | |||
| publish new versions of this License. Each version will be given a | |||
| distinguishing version number. | |||
| 10.2. Effect of New Versions | |||
| You may distribute the Covered Software under the terms of the version of | |||
| the License under which You originally received the Covered Software, or | |||
| under the terms of any subsequent version published by the license | |||
| steward. | |||
| 10.3. Modified Versions | |||
| If you create software not governed by this License, and you want to | |||
| create a new license for such software, you may create and use a modified | |||
| version of this License if you rename the license and remove any | |||
| references to the name of the license steward (except to note that such | |||
| modified license differs from this License). | |||
| 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses | |||
| If You choose to distribute Source Code Form that is Incompatible With | |||
| Secondary Licenses under the terms of this version of the License, the | |||
| notice described in Exhibit B of this License must be attached. | |||
| Exhibit A - Source Code Form License Notice | |||
| This Source Code Form is subject to the | |||
| terms of the Mozilla Public License, v. | |||
| 2.0. If a copy of the MPL was not | |||
| distributed with this file, You can | |||
| obtain one at | |||
| http://mozilla.org/MPL/2.0/. | |||
| If it is not possible or desirable to put the notice in a particular file, then | |||
| You may include the notice in a location (such as a LICENSE file in a relevant | |||
| directory) where a recipient would be likely to look for such a notice. | |||
| You may add additional accurate notices of copyright ownership. | |||
| Exhibit B - “Incompatible With Secondary Licenses” Notice | |||
| This Source Code Form is “Incompatible | |||
| With Secondary Licenses”, as defined by | |||
| the Mozilla Public License, v. 2.0. | |||
| @@ -0,0 +1,393 @@ | |||
| package amqp | |||
| // NOTE: Using AMQP as a result backend is quite tricky since every time we | |||
| // read a message from the queue keeping task states, the message is removed | |||
| // from the queue. This leads to problems with keeping a reliable state of a | |||
| // group of tasks since concurrent processes updating the group state cause | |||
| // race conditions and inconsistent state. | |||
| // | |||
| // This is avoided by a "clever" hack. A special queue identified by a group | |||
| // UUID is created and we store serialised TaskState objects of successfully | |||
| // completed tasks. By inspecting the queue we can then say: | |||
| // 1) If all group tasks finished (number of unacked messages = group task count) | |||
| // 2) If all group tasks finished AND succeeded (by consuming the queue) | |||
| // | |||
| // It is important to consume the queue exclusively to avoid race conditions. | |||
| import ( | |||
| "bytes" | |||
| "encoding/json" | |||
| "errors" | |||
| "fmt" | |||
| "github.com/RichardKnop/machinery/v1/backends/iface" | |||
| "github.com/RichardKnop/machinery/v1/common" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| "github.com/RichardKnop/machinery/v1/log" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| "github.com/streadway/amqp" | |||
| ) | |||
// Backend represents an AMQP result backend
type Backend struct {
	common.Backend       // shared backend behaviour (GetConfig, ...)
	common.AMQPConnector // broker helpers used below: Open/Close/Connect/InspectQueue/DeleteQueue
}
| // New creates Backend instance | |||
| func New(cnf *config.Config) iface.Backend { | |||
| return &Backend{Backend: common.NewBackend(cnf), AMQPConnector: common.AMQPConnector{}} | |||
| } | |||
| // InitGroup creates and saves a group meta data object | |||
| func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error { | |||
| return nil | |||
| } | |||
// GroupCompleted returns true if all tasks in a group finished
// NOTE: Given AMQP limitation this will only return true if all finished
// tasks were successful as we do not keep track of completed failed tasks
func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) {
	conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig)
	if err != nil {
		return false, err
	}
	defer b.Close(channel, conn)
	queueState, err := b.InspectQueue(channel, groupUUID)
	if err != nil {
		// NOTE(review): the inspection error is swallowed and reported as
		// "not completed" — presumably because the group queue only exists
		// once the first task has finished, so absence means "not done
		// yet". Confirm: a genuine broker error is indistinguishable from
		// that case here.
		return false, nil
	}
	// One message is stored per finished task, so a message count equal
	// to the group size means every task has completed.
	return queueState.Messages == groupTaskCount, nil
}
| // GroupTaskStates returns states of all tasks in the group | |||
| func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) { | |||
| conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| defer b.Close(channel, conn) | |||
| queueState, err := b.InspectQueue(channel, groupUUID) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if queueState.Messages != groupTaskCount { | |||
| return nil, fmt.Errorf("Already consumed: %v", err) | |||
| } | |||
| deliveries, err := channel.Consume( | |||
| groupUUID, // queue name | |||
| "", // consumer tag | |||
| false, // auto-ack | |||
| true, // exclusive | |||
| false, // no-local | |||
| false, // no-wait | |||
| nil, // arguments | |||
| ) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("Queue consume error: %s", err) | |||
| } | |||
| states := make([]*tasks.TaskState, groupTaskCount) | |||
| for i := 0; i < groupTaskCount; i++ { | |||
| d := <-deliveries | |||
| state := new(tasks.TaskState) | |||
| decoder := json.NewDecoder(bytes.NewReader([]byte(d.Body))) | |||
| decoder.UseNumber() | |||
| if err := decoder.Decode(state); err != nil { | |||
| d.Nack(false, false) // multiple, requeue | |||
| return nil, err | |||
| } | |||
| d.Ack(false) // multiple | |||
| states[i] = state | |||
| } | |||
| return states, nil | |||
| } | |||
// TriggerChord flags chord as triggered in the backend storage to make sure
// chord is never triggered multiple times. Returns a boolean flag to indicate
// whether the worker should trigger chord (true) or no if it has been triggered
// already (false)
func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
	conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig)
	if err != nil {
		return false, err
	}
	defer b.Close(channel, conn)
	// An inspection error is taken to mean the "<groupUUID>_chord_triggered"
	// marker queue does not exist, i.e. the chord has not been triggered
	// yet, so the caller should trigger it.
	// NOTE(review): nothing in this method creates the marker queue, so the
	// actual flagging must happen elsewhere — confirm against the caller.
	_, err = b.InspectQueue(channel, amqmChordTriggeredQueue(groupUUID))
	if err != nil {
		return true, nil
	}
	// Marker queue exists: some worker already triggered the chord.
	return false, nil
}
| // SetStatePending updates task state to PENDING | |||
| func (b *Backend) SetStatePending(signature *tasks.Signature) error { | |||
| taskState := tasks.NewPendingTaskState(signature) | |||
| return b.updateState(taskState) | |||
| } | |||
| // SetStateReceived updates task state to RECEIVED | |||
| func (b *Backend) SetStateReceived(signature *tasks.Signature) error { | |||
| taskState := tasks.NewReceivedTaskState(signature) | |||
| return b.updateState(taskState) | |||
| } | |||
| // SetStateStarted updates task state to STARTED | |||
| func (b *Backend) SetStateStarted(signature *tasks.Signature) error { | |||
| taskState := tasks.NewStartedTaskState(signature) | |||
| return b.updateState(taskState) | |||
| } | |||
| // SetStateRetry updates task state to RETRY | |||
| func (b *Backend) SetStateRetry(signature *tasks.Signature) error { | |||
| state := tasks.NewRetryTaskState(signature) | |||
| return b.updateState(state) | |||
| } | |||
| // SetStateSuccess updates task state to SUCCESS | |||
| func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error { | |||
| taskState := tasks.NewSuccessTaskState(signature, results) | |||
| if err := b.updateState(taskState); err != nil { | |||
| return err | |||
| } | |||
| if signature.GroupUUID == "" { | |||
| return nil | |||
| } | |||
| return b.markTaskCompleted(signature, taskState) | |||
| } | |||
| // SetStateFailure updates task state to FAILURE | |||
| func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error { | |||
| taskState := tasks.NewFailureTaskState(signature, err) | |||
| if err := b.updateState(taskState); err != nil { | |||
| return err | |||
| } | |||
| if signature.GroupUUID == "" { | |||
| return nil | |||
| } | |||
| return b.markTaskCompleted(signature, taskState) | |||
| } | |||
// GetState returns the latest task state. It will only return the status once
// as the message will get consumed and removed from the queue.
func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) {
	// Per-task state queues are transient: both the message and the queue
	// itself expire after the configured result TTL.
	declareQueueArgs := amqp.Table{
		// Time in milliseconds
		// after that message will expire
		"x-message-ttl": int32(b.getExpiresIn()),
		// Time after that the queue will be deleted.
		"x-expires": int32(b.getExpiresIn()),
	}
	conn, channel, _, _, _, err := b.Connect(
		b.GetConfig().ResultBackend,
		b.GetConfig().TLSConfig,
		b.GetConfig().AMQP.Exchange, // exchange name
		b.GetConfig().AMQP.ExchangeType, // exchange type
		taskUUID, // queue name
		false, // queue durable
		true, // queue delete when unused
		taskUUID, // queue binding key
		nil, // exchange declare args
		declareQueueArgs, // queue declare args
		nil, // queue binding args
	)
	if err != nil {
		return nil, err
	}
	defer b.Close(channel, conn)
	d, ok, err := channel.Get(
		taskUUID, // queue name
		false, // auto-ack (original comment said "multiple"; this is channel.Get's autoAck argument)
	)
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, errors.New("No state ready")
	}
	// NOTE(review): the message is acked before decoding, so a state that
	// fails to unmarshal is lost rather than redelivered — confirm intended.
	d.Ack(false) // multiple=false: ack only this delivery
	state := new(tasks.TaskState)
	decoder := json.NewDecoder(bytes.NewReader([]byte(d.Body)))
	// Keep numbers as json.Number to avoid float64 rounding.
	decoder.UseNumber()
	if err := decoder.Decode(state); err != nil {
		log.ERROR.Printf("Failed to unmarshal task state: %s", string(d.Body))
		log.ERROR.Print(err)
		return nil, err
	}
	return state, nil
}
| // PurgeState deletes stored task state | |||
| func (b *Backend) PurgeState(taskUUID string) error { | |||
| conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer b.Close(channel, conn) | |||
| return b.DeleteQueue(channel, taskUUID) | |||
| } | |||
| // PurgeGroupMeta deletes stored group meta data | |||
| func (b *Backend) PurgeGroupMeta(groupUUID string) error { | |||
| conn, channel, err := b.Open(b.GetConfig().ResultBackend, b.GetConfig().TLSConfig) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer b.Close(channel, conn) | |||
| b.DeleteQueue(channel, amqmChordTriggeredQueue(groupUUID)) | |||
| return b.DeleteQueue(channel, groupUUID) | |||
| } | |||
// updateState saves the current task state by publishing the serialised
// TaskState to a transient queue named after the task UUID (consumed again
// by GetState). The publish is confirmed by the broker before returning.
func (b *Backend) updateState(taskState *tasks.TaskState) error {
	message, err := json.Marshal(taskState)
	if err != nil {
		return fmt.Errorf("JSON marshal error: %s", err)
	}
	// Make both the state message and its queue expire after the result TTL.
	declareQueueArgs := amqp.Table{
		// Time in milliseconds
		// after that message will expire
		"x-message-ttl": int32(b.getExpiresIn()),
		// Time after that the queue will be deleted.
		"x-expires": int32(b.getExpiresIn()),
	}
	conn, channel, queue, confirmsChan, _, err := b.Connect(
		b.GetConfig().ResultBackend,
		b.GetConfig().TLSConfig,
		b.GetConfig().AMQP.Exchange, // exchange name
		b.GetConfig().AMQP.ExchangeType, // exchange type
		taskState.TaskUUID, // queue name
		false, // queue durable
		true, // queue delete when unused
		taskState.TaskUUID, // queue binding key
		nil, // exchange declare args
		declareQueueArgs, // queue declare args
		nil, // queue binding args
	)
	if err != nil {
		return err
	}
	defer b.Close(channel, conn)
	if err := channel.Publish(
		b.GetConfig().AMQP.Exchange, // exchange
		queue.Name, // routing key
		false, // mandatory
		false, // immediate
		amqp.Publishing{
			ContentType: "application/json",
			Body: message,
			DeliveryMode: amqp.Persistent, // Persistent // Transient
		},
	); err != nil {
		return err
	}
	// Block on the publisher confirm so a lost publish surfaces as an
	// error instead of silently dropping the state update.
	confirmed := <-confirmsChan
	if confirmed.Ack {
		return nil
	}
	return fmt.Errorf("Failed delivery of delivery tag: %d", confirmed.DeliveryTag)
}
| // getExpiresIn returns expiration time | |||
| func (b *Backend) getExpiresIn() int { | |||
| resultsExpireIn := b.GetConfig().ResultsExpireIn * 1000 | |||
| if resultsExpireIn == 0 { | |||
| // // expire results after 1 hour by default | |||
| resultsExpireIn = config.DefaultResultsExpireIn * 1000 | |||
| } | |||
| return resultsExpireIn | |||
| } | |||
// markTaskCompleted records a finished task by publishing its serialised
// state to the group's queue (named by signature.GroupUUID), which is what
// GroupCompleted and GroupTaskStates inspect/consume.
// NOTE(review): the original comment mentioned groupUUID_success /
// groupUUID_failure queues, but the code publishes to the plain groupUUID
// queue — the comment appeared outdated.
func (b *Backend) markTaskCompleted(signature *tasks.Signature, taskState *tasks.TaskState) error {
	// Nothing to record for tasks that are not part of a group.
	if signature.GroupUUID == "" || signature.GroupTaskCount == 0 {
		return nil
	}
	message, err := json.Marshal(taskState)
	if err != nil {
		return fmt.Errorf("JSON marshal error: %s", err)
	}
	// Make both the message and the group queue expire after the result TTL.
	declareQueueArgs := amqp.Table{
		// Time in milliseconds
		// after that message will expire
		"x-message-ttl": int32(b.getExpiresIn()),
		// Time after that the queue will be deleted.
		"x-expires": int32(b.getExpiresIn()),
	}
	conn, channel, queue, confirmsChan, _, err := b.Connect(
		b.GetConfig().ResultBackend,
		b.GetConfig().TLSConfig,
		b.GetConfig().AMQP.Exchange, // exchange name
		b.GetConfig().AMQP.ExchangeType, // exchange type
		signature.GroupUUID, // queue name
		false, // queue durable
		true, // queue delete when unused
		signature.GroupUUID, // queue binding key
		nil, // exchange declare args
		declareQueueArgs, // queue declare args
		nil, // queue binding args
	)
	if err != nil {
		return err
	}
	defer b.Close(channel, conn)
	if err := channel.Publish(
		b.GetConfig().AMQP.Exchange, // exchange
		queue.Name, // routing key
		false, // mandatory
		false, // immediate
		amqp.Publishing{
			ContentType: "application/json",
			Body: message,
			DeliveryMode: amqp.Persistent, // Persistent // Transient
		},
	); err != nil {
		return err
	}
	// Block on the publisher confirm so a lost publish surfaces as an error.
	confirmed := <-confirmsChan
	if !confirmed.Ack {
		return fmt.Errorf("Failed delivery of delivery tag: %v", confirmed.DeliveryTag)
	}
	return nil
}
// amqmChordTriggeredQueue returns the name of the queue used to flag that the
// chord for the given group has been triggered.
func amqmChordTriggeredQueue(groupUUID string) string {
	return groupUUID + "_chord_triggered"
}
| @@ -0,0 +1,512 @@ | |||
| package dynamodb | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "time" | |||
| "github.com/aws/aws-sdk-go/aws/session" | |||
| "github.com/RichardKnop/machinery/v1/backends/iface" | |||
| "github.com/RichardKnop/machinery/v1/common" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| "github.com/RichardKnop/machinery/v1/log" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| "github.com/aws/aws-sdk-go/aws" | |||
| "github.com/aws/aws-sdk-go/service/dynamodb" | |||
| "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" | |||
| "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" | |||
| ) | |||
// Backend is a DynamoDB-backed result backend. Task states and group meta
// data are kept in two separate tables whose names come from the
// configuration (TaskStatesTable / GroupMetasTable).
type Backend struct {
	common.Backend
	cnf    *config.Config            // machinery configuration (table names etc.)
	client dynamodbiface.DynamoDBAPI // DynamoDB API; injectable for tests via cnf.DynamoDB.Client
}
// New creates a DynamoDB Backend instance. If the configuration supplies a
// pre-built DynamoDB client it is used as-is (useful for tests); otherwise a
// client is created from the shared AWS config/credentials.
func New(cnf *config.Config) iface.Backend {
	backend := &Backend{Backend: common.NewBackend(cnf), cnf: cnf}
	if cnf.DynamoDB != nil && cnf.DynamoDB.Client != nil {
		backend.client = cnf.DynamoDB.Client
	} else {
		sess := session.Must(session.NewSessionWithOptions(session.Options{
			SharedConfigState: session.SharedConfigEnable,
		}))
		backend.client = dynamodb.New(sess)
	}
	// Check if needed tables exist
	err := backend.checkRequiredTablesIfExist()
	if err != nil {
		// NOTE(review): if log.FATAL terminates the process, New never returns
		// when tables are missing — confirm the logger's Fatal semantics.
		log.FATAL.Printf("Failed to prepare tables. Error: %v", err)
	}
	return backend
}
| // InitGroup ... | |||
| func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error { | |||
| meta := tasks.GroupMeta{ | |||
| GroupUUID: groupUUID, | |||
| TaskUUIDs: taskUUIDs, | |||
| CreatedAt: time.Now().UTC(), | |||
| } | |||
| av, err := dynamodbattribute.MarshalMap(meta) | |||
| if err != nil { | |||
| log.ERROR.Printf("Error when marshaling Dynamodb attributes. Err: %v", err) | |||
| return err | |||
| } | |||
| input := &dynamodb.PutItemInput{ | |||
| Item: av, | |||
| TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable), | |||
| } | |||
| _, err = b.client.PutItem(input) | |||
| if err != nil { | |||
| log.ERROR.Printf("Got error when calling PutItem: %v; Error: %v", input, err) | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| // GroupCompleted ... | |||
| func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) { | |||
| groupMeta, err := b.getGroupMeta(groupUUID) | |||
| if err != nil { | |||
| return false, err | |||
| } | |||
| taskStates, err := b.getStates(groupMeta.TaskUUIDs...) | |||
| if err != nil { | |||
| return false, err | |||
| } | |||
| var countSuccessTasks = 0 | |||
| for _, taskState := range taskStates { | |||
| if taskState.IsCompleted() { | |||
| countSuccessTasks++ | |||
| } | |||
| } | |||
| return countSuccessTasks == groupTaskCount, nil | |||
| } | |||
| // GroupTaskStates ... | |||
| func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) { | |||
| groupMeta, err := b.getGroupMeta(groupUUID) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return b.getStates(groupMeta.TaskUUIDs...) | |||
| } | |||
// TriggerChord marks the chord as triggered in the group meta data and
// reports whether the caller should run the chord callback (true at most once
// per group, assuming the advisory lock below holds).
func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
	// Get the group meta data
	groupMeta, err := b.getGroupMeta(groupUUID)
	if err != nil {
		return false, err
	}
	// Chord has already been triggered, return false (should not trigger again)
	if groupMeta.ChordTriggered {
		return false, nil
	}
	// If group meta is locked, wait until it's unlocked.
	// NOTE(review): fetch errors are deliberately ignored here (stale meta is
	// re-checked next iteration), but the wait + lock below is not atomic, so
	// two workers could in principle both pass — confirm whether a conditional
	// update is required.
	for groupMeta.Lock {
		groupMeta, _ = b.getGroupMeta(groupUUID)
		log.WARNING.Print("Group meta locked, waiting")
		time.Sleep(time.Millisecond * 5)
	}
	// Acquire lock
	if err = b.lockGroupMeta(groupUUID); err != nil {
		return false, err
	}
	defer b.unlockGroupMeta(groupUUID)
	// Flag the chord as triggered so subsequent calls return false.
	err = b.chordTriggered(groupUUID)
	if err != nil {
		return false, err
	}
	// err is nil at this point.
	return true, err
}
| // SetStatePending ... | |||
| func (b *Backend) SetStatePending(signature *tasks.Signature) error { | |||
| taskState := tasks.NewPendingTaskState(signature) | |||
| // taskUUID is the primary key of the table, so a new task need to be created first, instead of using dynamodb.UpdateItemInput directly | |||
| return b.initTaskState(taskState) | |||
| } | |||
| // SetStateReceived ... | |||
| func (b *Backend) SetStateReceived(signature *tasks.Signature) error { | |||
| taskState := tasks.NewReceivedTaskState(signature) | |||
| return b.setTaskState(taskState) | |||
| } | |||
| // SetStateStarted ... | |||
| func (b *Backend) SetStateStarted(signature *tasks.Signature) error { | |||
| taskState := tasks.NewStartedTaskState(signature) | |||
| return b.setTaskState(taskState) | |||
| } | |||
| // SetStateRetry ... | |||
| func (b *Backend) SetStateRetry(signature *tasks.Signature) error { | |||
| taskState := tasks.NewRetryTaskState(signature) | |||
| return b.setTaskState(taskState) | |||
| } | |||
| // SetStateSuccess ... | |||
| func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error { | |||
| taskState := tasks.NewSuccessTaskState(signature, results) | |||
| return b.setTaskState(taskState) | |||
| } | |||
| // SetStateFailure ... | |||
| func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error { | |||
| taskState := tasks.NewFailureTaskState(signature, err) | |||
| return b.updateToFailureStateWithError(taskState) | |||
| } | |||
| // GetState ... | |||
| func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) { | |||
| result, err := b.client.GetItem(&dynamodb.GetItemInput{ | |||
| TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable), | |||
| Key: map[string]*dynamodb.AttributeValue{ | |||
| "TaskUUID": { | |||
| S: aws.String(taskUUID), | |||
| }, | |||
| }, | |||
| }) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return b.unmarshalTaskStateGetItemResult(result) | |||
| } | |||
| // PurgeState ... | |||
| func (b *Backend) PurgeState(taskUUID string) error { | |||
| input := &dynamodb.DeleteItemInput{ | |||
| Key: map[string]*dynamodb.AttributeValue{ | |||
| "TaskUUID": { | |||
| N: aws.String(taskUUID), | |||
| }, | |||
| }, | |||
| TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable), | |||
| } | |||
| _, err := b.client.DeleteItem(input) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| // PurgeGroupMeta ... | |||
| func (b *Backend) PurgeGroupMeta(groupUUID string) error { | |||
| input := &dynamodb.DeleteItemInput{ | |||
| Key: map[string]*dynamodb.AttributeValue{ | |||
| "GroupUUID": { | |||
| N: aws.String(groupUUID), | |||
| }, | |||
| }, | |||
| TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable), | |||
| } | |||
| _, err := b.client.DeleteItem(input) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| func (b *Backend) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) { | |||
| result, err := b.client.GetItem(&dynamodb.GetItemInput{ | |||
| TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable), | |||
| Key: map[string]*dynamodb.AttributeValue{ | |||
| "GroupUUID": { | |||
| S: aws.String(groupUUID), | |||
| }, | |||
| }, | |||
| }) | |||
| if err != nil { | |||
| log.ERROR.Printf("Error when getting group meta. Error: %v", err) | |||
| return nil, err | |||
| } | |||
| item, err := b.unmarshalGroupMetaGetItemResult(result) | |||
| if err != nil { | |||
| log.INFO.Println("!!!", result) | |||
| log.ERROR.Printf("Failed to unmarshal item, %v", err) | |||
| return nil, err | |||
| } | |||
| return item, nil | |||
| } | |||
| func (b *Backend) getStates(taskUUIDs ...string) ([]*tasks.TaskState, error) { | |||
| var states []*tasks.TaskState | |||
| stateChan := make(chan *tasks.TaskState, len(taskUUIDs)) | |||
| errChan := make(chan error) | |||
| // There is no method like querying items by `in` a list of primary keys. | |||
| // So a for loop with go routine is used to get multiple items | |||
| for _, id := range taskUUIDs { | |||
| go func(id string) { | |||
| state, err := b.GetState(id) | |||
| if err != nil { | |||
| errChan <- err | |||
| } | |||
| stateChan <- state | |||
| }(id) | |||
| } | |||
| for s := range stateChan { | |||
| states = append(states, s) | |||
| if len(states) == len(taskUUIDs) { | |||
| close(stateChan) | |||
| } | |||
| } | |||
| return states, nil | |||
| } | |||
| func (b *Backend) lockGroupMeta(groupUUID string) error { | |||
| err := b.updateGroupMetaLock(groupUUID, true) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| func (b *Backend) unlockGroupMeta(groupUUID string) error { | |||
| err := b.updateGroupMetaLock(groupUUID, false) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| func (b *Backend) updateGroupMetaLock(groupUUID string, status bool) error { | |||
| input := &dynamodb.UpdateItemInput{ | |||
| ExpressionAttributeNames: map[string]*string{ | |||
| "#L": aws.String("Lock"), | |||
| }, | |||
| ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ | |||
| ":l": { | |||
| BOOL: aws.Bool(status), | |||
| }, | |||
| }, | |||
| Key: map[string]*dynamodb.AttributeValue{ | |||
| "GroupUUID": { | |||
| S: aws.String(groupUUID), | |||
| }, | |||
| }, | |||
| ReturnValues: aws.String("UPDATED_NEW"), | |||
| TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable), | |||
| UpdateExpression: aws.String("SET #L = :l"), | |||
| } | |||
| _, err := b.client.UpdateItem(input) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| func (b *Backend) chordTriggered(groupUUID string) error { | |||
| input := &dynamodb.UpdateItemInput{ | |||
| ExpressionAttributeNames: map[string]*string{ | |||
| "#CT": aws.String("ChordTriggered"), | |||
| }, | |||
| ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ | |||
| ":ct": { | |||
| BOOL: aws.Bool(true), | |||
| }, | |||
| }, | |||
| Key: map[string]*dynamodb.AttributeValue{ | |||
| "GroupUUID": { | |||
| S: aws.String(groupUUID), | |||
| }, | |||
| }, | |||
| ReturnValues: aws.String("UPDATED_NEW"), | |||
| TableName: aws.String(b.cnf.DynamoDB.GroupMetasTable), | |||
| UpdateExpression: aws.String("SET #CT = :ct"), | |||
| } | |||
| _, err := b.client.UpdateItem(input) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
// setTaskState updates an existing task state row. The update expression is
// assembled incrementally: State is always written; CreatedAt and Results are
// appended to the expression only when present on the task state.
func (b *Backend) setTaskState(taskState *tasks.TaskState) error {
	// "State" is indirected through #S (reserved-word-safe name).
	expAttributeNames := map[string]*string{
		"#S": aws.String("State"),
	}
	expAttributeValues := map[string]*dynamodb.AttributeValue{
		":s": {
			S: aws.String(taskState.State),
		},
	}
	keyAttributeValues := map[string]*dynamodb.AttributeValue{
		"TaskUUID": {
			S: aws.String(taskState.TaskUUID),
		},
	}
	exp := "SET #S = :s"
	if !taskState.CreatedAt.IsZero() {
		expAttributeNames["#C"] = aws.String("CreatedAt")
		expAttributeValues[":c"] = &dynamodb.AttributeValue{
			// Stored via time.Time's default String() form.
			S: aws.String(taskState.CreatedAt.String()),
		}
		exp += ", #C = :c"
	}
	if taskState.Results != nil && len(taskState.Results) != 0 {
		expAttributeNames["#R"] = aws.String("Results")
		var results []*dynamodb.AttributeValue
		for _, r := range taskState.Results {
			// Each result becomes a map of Type and the fmt "%v" rendering of
			// the value — both stored as strings.
			avMap := map[string]*dynamodb.AttributeValue{
				"Type": {
					S: aws.String(r.Type),
				},
				"Value": {
					S: aws.String(fmt.Sprintf("%v", r.Value)),
				},
			}
			rs := &dynamodb.AttributeValue{
				M: avMap,
			}
			results = append(results, rs)
		}
		expAttributeValues[":r"] = &dynamodb.AttributeValue{
			L: results,
		}
		exp += ", #R = :r"
	}
	input := &dynamodb.UpdateItemInput{
		ExpressionAttributeNames:  expAttributeNames,
		ExpressionAttributeValues: expAttributeValues,
		Key:                       keyAttributeValues,
		ReturnValues:              aws.String("UPDATED_NEW"),
		TableName:                 aws.String(b.cnf.DynamoDB.TaskStatesTable),
		UpdateExpression:          aws.String(exp),
	}
	_, err := b.client.UpdateItem(input)
	if err != nil {
		return err
	}
	return nil
}
| func (b *Backend) initTaskState(taskState *tasks.TaskState) error { | |||
| av, err := dynamodbattribute.MarshalMap(taskState) | |||
| input := &dynamodb.PutItemInput{ | |||
| Item: av, | |||
| TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable), | |||
| } | |||
| if err != nil { | |||
| return err | |||
| } | |||
| _, err = b.client.PutItem(input) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| func (b *Backend) updateToFailureStateWithError(taskState *tasks.TaskState) error { | |||
| input := &dynamodb.UpdateItemInput{ | |||
| ExpressionAttributeNames: map[string]*string{ | |||
| "#S": aws.String("State"), | |||
| "#E": aws.String("Error"), | |||
| }, | |||
| ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ | |||
| ":s": { | |||
| S: aws.String(taskState.State), | |||
| }, | |||
| ":e": { | |||
| S: aws.String(taskState.Error), | |||
| }, | |||
| }, | |||
| Key: map[string]*dynamodb.AttributeValue{ | |||
| "TaskUUID": { | |||
| S: aws.String(taskState.TaskUUID), | |||
| }, | |||
| }, | |||
| ReturnValues: aws.String("UPDATED_NEW"), | |||
| TableName: aws.String(b.cnf.DynamoDB.TaskStatesTable), | |||
| UpdateExpression: aws.String("SET #S = :s, #E = :e"), | |||
| } | |||
| _, err := b.client.UpdateItem(input) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| func (b *Backend) unmarshalGroupMetaGetItemResult(result *dynamodb.GetItemOutput) (*tasks.GroupMeta, error) { | |||
| if result == nil { | |||
| err := errors.New("task state is nil") | |||
| log.ERROR.Printf("Got error when unmarshal map. Error: %v", err) | |||
| return nil, err | |||
| } | |||
| item := tasks.GroupMeta{} | |||
| err := dynamodbattribute.UnmarshalMap(result.Item, &item) | |||
| if err != nil { | |||
| log.ERROR.Printf("Got error when unmarshal map. Error: %v", err) | |||
| return nil, err | |||
| } | |||
| return &item, err | |||
| } | |||
| func (b *Backend) unmarshalTaskStateGetItemResult(result *dynamodb.GetItemOutput) (*tasks.TaskState, error) { | |||
| if result == nil { | |||
| err := errors.New("task state is nil") | |||
| log.ERROR.Printf("Got error when unmarshal map. Error: %v", err) | |||
| return nil, err | |||
| } | |||
| state := tasks.TaskState{} | |||
| err := dynamodbattribute.UnmarshalMap(result.Item, &state) | |||
| if err != nil { | |||
| log.ERROR.Printf("Got error when unmarshal map. Error: %v", err) | |||
| return nil, err | |||
| } | |||
| return &state, nil | |||
| } | |||
| func (b *Backend) checkRequiredTablesIfExist() error { | |||
| var ( | |||
| taskTableName = b.cnf.DynamoDB.TaskStatesTable | |||
| groupTableName = b.cnf.DynamoDB.GroupMetasTable | |||
| ) | |||
| result, err := b.client.ListTables(&dynamodb.ListTablesInput{}) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if !b.tableExists(taskTableName, result.TableNames) { | |||
| return errors.New("task table doesn't exist") | |||
| } | |||
| if !b.tableExists(groupTableName, result.TableNames) { | |||
| return errors.New("group table doesn't exist") | |||
| } | |||
| return nil | |||
| } | |||
| func (b *Backend) tableExists(tableName string, tableNames []*string) bool { | |||
| for _, t := range tableNames { | |||
| if tableName == *t { | |||
| return true | |||
| } | |||
| } | |||
| return false | |||
| } | |||
| @@ -0,0 +1,210 @@ | |||
| package eager | |||
| import ( | |||
| "bytes" | |||
| "encoding/json" | |||
| "fmt" | |||
| "sync" | |||
| "github.com/RichardKnop/machinery/v1/backends/iface" | |||
| "github.com/RichardKnop/machinery/v1/common" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| ) | |||
// ErrGroupNotFound is returned when no group meta data exists for a given
// group UUID.
type ErrGroupNotFound struct {
	groupUUID string
}

// NewErrGroupNotFound builds an ErrGroupNotFound for the given group UUID.
func NewErrGroupNotFound(groupUUID string) ErrGroupNotFound {
	return ErrGroupNotFound{groupUUID}
}

// Error implements the error interface.
func (e ErrGroupNotFound) Error() string {
	return fmt.Sprintf("Group not found: %s", e.groupUUID)
}
// ErrTasknotFound is returned when no stored state exists for a given task
// UUID.
type ErrTasknotFound struct {
	taskUUID string
}

// NewErrTasknotFound builds an ErrTasknotFound for the given task UUID.
func NewErrTasknotFound(taskUUID string) ErrTasknotFound {
	return ErrTasknotFound{taskUUID}
}

// Error implements the error interface.
func (e ErrTasknotFound) Error() string {
	return fmt.Sprintf("Task not found: %s", e.taskUUID)
}
// Backend represents an "eager" in-memory result backend: all state lives in
// process-local maps and nothing is persisted.
type Backend struct {
	common.Backend
	groups map[string][]string // group UUID -> UUIDs of its member tasks
	tasks map[string][]byte // task UUID -> JSON-encoded tasks.TaskState
	stateMutex sync.Mutex // guards writes to tasks in updateState; NOTE(review): reads elsewhere are unguarded
}
| // New creates EagerBackend instance | |||
| func New() iface.Backend { | |||
| return &Backend{ | |||
| Backend: common.NewBackend(new(config.Config)), | |||
| groups: make(map[string][]string), | |||
| tasks: make(map[string][]byte), | |||
| } | |||
| } | |||
| // InitGroup creates and saves a group meta data object | |||
| func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error { | |||
| tasks := make([]string, 0, len(taskUUIDs)) | |||
| // copy every task | |||
| for _, v := range taskUUIDs { | |||
| tasks = append(tasks, v) | |||
| } | |||
| b.groups[groupUUID] = tasks | |||
| return nil | |||
| } | |||
| // GroupCompleted returns true if all tasks in a group finished | |||
| func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) { | |||
| tasks, ok := b.groups[groupUUID] | |||
| if !ok { | |||
| return false, NewErrGroupNotFound(groupUUID) | |||
| } | |||
| var countSuccessTasks = 0 | |||
| for _, v := range tasks { | |||
| t, err := b.GetState(v) | |||
| if err != nil { | |||
| return false, err | |||
| } | |||
| if t.IsCompleted() { | |||
| countSuccessTasks++ | |||
| } | |||
| } | |||
| return countSuccessTasks == groupTaskCount, nil | |||
| } | |||
| // GroupTaskStates returns states of all tasks in the group | |||
| func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) { | |||
| taskUUIDs, ok := b.groups[groupUUID] | |||
| if !ok { | |||
| return nil, NewErrGroupNotFound(groupUUID) | |||
| } | |||
| ret := make([]*tasks.TaskState, 0, groupTaskCount) | |||
| for _, taskUUID := range taskUUIDs { | |||
| t, err := b.GetState(taskUUID) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| ret = append(ret, t) | |||
| } | |||
| return ret, nil | |||
| } | |||
// TriggerChord flags chord as triggered in the backend storage to make sure
// chord is never trigerred multiple times. Returns a boolean flag to indicate
// whether the worker should trigger chord (true) or no if it has been triggered
// already (false).
//
// The eager backend runs tasks synchronously in a single process, so it
// always answers true and keeps no triggered flag.
func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
	return true, nil
}
| // SetStatePending updates task state to PENDING | |||
| func (b *Backend) SetStatePending(signature *tasks.Signature) error { | |||
| state := tasks.NewPendingTaskState(signature) | |||
| return b.updateState(state) | |||
| } | |||
| // SetStateReceived updates task state to RECEIVED | |||
| func (b *Backend) SetStateReceived(signature *tasks.Signature) error { | |||
| state := tasks.NewReceivedTaskState(signature) | |||
| return b.updateState(state) | |||
| } | |||
| // SetStateStarted updates task state to STARTED | |||
| func (b *Backend) SetStateStarted(signature *tasks.Signature) error { | |||
| state := tasks.NewStartedTaskState(signature) | |||
| return b.updateState(state) | |||
| } | |||
| // SetStateRetry updates task state to RETRY | |||
| func (b *Backend) SetStateRetry(signature *tasks.Signature) error { | |||
| state := tasks.NewRetryTaskState(signature) | |||
| return b.updateState(state) | |||
| } | |||
| // SetStateSuccess updates task state to SUCCESS | |||
| func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error { | |||
| state := tasks.NewSuccessTaskState(signature, results) | |||
| return b.updateState(state) | |||
| } | |||
| // SetStateFailure updates task state to FAILURE | |||
| func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error { | |||
| state := tasks.NewFailureTaskState(signature, err) | |||
| return b.updateState(state) | |||
| } | |||
| // GetState returns the latest task state | |||
| func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) { | |||
| tasktStateBytes, ok := b.tasks[taskUUID] | |||
| if !ok { | |||
| return nil, NewErrTasknotFound(taskUUID) | |||
| } | |||
| state := new(tasks.TaskState) | |||
| decoder := json.NewDecoder(bytes.NewReader(tasktStateBytes)) | |||
| decoder.UseNumber() | |||
| if err := decoder.Decode(state); err != nil { | |||
| return nil, fmt.Errorf("Failed to unmarshal task state %v", b) | |||
| } | |||
| return state, nil | |||
| } | |||
| // PurgeState deletes stored task state | |||
| func (b *Backend) PurgeState(taskUUID string) error { | |||
| _, ok := b.tasks[taskUUID] | |||
| if !ok { | |||
| return NewErrTasknotFound(taskUUID) | |||
| } | |||
| delete(b.tasks, taskUUID) | |||
| return nil | |||
| } | |||
| // PurgeGroupMeta deletes stored group meta data | |||
| func (b *Backend) PurgeGroupMeta(groupUUID string) error { | |||
| _, ok := b.groups[groupUUID] | |||
| if !ok { | |||
| return NewErrGroupNotFound(groupUUID) | |||
| } | |||
| delete(b.groups, groupUUID) | |||
| return nil | |||
| } | |||
| func (b *Backend) updateState(s *tasks.TaskState) error { | |||
| // simulate the behavior of json marshal/unmarshal | |||
| b.stateMutex.Lock() | |||
| defer b.stateMutex.Unlock() | |||
| msg, err := json.Marshal(s) | |||
| if err != nil { | |||
| return fmt.Errorf("Marshal task state error: %v", err) | |||
| } | |||
| b.tasks[s.TaskUUID] = msg | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,28 @@ | |||
| package iface | |||
| import ( | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| ) | |||
// Backend is the common interface implemented by every result backend.
type Backend interface {
	// Group related functions
	InitGroup(groupUUID string, taskUUIDs []string) error
	GroupCompleted(groupUUID string, groupTaskCount int) (bool, error)
	GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error)
	TriggerChord(groupUUID string) (bool, error)
	// Setting / getting task state
	SetStatePending(signature *tasks.Signature) error
	SetStateReceived(signature *tasks.Signature) error
	SetStateStarted(signature *tasks.Signature) error
	SetStateRetry(signature *tasks.Signature) error
	SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error
	SetStateFailure(signature *tasks.Signature, err string) error
	GetState(taskUUID string) (*tasks.TaskState, error)
	// IsAMQP reports whether this backend is AMQP-based.
	IsAMQP() bool
	// Purging stored task states and group meta data
	PurgeState(taskUUID string) error
	PurgeGroupMeta(groupUUID string) error
}
| @@ -0,0 +1,292 @@ | |||
| package memcache | |||
| import ( | |||
| "bytes" | |||
| "encoding/json" | |||
| "time" | |||
| "github.com/RichardKnop/machinery/v1/backends/iface" | |||
| "github.com/RichardKnop/machinery/v1/common" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| "github.com/RichardKnop/machinery/v1/log" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| gomemcache "github.com/bradfitz/gomemcache/memcache" | |||
| ) | |||
// Backend represents a Memcache result backend
type Backend struct {
	common.Backend
	servers []string // memcache server addresses used to build the client lazily
	client *gomemcache.Client // lazily initialised via getClient
}
| // New creates Backend instance | |||
| func New(cnf *config.Config, servers []string) iface.Backend { | |||
| return &Backend{ | |||
| Backend: common.NewBackend(cnf), | |||
| servers: servers, | |||
| } | |||
| } | |||
| // InitGroup creates and saves a group meta data object | |||
| func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error { | |||
| groupMeta := &tasks.GroupMeta{ | |||
| GroupUUID: groupUUID, | |||
| TaskUUIDs: taskUUIDs, | |||
| CreatedAt: time.Now().UTC(), | |||
| } | |||
| encoded, err := json.Marshal(&groupMeta) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return b.getClient().Set(&gomemcache.Item{ | |||
| Key: groupUUID, | |||
| Value: encoded, | |||
| Expiration: b.getExpirationTimestamp(), | |||
| }) | |||
| } | |||
| // GroupCompleted returns true if all tasks in a group finished | |||
| func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) { | |||
| groupMeta, err := b.getGroupMeta(groupUUID) | |||
| if err != nil { | |||
| return false, err | |||
| } | |||
| taskStates, err := b.getStates(groupMeta.TaskUUIDs...) | |||
| if err != nil { | |||
| return false, err | |||
| } | |||
| var countSuccessTasks = 0 | |||
| for _, taskState := range taskStates { | |||
| if taskState.IsCompleted() { | |||
| countSuccessTasks++ | |||
| } | |||
| } | |||
| return countSuccessTasks == groupTaskCount, nil | |||
| } | |||
| // GroupTaskStates returns states of all tasks in the group | |||
| func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) { | |||
| groupMeta, err := b.getGroupMeta(groupUUID) | |||
| if err != nil { | |||
| return []*tasks.TaskState{}, err | |||
| } | |||
| return b.getStates(groupMeta.TaskUUIDs...) | |||
| } | |||
// TriggerChord flags chord as triggered in the backend storage to make sure
// chord is never trigerred multiple times. Returns a boolean flag to indicate
// whether the worker should trigger chord (true) or no if it has been triggered
// already (false)
func (b *Backend) TriggerChord(groupUUID string) (bool, error) {
	groupMeta, err := b.getGroupMeta(groupUUID)
	if err != nil {
		return false, err
	}
	// Chord has already been triggered, return false (should not trigger again)
	if groupMeta.ChordTriggered {
		return false, nil
	}
	// If group meta is locked, wait until it's unlocked.
	// NOTE(review): fetch errors are ignored during the wait (stale meta is
	// re-read next iteration), and the wait + lock below is not atomic —
	// confirm whether memcache CAS should be used here instead.
	for groupMeta.Lock {
		groupMeta, _ = b.getGroupMeta(groupUUID)
		log.WARNING.Print("Group meta locked, waiting")
		time.Sleep(time.Millisecond * 5)
	}
	// Acquire lock
	if err = b.lockGroupMeta(groupMeta); err != nil {
		return false, err
	}
	defer b.unlockGroupMeta(groupMeta)
	// Update the group meta data: mark the chord as triggered so subsequent
	// calls return false. Replace fails if the key vanished in the meantime.
	groupMeta.ChordTriggered = true
	encoded, err := json.Marshal(&groupMeta)
	if err != nil {
		return false, err
	}
	if err = b.getClient().Replace(&gomemcache.Item{
		Key:        groupUUID,
		Value:      encoded,
		Expiration: b.getExpirationTimestamp(),
	}); err != nil {
		return false, err
	}
	return true, nil
}
| // SetStatePending updates task state to PENDING | |||
| func (b *Backend) SetStatePending(signature *tasks.Signature) error { | |||
| taskState := tasks.NewPendingTaskState(signature) | |||
| return b.updateState(taskState) | |||
| } | |||
| // SetStateReceived updates task state to RECEIVED | |||
| func (b *Backend) SetStateReceived(signature *tasks.Signature) error { | |||
| taskState := tasks.NewReceivedTaskState(signature) | |||
| return b.updateState(taskState) | |||
| } | |||
| // SetStateStarted updates task state to STARTED | |||
| func (b *Backend) SetStateStarted(signature *tasks.Signature) error { | |||
| taskState := tasks.NewStartedTaskState(signature) | |||
| return b.updateState(taskState) | |||
| } | |||
| // SetStateRetry updates task state to RETRY | |||
| func (b *Backend) SetStateRetry(signature *tasks.Signature) error { | |||
| state := tasks.NewRetryTaskState(signature) | |||
| return b.updateState(state) | |||
| } | |||
| // SetStateSuccess updates task state to SUCCESS | |||
| func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error { | |||
| taskState := tasks.NewSuccessTaskState(signature, results) | |||
| return b.updateState(taskState) | |||
| } | |||
| // SetStateFailure updates task state to FAILURE | |||
| func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error { | |||
| taskState := tasks.NewFailureTaskState(signature, err) | |||
| return b.updateState(taskState) | |||
| } | |||
| // GetState returns the latest task state | |||
| func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) { | |||
| item, err := b.getClient().Get(taskUUID) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| state := new(tasks.TaskState) | |||
| decoder := json.NewDecoder(bytes.NewReader(item.Value)) | |||
| decoder.UseNumber() | |||
| if err := decoder.Decode(state); err != nil { | |||
| return nil, err | |||
| } | |||
| return state, nil | |||
| } | |||
| // PurgeState deletes stored task state | |||
| func (b *Backend) PurgeState(taskUUID string) error { | |||
| return b.getClient().Delete(taskUUID) | |||
| } | |||
| // PurgeGroupMeta deletes stored group meta data | |||
| func (b *Backend) PurgeGroupMeta(groupUUID string) error { | |||
| return b.getClient().Delete(groupUUID) | |||
| } | |||
| // updateState saves current task state | |||
| func (b *Backend) updateState(taskState *tasks.TaskState) error { | |||
| encoded, err := json.Marshal(taskState) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return b.getClient().Set(&gomemcache.Item{ | |||
| Key: taskState.TaskUUID, | |||
| Value: encoded, | |||
| Expiration: b.getExpirationTimestamp(), | |||
| }) | |||
| } | |||
| // lockGroupMeta acquires lock on group meta data | |||
| func (b *Backend) lockGroupMeta(groupMeta *tasks.GroupMeta) error { | |||
| groupMeta.Lock = true | |||
| encoded, err := json.Marshal(groupMeta) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return b.getClient().Set(&gomemcache.Item{ | |||
| Key: groupMeta.GroupUUID, | |||
| Value: encoded, | |||
| Expiration: b.getExpirationTimestamp(), | |||
| }) | |||
| } | |||
| // unlockGroupMeta releases lock on group meta data | |||
| func (b *Backend) unlockGroupMeta(groupMeta *tasks.GroupMeta) error { | |||
| groupMeta.Lock = false | |||
| encoded, err := json.Marshal(groupMeta) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return b.getClient().Set(&gomemcache.Item{ | |||
| Key: groupMeta.GroupUUID, | |||
| Value: encoded, | |||
| Expiration: b.getExpirationTimestamp(), | |||
| }) | |||
| } | |||
| // getGroupMeta retrieves group meta data, convenience function to avoid repetition | |||
| func (b *Backend) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) { | |||
| item, err := b.getClient().Get(groupUUID) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| groupMeta := new(tasks.GroupMeta) | |||
| decoder := json.NewDecoder(bytes.NewReader(item.Value)) | |||
| decoder.UseNumber() | |||
| if err := decoder.Decode(groupMeta); err != nil { | |||
| return nil, err | |||
| } | |||
| return groupMeta, nil | |||
| } | |||
| // getStates returns multiple task states | |||
| func (b *Backend) getStates(taskUUIDs ...string) ([]*tasks.TaskState, error) { | |||
| states := make([]*tasks.TaskState, len(taskUUIDs)) | |||
| for i, taskUUID := range taskUUIDs { | |||
| item, err := b.getClient().Get(taskUUID) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| state := new(tasks.TaskState) | |||
| decoder := json.NewDecoder(bytes.NewReader(item.Value)) | |||
| decoder.UseNumber() | |||
| if err := decoder.Decode(state); err != nil { | |||
| return nil, err | |||
| } | |||
| states[i] = state | |||
| } | |||
| return states, nil | |||
| } | |||
| // getExpirationTimestamp returns expiration timestamp | |||
| func (b *Backend) getExpirationTimestamp() int32 { | |||
| expiresIn := b.GetConfig().ResultsExpireIn | |||
| if expiresIn == 0 { | |||
| // // expire results after 1 hour by default | |||
| expiresIn = config.DefaultResultsExpireIn | |||
| } | |||
| return int32(time.Now().Unix() + int64(expiresIn)) | |||
| } | |||
// getClient returns or creates instance of Memcache client
// NOTE(review): this lazy init is not synchronized; concurrent callers could
// race on b.client (sibling backends guard init with sync.Once) — confirm
// callers are single-goroutine or add locking.
func (b *Backend) getClient() *gomemcache.Client {
	if b.client == nil {
		b.client = gomemcache.New(b.servers...)
	}
	return b.client
}
| @@ -0,0 +1,358 @@ | |||
| package mongo | |||
| import ( | |||
| "context" | |||
| "encoding/json" | |||
| "fmt" | |||
| "reflect" | |||
| "strings" | |||
| "sync" | |||
| "time" | |||
| "go.mongodb.org/mongo-driver/bson" | |||
| "go.mongodb.org/mongo-driver/mongo" | |||
| "go.mongodb.org/mongo-driver/mongo/options" | |||
| "github.com/RichardKnop/machinery/v1/backends/iface" | |||
| "github.com/RichardKnop/machinery/v1/common" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| "github.com/RichardKnop/machinery/v1/log" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| ) | |||
// Backend represents a MongoDB result backend
type Backend struct {
	common.Backend
	// client is the lazily connected mongo driver client (set by connect).
	client *mongo.Client
	// tc is the "tasks" collection holding individual task state documents.
	tc *mongo.Collection
	// gmc is the "group_metas" collection holding group metadata documents.
	gmc *mongo.Collection
	// once guards the one-time connect() performed by the collection accessors.
	once sync.Once
}
| // New creates Backend instance | |||
| func New(cnf *config.Config) (iface.Backend, error) { | |||
| backend := &Backend{ | |||
| Backend: common.NewBackend(cnf), | |||
| once: sync.Once{}, | |||
| } | |||
| return backend, nil | |||
| } | |||
| // InitGroup creates and saves a group meta data object | |||
| func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error { | |||
| groupMeta := &tasks.GroupMeta{ | |||
| GroupUUID: groupUUID, | |||
| TaskUUIDs: taskUUIDs, | |||
| CreatedAt: time.Now().UTC(), | |||
| } | |||
| _, err := b.groupMetasCollection().InsertOne(context.Background(), groupMeta) | |||
| return err | |||
| } | |||
| // GroupCompleted returns true if all tasks in a group finished | |||
| func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) { | |||
| groupMeta, err := b.getGroupMeta(groupUUID) | |||
| if err != nil { | |||
| return false, err | |||
| } | |||
| taskStates, err := b.getStates(groupMeta.TaskUUIDs...) | |||
| if err != nil { | |||
| return false, err | |||
| } | |||
| var countSuccessTasks = 0 | |||
| for _, taskState := range taskStates { | |||
| if taskState.IsCompleted() { | |||
| countSuccessTasks++ | |||
| } | |||
| } | |||
| return countSuccessTasks == groupTaskCount, nil | |||
| } | |||
| // GroupTaskStates returns states of all tasks in the group | |||
| func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) { | |||
| groupMeta, err := b.getGroupMeta(groupUUID) | |||
| if err != nil { | |||
| return []*tasks.TaskState{}, err | |||
| } | |||
| return b.getStates(groupMeta.TaskUUIDs...) | |||
| } | |||
| // TriggerChord flags chord as triggered in the backend storage to make sure | |||
| // chord is never triggered multiple times. Returns a boolean flag to indicate | |||
| // whether the worker should trigger chord (true) or no if it has been triggered | |||
| // already (false) | |||
| func (b *Backend) TriggerChord(groupUUID string) (bool, error) { | |||
| query := bson.M{ | |||
| "_id": groupUUID, | |||
| "chord_triggered": false, | |||
| } | |||
| change := bson.M{ | |||
| "$set": bson.M{ | |||
| "chord_triggered": true, | |||
| }, | |||
| } | |||
| _, err := b.groupMetasCollection().UpdateOne(context.Background(), query, change, options.Update()) | |||
| if err != nil { | |||
| if err == mongo.ErrNoDocuments { | |||
| log.WARNING.Printf("Chord already triggered for group %s", groupUUID) | |||
| return false, nil | |||
| } | |||
| return false, err | |||
| } | |||
| return true, nil | |||
| } | |||
| // SetStatePending updates task state to PENDING | |||
| func (b *Backend) SetStatePending(signature *tasks.Signature) error { | |||
| update := bson.M{ | |||
| "state": tasks.StatePending, | |||
| "task_name": signature.Name, | |||
| "created_at": time.Now().UTC(), | |||
| } | |||
| return b.updateState(signature, update) | |||
| } | |||
| // SetStateReceived updates task state to RECEIVED | |||
| func (b *Backend) SetStateReceived(signature *tasks.Signature) error { | |||
| update := bson.M{"state": tasks.StateReceived} | |||
| return b.updateState(signature, update) | |||
| } | |||
| // SetStateStarted updates task state to STARTED | |||
| func (b *Backend) SetStateStarted(signature *tasks.Signature) error { | |||
| update := bson.M{"state": tasks.StateStarted} | |||
| return b.updateState(signature, update) | |||
| } | |||
| // SetStateRetry updates task state to RETRY | |||
| func (b *Backend) SetStateRetry(signature *tasks.Signature) error { | |||
| update := bson.M{"state": tasks.StateRetry} | |||
| return b.updateState(signature, update) | |||
| } | |||
| // SetStateSuccess updates task state to SUCCESS | |||
| func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error { | |||
| decodedResults := b.decodeResults(results) | |||
| update := bson.M{ | |||
| "state": tasks.StateSuccess, | |||
| "results": decodedResults, | |||
| } | |||
| return b.updateState(signature, update) | |||
| } | |||
| // decodeResults detects & decodes json strings in TaskResult.Value and returns a new slice | |||
| func (b *Backend) decodeResults(results []*tasks.TaskResult) []*tasks.TaskResult { | |||
| l := len(results) | |||
| jsonResults := make([]*tasks.TaskResult, l, l) | |||
| for i, result := range results { | |||
| jsonResult := new(bson.M) | |||
| resultType := reflect.TypeOf(result.Value).Kind() | |||
| if resultType == reflect.String { | |||
| err := json.NewDecoder(strings.NewReader(result.Value.(string))).Decode(&jsonResult) | |||
| if err == nil { | |||
| jsonResults[i] = &tasks.TaskResult{ | |||
| Type: "json", | |||
| Value: jsonResult, | |||
| } | |||
| continue | |||
| } | |||
| } | |||
| jsonResults[i] = result | |||
| } | |||
| return jsonResults | |||
| } | |||
| // SetStateFailure updates task state to FAILURE | |||
| func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error { | |||
| update := bson.M{"state": tasks.StateFailure, "error": err} | |||
| return b.updateState(signature, update) | |||
| } | |||
| // GetState returns the latest task state | |||
| func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) { | |||
| state := &tasks.TaskState{} | |||
| err := b.tasksCollection().FindOne(context.Background(), bson.M{"_id": taskUUID}).Decode(state) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return state, nil | |||
| } | |||
| // PurgeState deletes stored task state | |||
| func (b *Backend) PurgeState(taskUUID string) error { | |||
| _, err := b.tasksCollection().DeleteOne(context.Background(), bson.M{"_id": taskUUID}) | |||
| return err | |||
| } | |||
| // PurgeGroupMeta deletes stored group meta data | |||
| func (b *Backend) PurgeGroupMeta(groupUUID string) error { | |||
| _, err := b.groupMetasCollection().DeleteOne(context.Background(), bson.M{"_id": groupUUID}) | |||
| return err | |||
| } | |||
| // lockGroupMeta acquires lock on groupUUID document | |||
| func (b *Backend) lockGroupMeta(groupUUID string) error { | |||
| query := bson.M{ | |||
| "_id": groupUUID, | |||
| "lock": false, | |||
| } | |||
| change := bson.M{ | |||
| "$set": bson.M{ | |||
| "lock": true, | |||
| }, | |||
| } | |||
| _, err := b.groupMetasCollection().UpdateOne(context.Background(), query, change, options.Update().SetUpsert(true)) | |||
| return err | |||
| } | |||
| // unlockGroupMeta releases lock on groupUUID document | |||
| func (b *Backend) unlockGroupMeta(groupUUID string) error { | |||
| update := bson.M{"$set": bson.M{"lock": false}} | |||
| _, err := b.groupMetasCollection().UpdateOne(context.Background(), bson.M{"_id": groupUUID}, update, options.Update()) | |||
| return err | |||
| } | |||
| // getGroupMeta retrieves group meta data, convenience function to avoid repetition | |||
| func (b *Backend) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) { | |||
| groupMeta := &tasks.GroupMeta{} | |||
| query := bson.M{"_id": groupUUID} | |||
| err := b.groupMetasCollection().FindOne(context.Background(), query).Decode(groupMeta) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return groupMeta, nil | |||
| } | |||
| // getStates returns multiple task states | |||
| func (b *Backend) getStates(taskUUIDs ...string) ([]*tasks.TaskState, error) { | |||
| states := make([]*tasks.TaskState, 0, len(taskUUIDs)) | |||
| cur, err := b.tasksCollection().Find(context.Background(), bson.M{"_id": bson.M{"$in": taskUUIDs}}) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| defer cur.Close(context.Background()) | |||
| for cur.Next(context.Background()) { | |||
| state := &tasks.TaskState{} | |||
| if err := cur.Decode(state); err != nil { | |||
| return nil, err | |||
| } | |||
| states = append(states, state) | |||
| } | |||
| if cur.Err() != nil { | |||
| return nil, err | |||
| } | |||
| return states, nil | |||
| } | |||
| // updateState saves current task state | |||
| func (b *Backend) updateState(signature *tasks.Signature, update bson.M) error { | |||
| update = bson.M{"$set": update} | |||
| _, err := b.tasksCollection().UpdateOne(context.Background(), bson.M{"_id": signature.UUID}, update, options.Update().SetUpsert(true)) | |||
| return err | |||
| } | |||
// tasksCollection lazily connects (exactly once) and returns the "tasks"
// collection.
// NOTE(review): the error returned by connect() is discarded inside once.Do;
// a failed connect leaves b.tc nil and later calls will panic — confirm
// whether startup should fail loudly instead.
func (b *Backend) tasksCollection() *mongo.Collection {
	b.once.Do(func() {
		b.connect()
	})
	return b.tc
}
// groupMetasCollection lazily connects (exactly once) and returns the
// "group_metas" collection.
// NOTE(review): as with tasksCollection, the connect() error is silently
// dropped by once.Do — a failed connect yields a nil collection.
func (b *Backend) groupMetasCollection() *mongo.Collection {
	b.once.Do(func() {
		b.connect()
	})
	return b.gmc
}
| // connect creates the underlying mgo connection if it doesn't exist | |||
| // creates required indexes for our collections | |||
| func (b *Backend) connect() error { | |||
| client, err := b.dial() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| b.client = client | |||
| database := "machinery" | |||
| if b.GetConfig().MongoDB != nil { | |||
| database = b.GetConfig().MongoDB.Database | |||
| } | |||
| b.tc = b.client.Database(database).Collection("tasks") | |||
| b.gmc = b.client.Database(database).Collection("group_metas") | |||
| err = b.createMongoIndexes(database) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| // dial connects to mongo with TLSConfig if provided | |||
| // else connects via ResultBackend uri | |||
| func (b *Backend) dial() (*mongo.Client, error) { | |||
| if b.GetConfig().MongoDB != nil && b.GetConfig().MongoDB.Client != nil { | |||
| return b.GetConfig().MongoDB.Client, nil | |||
| } | |||
| uri := b.GetConfig().ResultBackend | |||
| if strings.HasPrefix(uri, "mongodb://") == false && | |||
| strings.HasPrefix(uri, "mongodb+srv://") == false { | |||
| uri = fmt.Sprintf("mongodb://%s", uri) | |||
| } | |||
| client, err := mongo.NewClient(options.Client().ApplyURI(uri)) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) | |||
| defer cancel() | |||
| if err := client.Connect(ctx); err != nil { | |||
| return nil, err | |||
| } | |||
| return client, nil | |||
| } | |||
| // createMongoIndexes ensures all indexes are in place | |||
| func (b *Backend) createMongoIndexes(database string) error { | |||
| tasksCollection := b.client.Database(database).Collection("tasks") | |||
| expireIn := int32(b.GetConfig().ResultsExpireIn) | |||
| _, err := tasksCollection.Indexes().CreateMany(context.Background(), []mongo.IndexModel{ | |||
| { | |||
| Keys: bson.M{"state": 1}, | |||
| Options: options.Index().SetBackground(true).SetExpireAfterSeconds(expireIn), | |||
| }, | |||
| mongo.IndexModel{ | |||
| Keys: bson.M{"lock": 1}, | |||
| Options: options.Index().SetBackground(true).SetExpireAfterSeconds(expireIn), | |||
| }, | |||
| }) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return err | |||
| } | |||
| @@ -0,0 +1,150 @@ | |||
| package null | |||
| import ( | |||
| "fmt" | |||
| "github.com/RichardKnop/machinery/v1/backends/iface" | |||
| "github.com/RichardKnop/machinery/v1/common" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| ) | |||
// ErrGroupNotFound is reported when a group UUID has no stored metadata.
type ErrGroupNotFound struct {
	uuid string
}

// NewErrGroupNotFound returns a new instance of ErrGroupNotFound.
func NewErrGroupNotFound(groupUUID string) ErrGroupNotFound {
	return ErrGroupNotFound{uuid: groupUUID}
}

// Error implements the error interface.
func (e ErrGroupNotFound) Error() string {
	return fmt.Sprintf("Group not found: %v", e.uuid)
}
// ErrTasknotFound is reported when a task UUID has no stored state.
// NOTE(review): exported name should read ErrTaskNotFound, but renaming would
// break callers — kept as-is.
type ErrTasknotFound struct {
	uuid string
}

// NewErrTasknotFound returns a new instance of ErrTasknotFound.
func NewErrTasknotFound(taskUUID string) ErrTasknotFound {
	return ErrTasknotFound{uuid: taskUUID}
}

// Error implements the error interface.
func (e ErrTasknotFound) Error() string {
	return fmt.Sprintf("Task not found: %v", e.uuid)
}
// Backend represents an "eager" in-memory result backend
type Backend struct {
	common.Backend
	// groups records initialized group UUIDs; no per-task state is stored.
	groups map[string]struct{}
}
| // New creates EagerBackend instance | |||
| func New() iface.Backend { | |||
| return &Backend{ | |||
| Backend: common.NewBackend(new(config.Config)), | |||
| groups: make(map[string]struct{}), | |||
| } | |||
| } | |||
| // InitGroup creates and saves a group meta data object | |||
| func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error { | |||
| b.groups[groupUUID] = struct{}{} | |||
| return nil | |||
| } | |||
| // GroupCompleted returns true if all tasks in a group finished | |||
| func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) { | |||
| _, ok := b.groups[groupUUID] | |||
| if !ok { | |||
| return false, NewErrGroupNotFound(groupUUID) | |||
| } | |||
| return true, nil | |||
| } | |||
| // GroupTaskStates returns states of all tasks in the group | |||
| func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) { | |||
| _, ok := b.groups[groupUUID] | |||
| if !ok { | |||
| return nil, NewErrGroupNotFound(groupUUID) | |||
| } | |||
| ret := make([]*tasks.TaskState, 0, groupTaskCount) | |||
| return ret, nil | |||
| } | |||
| // TriggerChord flags chord as triggered in the backend storage to make sure | |||
| // chord is never trigerred multiple times. Returns a boolean flag to indicate | |||
| // whether the worker should trigger chord (true) or no if it has been triggered | |||
| // already (false) | |||
| func (b *Backend) TriggerChord(groupUUID string) (bool, error) { | |||
| return true, nil | |||
| } | |||
| // SetStatePending updates task state to PENDING | |||
| func (b *Backend) SetStatePending(signature *tasks.Signature) error { | |||
| state := tasks.NewPendingTaskState(signature) | |||
| return b.updateState(state) | |||
| } | |||
| // SetStateReceived updates task state to RECEIVED | |||
| func (b *Backend) SetStateReceived(signature *tasks.Signature) error { | |||
| state := tasks.NewReceivedTaskState(signature) | |||
| return b.updateState(state) | |||
| } | |||
| // SetStateStarted updates task state to STARTED | |||
| func (b *Backend) SetStateStarted(signature *tasks.Signature) error { | |||
| state := tasks.NewStartedTaskState(signature) | |||
| return b.updateState(state) | |||
| } | |||
| // SetStateRetry updates task state to RETRY | |||
| func (b *Backend) SetStateRetry(signature *tasks.Signature) error { | |||
| state := tasks.NewRetryTaskState(signature) | |||
| return b.updateState(state) | |||
| } | |||
| // SetStateSuccess updates task state to SUCCESS | |||
| func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error { | |||
| state := tasks.NewSuccessTaskState(signature, results) | |||
| return b.updateState(state) | |||
| } | |||
| // SetStateFailure updates task state to FAILURE | |||
| func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error { | |||
| state := tasks.NewFailureTaskState(signature, err) | |||
| return b.updateState(state) | |||
| } | |||
| // GetState returns the latest task state | |||
| func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) { | |||
| return nil, NewErrTasknotFound(taskUUID) | |||
| } | |||
| // PurgeState deletes stored task state | |||
| func (b *Backend) PurgeState(taskUUID string) error { | |||
| return NewErrTasknotFound(taskUUID) | |||
| } | |||
| // PurgeGroupMeta deletes stored group meta data | |||
| func (b *Backend) PurgeGroupMeta(groupUUID string) error { | |||
| _, ok := b.groups[groupUUID] | |||
| if !ok { | |||
| return NewErrGroupNotFound(groupUUID) | |||
| } | |||
| return nil | |||
| } | |||
// updateState is a no-op: the null backend persists nothing.
func (b *Backend) updateState(s *tasks.TaskState) error {
	// simulate the behavior of json marshal/unmarshal
	return nil
}
| @@ -0,0 +1,338 @@ | |||
| package redis | |||
| import ( | |||
| "bytes" | |||
| "encoding/json" | |||
| "fmt" | |||
| "sync" | |||
| "time" | |||
| "github.com/RichardKnop/machinery/v1/backends/iface" | |||
| "github.com/RichardKnop/machinery/v1/common" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| "github.com/RichardKnop/machinery/v1/log" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| "github.com/RichardKnop/redsync" | |||
| "github.com/gomodule/redigo/redis" | |||
| ) | |||
// Backend represents a Redis result backend
type Backend struct {
	common.Backend
	// host, password and db select the Redis server and database.
	host     string
	password string
	db       int
	// pool is the lazily built redigo connection pool (see open()).
	pool *redis.Pool
	// If set, path to a socket file overrides hostname
	socketPath string
	// redsync provides distributed locking, used to serialize chord triggering.
	redsync *redsync.Redsync
	// redisOnce guards the one-time pool/redsync initialization in open().
	redisOnce sync.Once
	common.RedisConnector
}
| // New creates Backend instance | |||
| func New(cnf *config.Config, host, password, socketPath string, db int) iface.Backend { | |||
| return &Backend{ | |||
| Backend: common.NewBackend(cnf), | |||
| host: host, | |||
| db: db, | |||
| password: password, | |||
| socketPath: socketPath, | |||
| } | |||
| } | |||
| // InitGroup creates and saves a group meta data object | |||
| func (b *Backend) InitGroup(groupUUID string, taskUUIDs []string) error { | |||
| groupMeta := &tasks.GroupMeta{ | |||
| GroupUUID: groupUUID, | |||
| TaskUUIDs: taskUUIDs, | |||
| CreatedAt: time.Now().UTC(), | |||
| } | |||
| encoded, err := json.Marshal(groupMeta) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| conn := b.open() | |||
| defer conn.Close() | |||
| _, err = conn.Do("SET", groupUUID, encoded) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return b.setExpirationTime(groupUUID) | |||
| } | |||
| // GroupCompleted returns true if all tasks in a group finished | |||
| func (b *Backend) GroupCompleted(groupUUID string, groupTaskCount int) (bool, error) { | |||
| groupMeta, err := b.getGroupMeta(groupUUID) | |||
| if err != nil { | |||
| return false, err | |||
| } | |||
| taskStates, err := b.getStates(groupMeta.TaskUUIDs...) | |||
| if err != nil { | |||
| return false, err | |||
| } | |||
| var countSuccessTasks = 0 | |||
| for _, taskState := range taskStates { | |||
| if taskState.IsCompleted() { | |||
| countSuccessTasks++ | |||
| } | |||
| } | |||
| return countSuccessTasks == groupTaskCount, nil | |||
| } | |||
| // GroupTaskStates returns states of all tasks in the group | |||
| func (b *Backend) GroupTaskStates(groupUUID string, groupTaskCount int) ([]*tasks.TaskState, error) { | |||
| groupMeta, err := b.getGroupMeta(groupUUID) | |||
| if err != nil { | |||
| return []*tasks.TaskState{}, err | |||
| } | |||
| return b.getStates(groupMeta.TaskUUIDs...) | |||
| } | |||
| // TriggerChord flags chord as triggered in the backend storage to make sure | |||
| // chord is never trigerred multiple times. Returns a boolean flag to indicate | |||
| // whether the worker should trigger chord (true) or no if it has been triggered | |||
| // already (false) | |||
| func (b *Backend) TriggerChord(groupUUID string) (bool, error) { | |||
| conn := b.open() | |||
| defer conn.Close() | |||
| m := b.redsync.NewMutex("TriggerChordMutex") | |||
| if err := m.Lock(); err != nil { | |||
| return false, err | |||
| } | |||
| defer m.Unlock() | |||
| groupMeta, err := b.getGroupMeta(groupUUID) | |||
| if err != nil { | |||
| return false, err | |||
| } | |||
| // Chord has already been triggered, return false (should not trigger again) | |||
| if groupMeta.ChordTriggered { | |||
| return false, nil | |||
| } | |||
| // Set flag to true | |||
| groupMeta.ChordTriggered = true | |||
| // Update the group meta | |||
| encoded, err := json.Marshal(&groupMeta) | |||
| if err != nil { | |||
| return false, err | |||
| } | |||
| _, err = conn.Do("SET", groupUUID, encoded) | |||
| if err != nil { | |||
| return false, err | |||
| } | |||
| return true, b.setExpirationTime(groupUUID) | |||
| } | |||
| func (b *Backend) mergeNewTaskState(newState *tasks.TaskState) { | |||
| state, err := b.GetState(newState.TaskUUID) | |||
| if err == nil { | |||
| newState.CreatedAt = state.CreatedAt | |||
| newState.TaskName = state.TaskName | |||
| } | |||
| } | |||
| // SetStatePending updates task state to PENDING | |||
| func (b *Backend) SetStatePending(signature *tasks.Signature) error { | |||
| taskState := tasks.NewPendingTaskState(signature) | |||
| return b.updateState(taskState) | |||
| } | |||
| // SetStateReceived updates task state to RECEIVED | |||
| func (b *Backend) SetStateReceived(signature *tasks.Signature) error { | |||
| taskState := tasks.NewReceivedTaskState(signature) | |||
| b.mergeNewTaskState(taskState) | |||
| return b.updateState(taskState) | |||
| } | |||
| // SetStateStarted updates task state to STARTED | |||
| func (b *Backend) SetStateStarted(signature *tasks.Signature) error { | |||
| taskState := tasks.NewStartedTaskState(signature) | |||
| b.mergeNewTaskState(taskState) | |||
| return b.updateState(taskState) | |||
| } | |||
| // SetStateRetry updates task state to RETRY | |||
| func (b *Backend) SetStateRetry(signature *tasks.Signature) error { | |||
| taskState := tasks.NewRetryTaskState(signature) | |||
| b.mergeNewTaskState(taskState) | |||
| return b.updateState(taskState) | |||
| } | |||
| // SetStateSuccess updates task state to SUCCESS | |||
| func (b *Backend) SetStateSuccess(signature *tasks.Signature, results []*tasks.TaskResult) error { | |||
| taskState := tasks.NewSuccessTaskState(signature, results) | |||
| b.mergeNewTaskState(taskState) | |||
| return b.updateState(taskState) | |||
| } | |||
| // SetStateFailure updates task state to FAILURE | |||
| func (b *Backend) SetStateFailure(signature *tasks.Signature, err string) error { | |||
| taskState := tasks.NewFailureTaskState(signature, err) | |||
| b.mergeNewTaskState(taskState) | |||
| return b.updateState(taskState) | |||
| } | |||
| // GetState returns the latest task state | |||
| func (b *Backend) GetState(taskUUID string) (*tasks.TaskState, error) { | |||
| conn := b.open() | |||
| defer conn.Close() | |||
| item, err := redis.Bytes(conn.Do("GET", taskUUID)) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| state := new(tasks.TaskState) | |||
| decoder := json.NewDecoder(bytes.NewReader(item)) | |||
| decoder.UseNumber() | |||
| if err := decoder.Decode(state); err != nil { | |||
| return nil, err | |||
| } | |||
| return state, nil | |||
| } | |||
| // PurgeState deletes stored task state | |||
| func (b *Backend) PurgeState(taskUUID string) error { | |||
| conn := b.open() | |||
| defer conn.Close() | |||
| _, err := conn.Do("DEL", taskUUID) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| // PurgeGroupMeta deletes stored group meta data | |||
| func (b *Backend) PurgeGroupMeta(groupUUID string) error { | |||
| conn := b.open() | |||
| defer conn.Close() | |||
| _, err := conn.Do("DEL", groupUUID) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| // getGroupMeta retrieves group meta data, convenience function to avoid repetition | |||
| func (b *Backend) getGroupMeta(groupUUID string) (*tasks.GroupMeta, error) { | |||
| conn := b.open() | |||
| defer conn.Close() | |||
| item, err := redis.Bytes(conn.Do("GET", groupUUID)) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| groupMeta := new(tasks.GroupMeta) | |||
| decoder := json.NewDecoder(bytes.NewReader(item)) | |||
| decoder.UseNumber() | |||
| if err := decoder.Decode(groupMeta); err != nil { | |||
| return nil, err | |||
| } | |||
| return groupMeta, nil | |||
| } | |||
| // getStates returns multiple task states | |||
| func (b *Backend) getStates(taskUUIDs ...string) ([]*tasks.TaskState, error) { | |||
| taskStates := make([]*tasks.TaskState, len(taskUUIDs)) | |||
| conn := b.open() | |||
| defer conn.Close() | |||
| // conn.Do requires []interface{}... can't pass []string unfortunately | |||
| taskUUIDInterfaces := make([]interface{}, len(taskUUIDs)) | |||
| for i, taskUUID := range taskUUIDs { | |||
| taskUUIDInterfaces[i] = interface{}(taskUUID) | |||
| } | |||
| reply, err := redis.Values(conn.Do("MGET", taskUUIDInterfaces...)) | |||
| if err != nil { | |||
| return taskStates, err | |||
| } | |||
| for i, value := range reply { | |||
| stateBytes, ok := value.([]byte) | |||
| if !ok { | |||
| return taskStates, fmt.Errorf("Expected byte array, instead got: %v", value) | |||
| } | |||
| taskState := new(tasks.TaskState) | |||
| decoder := json.NewDecoder(bytes.NewReader(stateBytes)) | |||
| decoder.UseNumber() | |||
| if err := decoder.Decode(taskState); err != nil { | |||
| log.ERROR.Print(err) | |||
| return taskStates, err | |||
| } | |||
| taskStates[i] = taskState | |||
| } | |||
| return taskStates, nil | |||
| } | |||
| // updateState saves current task state | |||
| func (b *Backend) updateState(taskState *tasks.TaskState) error { | |||
| conn := b.open() | |||
| defer conn.Close() | |||
| encoded, err := json.Marshal(taskState) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| _, err = conn.Do("SET", taskState.TaskUUID, encoded) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return b.setExpirationTime(taskState.TaskUUID) | |||
| } | |||
| // setExpirationTime sets expiration timestamp on a stored task state | |||
| func (b *Backend) setExpirationTime(key string) error { | |||
| expiresIn := b.GetConfig().ResultsExpireIn | |||
| if expiresIn == 0 { | |||
| // // expire results after 1 hour by default | |||
| expiresIn = config.DefaultResultsExpireIn | |||
| } | |||
| expirationTimestamp := int32(time.Now().Unix() + int64(expiresIn)) | |||
| conn := b.open() | |||
| defer conn.Close() | |||
| _, err := conn.Do("EXPIREAT", key, expirationTimestamp) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| // open returns or creates instance of Redis connection | |||
| func (b *Backend) open() redis.Conn { | |||
| b.redisOnce.Do(func() { | |||
| b.pool = b.NewPool(b.socketPath, b.host, b.password, b.db, b.GetConfig().Redis, b.GetConfig().TLSConfig) | |||
| b.redsync = redsync.New([]redsync.Pool{b.pool}) | |||
| }) | |||
| return b.pool.Get() | |||
| } | |||
| @@ -0,0 +1,256 @@ | |||
| package result | |||
| import ( | |||
| "errors" | |||
| "reflect" | |||
| "time" | |||
| "github.com/RichardKnop/machinery/v1/backends/iface" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| ) | |||
var (
	// ErrBackendNotConfigured is returned when task results are requested
	// but no result backend was configured.
	ErrBackendNotConfigured = errors.New("Result backend not configured")
	// ErrTimeoutReached is returned by GetWithTimeout when the task does not
	// finish within the allotted time.
	ErrTimeoutReached = errors.New("Timeout reached")
)
// AsyncResult represents a task result
type AsyncResult struct {
	// Signature identifies the task whose result is being awaited.
	Signature *tasks.Signature
	// taskState caches the last state fetched from the backend.
	taskState *tasks.TaskState
	// backend is the result store queried for state updates.
	backend iface.Backend
}
// ChordAsyncResult represents a result of a chord
type ChordAsyncResult struct {
	// groupAsyncResults are the results of the chord's group tasks.
	groupAsyncResults []*AsyncResult
	// chordAsyncResult is the result of the chord callback itself.
	chordAsyncResult *AsyncResult
	// backend is the result store queried for state updates.
	backend iface.Backend
}
// ChainAsyncResult represents a result of a chain of tasks
type ChainAsyncResult struct {
	// asyncResults holds one result per task in the chain, in chain order.
	asyncResults []*AsyncResult
	// backend is the result store queried for state updates.
	backend iface.Backend
}
| // NewAsyncResult creates AsyncResult instance | |||
| func NewAsyncResult(signature *tasks.Signature, backend iface.Backend) *AsyncResult { | |||
| return &AsyncResult{ | |||
| Signature: signature, | |||
| taskState: new(tasks.TaskState), | |||
| backend: backend, | |||
| } | |||
| } | |||
| // NewChordAsyncResult creates ChordAsyncResult instance | |||
| func NewChordAsyncResult(groupTasks []*tasks.Signature, chordCallback *tasks.Signature, backend iface.Backend) *ChordAsyncResult { | |||
| asyncResults := make([]*AsyncResult, len(groupTasks)) | |||
| for i, task := range groupTasks { | |||
| asyncResults[i] = NewAsyncResult(task, backend) | |||
| } | |||
| return &ChordAsyncResult{ | |||
| groupAsyncResults: asyncResults, | |||
| chordAsyncResult: NewAsyncResult(chordCallback, backend), | |||
| backend: backend, | |||
| } | |||
| } | |||
| // NewChainAsyncResult creates ChainAsyncResult instance | |||
| func NewChainAsyncResult(tasks []*tasks.Signature, backend iface.Backend) *ChainAsyncResult { | |||
| asyncResults := make([]*AsyncResult, len(tasks)) | |||
| for i, task := range tasks { | |||
| asyncResults[i] = NewAsyncResult(task, backend) | |||
| } | |||
| return &ChainAsyncResult{ | |||
| asyncResults: asyncResults, | |||
| backend: backend, | |||
| } | |||
| } | |||
// Touch the state and don't wait.
// It polls the backend exactly once and returns immediately:
// (nil, nil) while the task is still pending, (results, nil) on success,
// or (nil, error) when the task failed or no backend is configured.
func (asyncResult *AsyncResult) Touch() ([]reflect.Value, error) {
	if asyncResult.backend == nil {
		return nil, ErrBackendNotConfigured
	}

	// Refresh the cached task state from the backend.
	asyncResult.GetState()

	// Purge state if we are using AMQP backend; its state entries are
	// one-shot and must be removed once the task has completed.
	if asyncResult.backend.IsAMQP() && asyncResult.taskState.IsCompleted() {
		asyncResult.backend.PurgeState(asyncResult.taskState.TaskUUID)
	}

	if asyncResult.taskState.IsFailure() {
		return nil, errors.New(asyncResult.taskState.Error)
	}

	if asyncResult.taskState.IsSuccess() {
		return tasks.ReflectTaskResults(asyncResult.taskState.Results)
	}

	// Still pending/running: no results and no error yet.
	return nil, nil
}
| // Get returns task results (synchronous blocking call) | |||
| func (asyncResult *AsyncResult) Get(sleepDuration time.Duration) ([]reflect.Value, error) { | |||
| for { | |||
| results, err := asyncResult.Touch() | |||
| if results == nil && err == nil { | |||
| time.Sleep(sleepDuration) | |||
| } else { | |||
| return results, err | |||
| } | |||
| } | |||
| } | |||
| // GetWithTimeout returns task results with a timeout (synchronous blocking call) | |||
| func (asyncResult *AsyncResult) GetWithTimeout(timeoutDuration, sleepDuration time.Duration) ([]reflect.Value, error) { | |||
| timeout := time.NewTimer(timeoutDuration) | |||
| for { | |||
| select { | |||
| case <-timeout.C: | |||
| return nil, ErrTimeoutReached | |||
| default: | |||
| results, err := asyncResult.Touch() | |||
| if results == nil && err == nil { | |||
| time.Sleep(sleepDuration) | |||
| } else { | |||
| return results, err | |||
| } | |||
| } | |||
| } | |||
| } | |||
// GetState returns latest task state.
// Once the cached state is completed it is returned as-is without another
// backend round trip. Backend lookup errors are deliberately swallowed:
// the previously cached (possibly stale) state is returned instead.
func (asyncResult *AsyncResult) GetState() *tasks.TaskState {
	if asyncResult.taskState.IsCompleted() {
		return asyncResult.taskState
	}

	taskState, err := asyncResult.backend.GetState(asyncResult.Signature.UUID)
	if err == nil {
		asyncResult.taskState = taskState
	}

	return asyncResult.taskState
}
| // Get returns results of a chain of tasks (synchronous blocking call) | |||
| func (chainAsyncResult *ChainAsyncResult) Get(sleepDuration time.Duration) ([]reflect.Value, error) { | |||
| if chainAsyncResult.backend == nil { | |||
| return nil, ErrBackendNotConfigured | |||
| } | |||
| var ( | |||
| results []reflect.Value | |||
| err error | |||
| ) | |||
| for _, asyncResult := range chainAsyncResult.asyncResults { | |||
| results, err = asyncResult.Get(sleepDuration) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| } | |||
| return results, err | |||
| } | |||
| // Get returns result of a chord (synchronous blocking call) | |||
| func (chordAsyncResult *ChordAsyncResult) Get(sleepDuration time.Duration) ([]reflect.Value, error) { | |||
| if chordAsyncResult.backend == nil { | |||
| return nil, ErrBackendNotConfigured | |||
| } | |||
| var err error | |||
| for _, asyncResult := range chordAsyncResult.groupAsyncResults { | |||
| _, err = asyncResult.Get(sleepDuration) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| } | |||
| return chordAsyncResult.chordAsyncResult.Get(sleepDuration) | |||
| } | |||
| // GetWithTimeout returns results of a chain of tasks with timeout (synchronous blocking call) | |||
| func (chainAsyncResult *ChainAsyncResult) GetWithTimeout(timeoutDuration, sleepDuration time.Duration) ([]reflect.Value, error) { | |||
| if chainAsyncResult.backend == nil { | |||
| return nil, ErrBackendNotConfigured | |||
| } | |||
| var ( | |||
| results []reflect.Value | |||
| err error | |||
| ) | |||
| timeout := time.NewTimer(timeoutDuration) | |||
| ln := len(chainAsyncResult.asyncResults) | |||
| lastResult := chainAsyncResult.asyncResults[ln-1] | |||
| for { | |||
| select { | |||
| case <-timeout.C: | |||
| return nil, ErrTimeoutReached | |||
| default: | |||
| for _, asyncResult := range chainAsyncResult.asyncResults { | |||
| _, errcur := asyncResult.Touch() | |||
| if errcur != nil { | |||
| return nil, err | |||
| } | |||
| } | |||
| results, err = lastResult.Touch() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if results != nil { | |||
| return results, err | |||
| } | |||
| time.Sleep(sleepDuration) | |||
| } | |||
| } | |||
| } | |||
| // GetWithTimeout returns result of a chord with a timeout (synchronous blocking call) | |||
| func (chordAsyncResult *ChordAsyncResult) GetWithTimeout(timeoutDuration, sleepDuration time.Duration) ([]reflect.Value, error) { | |||
| if chordAsyncResult.backend == nil { | |||
| return nil, ErrBackendNotConfigured | |||
| } | |||
| var ( | |||
| results []reflect.Value | |||
| err error | |||
| ) | |||
| timeout := time.NewTimer(timeoutDuration) | |||
| for { | |||
| select { | |||
| case <-timeout.C: | |||
| return nil, ErrTimeoutReached | |||
| default: | |||
| for _, asyncResult := range chordAsyncResult.groupAsyncResults { | |||
| _, errcur := asyncResult.Touch() | |||
| if errcur != nil { | |||
| return nil, err | |||
| } | |||
| } | |||
| results, err = chordAsyncResult.chordAsyncResult.Touch() | |||
| if err != nil { | |||
| return nil, nil | |||
| } | |||
| if results != nil { | |||
| return results, err | |||
| } | |||
| time.Sleep(sleepDuration) | |||
| } | |||
| } | |||
| } | |||
| @@ -0,0 +1,424 @@ | |||
| package amqp | |||
| import ( | |||
| "bytes" | |||
| "context" | |||
| "encoding/json" | |||
| "fmt" | |||
| "sync" | |||
| "time" | |||
| "github.com/RichardKnop/machinery/v1/brokers/errs" | |||
| "github.com/RichardKnop/machinery/v1/brokers/iface" | |||
| "github.com/RichardKnop/machinery/v1/common" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| "github.com/RichardKnop/machinery/v1/log" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| "github.com/pkg/errors" | |||
| "github.com/streadway/amqp" | |||
| ) | |||
// AMQPConnection bundles everything needed to publish to one queue:
// the connection, its channel, the declared queue, and the channels used
// for publisher confirms, connection errors, and teardown signalling.
type AMQPConnection struct {
	queueName    string                   // queue this connection publishes to
	connection   *amqp.Connection         // underlying AMQP connection
	channel      *amqp.Channel            // channel opened on the connection
	queue        amqp.Queue               // declared queue
	confirmation <-chan amqp.Confirmation // publisher confirms for this channel
	errorchan    <-chan *amqp.Error       // closed/errored-connection notifications
	cleanup      chan struct{}            // closed to stop the reconnect watcher goroutine
}
// Broker represents an AMQP broker
type Broker struct {
	common.Broker
	common.AMQPConnector
	processingWG     sync.WaitGroup // use wait group to make sure task processing completes on interrupt signal
	connections      map[string]*AMQPConnection // cache of per-queue publish connections
	connectionsMutex sync.RWMutex               // guards connections map
}
| // New creates new Broker instance | |||
| func New(cnf *config.Config) iface.Broker { | |||
| return &Broker{Broker: common.NewBroker(cnf), AMQPConnector: common.AMQPConnector{}, connections: make(map[string]*AMQPConnection)} | |||
| } | |||
// StartConsuming enters a loop and waits for incoming messages.
// It connects to the broker, declares/binds the queue, sets QoS, and hands
// the delivery channel to consume(). The returned bool tells the caller
// whether it should retry consuming after an error.
func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) {
	b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor)

	// Prefer the processor's custom queue; fall back to the configured default.
	queueName := taskProcessor.CustomQueue()
	if queueName == "" {
		queueName = b.GetConfig().DefaultQueue
	}

	conn, channel, queue, _, amqpCloseChan, err := b.Connect(
		b.GetConfig().Broker,
		b.GetConfig().TLSConfig,
		b.GetConfig().AMQP.Exchange,     // exchange name
		b.GetConfig().AMQP.ExchangeType, // exchange type
		queueName,                       // queue name
		true,                            // queue durable
		false,                           // queue delete when unused
		b.GetConfig().AMQP.BindingKey,   // queue binding key
		nil,                             // exchange declare args
		nil,                             // queue declare args
		amqp.Table(b.GetConfig().AMQP.QueueBindingArgs), // queue binding args
	)
	if err != nil {
		// Back off via the retry function before asking the caller to retry.
		b.GetRetryFunc()(b.GetRetryStopChan())
		return b.GetRetry(), err
	}
	defer b.Close(channel, conn)

	// Cap unacknowledged deliveries per consumer so work is spread across
	// workers rather than pre-fetched by a single one.
	if err = channel.Qos(
		b.GetConfig().AMQP.PrefetchCount,
		0,     // prefetch size
		false, // global
	); err != nil {
		return b.GetRetry(), fmt.Errorf("Channel qos error: %s", err)
	}

	deliveries, err := channel.Consume(
		queue.Name,  // queue
		consumerTag, // consumer tag
		false,       // auto-ack
		false,       // exclusive
		false,       // no-local
		false,       // no-wait
		nil,         // arguments
	)
	if err != nil {
		return b.GetRetry(), fmt.Errorf("Queue consume error: %s", err)
	}

	log.INFO.Print("[*] Waiting for messages. To exit press CTRL+C")

	if err := b.consume(deliveries, concurrency, taskProcessor, amqpCloseChan); err != nil {
		return b.GetRetry(), err
	}

	// Waiting for any tasks being processed to finish
	b.processingWG.Wait()

	return b.GetRetry(), nil
}
| // StopConsuming quits the loop | |||
| func (b *Broker) StopConsuming() { | |||
| b.Broker.StopConsuming() | |||
| // Waiting for any tasks being processed to finish | |||
| b.processingWG.Wait() | |||
| } | |||
| // GetOrOpenConnection will return a connection on a particular queue name. Open connections | |||
| // are saved to avoid having to reopen connection for multiple queues | |||
| func (b *Broker) GetOrOpenConnection(queueName string, queueBindingKey string, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs amqp.Table) (*AMQPConnection, error) { | |||
| var err error | |||
| b.connectionsMutex.Lock() | |||
| defer b.connectionsMutex.Unlock() | |||
| conn, ok := b.connections[queueName] | |||
| if !ok { | |||
| conn = &AMQPConnection{ | |||
| queueName: queueName, | |||
| cleanup: make(chan struct{}), | |||
| } | |||
| conn.connection, conn.channel, conn.queue, conn.confirmation, conn.errorchan, err = b.Connect( | |||
| b.GetConfig().Broker, | |||
| b.GetConfig().TLSConfig, | |||
| b.GetConfig().AMQP.Exchange, // exchange name | |||
| b.GetConfig().AMQP.ExchangeType, // exchange type | |||
| queueName, // queue name | |||
| true, // queue durable | |||
| false, // queue delete when unused | |||
| queueBindingKey, // queue binding key | |||
| exchangeDeclareArgs, // exchange declare args | |||
| queueDeclareArgs, // queue declare args | |||
| queueBindingArgs, // queue binding args | |||
| ) | |||
| if err != nil { | |||
| return nil, errors.Wrapf(err, "Failed to connect to queue %s", queueName) | |||
| } | |||
| // Reconnect to the channel if it disconnects/errors out | |||
| go func() { | |||
| select { | |||
| case err = <-conn.errorchan: | |||
| log.INFO.Printf("Error occured on queue: %s. Reconnecting", queueName) | |||
| b.connectionsMutex.Lock() | |||
| delete(b.connections, queueName) | |||
| b.connectionsMutex.Unlock() | |||
| _, err := b.GetOrOpenConnection(queueName, queueBindingKey, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs) | |||
| if err != nil { | |||
| log.ERROR.Printf("Failed to reopen queue: %s.", queueName) | |||
| } | |||
| case <-conn.cleanup: | |||
| return | |||
| } | |||
| return | |||
| }() | |||
| b.connections[queueName] = conn | |||
| } | |||
| return conn, nil | |||
| } | |||
| func (b *Broker) CloseConnections() error { | |||
| b.connectionsMutex.Lock() | |||
| defer b.connectionsMutex.Unlock() | |||
| for key, conn := range b.connections { | |||
| if err := b.Close(conn.channel, conn.connection); err != nil { | |||
| log.ERROR.Print("Failed to close channel") | |||
| return nil | |||
| } | |||
| close(conn.cleanup) | |||
| delete(b.connections, key) | |||
| } | |||
| return nil | |||
| } | |||
// Publish places a new message on the default queue.
// The signature is JSON-encoded, routed according to its routing key
// (direct exchanges route per-signature), and published with publisher
// confirms: the call blocks until the broker acks or nacks the message.
// Tasks with a future ETA are diverted to a TTL-based delay queue instead.
func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error {
	// Adjust routing key (this decides which queue the message will be published to)
	b.AdjustRoutingKey(signature)

	msg, err := json.Marshal(signature)
	if err != nil {
		return fmt.Errorf("JSON marshal error: %s", err)
	}

	// Check the ETA signature field, if it is set and it is in the future,
	// delay the task
	if signature.ETA != nil {
		now := time.Now().UTC()

		if signature.ETA.After(now) {
			delayMs := int64(signature.ETA.Sub(now) / time.Millisecond)

			return b.delay(signature, delayMs)
		}
	}

	queue := b.GetConfig().DefaultQueue
	bindingKey := b.GetConfig().AMQP.BindingKey // queue binding key
	if b.isDirectExchange() {
		// Direct exchanges route by exact key match, so both the queue and
		// its binding key follow the signature's routing key.
		queue = signature.RoutingKey
		bindingKey = signature.RoutingKey
	}

	connection, err := b.GetOrOpenConnection(
		queue,
		bindingKey, // queue binding key
		nil,        // exchange declare args
		nil,        // queue declare args
		amqp.Table(b.GetConfig().AMQP.QueueBindingArgs), // queue binding args
	)
	if err != nil {
		return errors.Wrapf(err, "Failed to get a connection for queue %s", queue)
	}

	channel := connection.channel
	confirmsChan := connection.confirmation

	if err := channel.Publish(
		b.GetConfig().AMQP.Exchange, // exchange name
		signature.RoutingKey,        // routing key
		false,                       // mandatory
		false,                       // immediate
		amqp.Publishing{
			Headers:      amqp.Table(signature.Headers),
			ContentType:  "application/json",
			Body:         msg,
			DeliveryMode: amqp.Persistent,
		},
	); err != nil {
		return errors.Wrap(err, "Failed to publish task")
	}

	// Block until the broker confirms (or refuses) the delivery.
	confirmed := <-confirmsChan

	if confirmed.Ack {
		return nil
	}

	return fmt.Errorf("Failed delivery of delivery tag: %v", confirmed.DeliveryTag)
}
// consume takes delivered messages from the channel and manages a worker pool
// to process tasks concurrently. It returns when the AMQP connection closes,
// a task processing error is reported, or the broker's stop channel fires.
func (b *Broker) consume(deliveries <-chan amqp.Delivery, concurrency int, taskProcessor iface.TaskProcessor, amqpCloseChan <-chan *amqp.Error) error {
	pool := make(chan struct{}, concurrency)

	// initialize worker pool with maxWorkers workers
	go func() {
		for i := 0; i < concurrency; i++ {
			pool <- struct{}{}
		}
	}()

	errorsChan := make(chan error)

	for {
		select {
		case amqpErr := <-amqpCloseChan:
			// Connection-level failure: bail out so the caller can reconnect.
			return amqpErr
		case err := <-errorsChan:
			return err
		case d := <-deliveries:
			if concurrency > 0 {
				// get worker from pool (blocks until one is available)
				<-pool
			}

			b.processingWG.Add(1)

			// Consume the task inside a goroutine so multiple tasks
			// can be processed concurrently
			go func() {
				if err := b.consumeOne(d, taskProcessor); err != nil {
					errorsChan <- err
				}

				b.processingWG.Done()

				if concurrency > 0 {
					// give worker back to pool
					pool <- struct{}{}
				}
			}()
		case <-b.GetStopChan():
			return nil
		}
	}
}
// consumeOne processes a single message using TaskProcessor.
// Empty or undecodable messages are nacked without requeue; messages for
// tasks this worker doesn't know are nacked with requeue once (so another
// worker can take them) and dropped on redelivery. Note that the message
// is acked even when Process returns an error — task failure handling is
// the processor's responsibility, not the broker's.
func (b *Broker) consumeOne(delivery amqp.Delivery, taskProcessor iface.TaskProcessor) error {
	if len(delivery.Body) == 0 {
		delivery.Nack(true, false)                     // multiple, requeue
		return errors.New("Received an empty message") // RabbitMQ down?
	}

	var multiple, requeue = false, false

	// Unmarshal message body into signature struct
	signature := new(tasks.Signature)
	decoder := json.NewDecoder(bytes.NewReader(delivery.Body))
	decoder.UseNumber()
	if err := decoder.Decode(signature); err != nil {
		delivery.Nack(multiple, requeue)
		return errs.NewErrCouldNotUnmarshaTaskSignature(delivery.Body, err)
	}

	// If the task is not registered, we nack it and requeue,
	// there might be different workers for processing specific tasks
	if !b.IsTaskRegistered(signature.Name) {
		if !delivery.Redelivered {
			requeue = true
			log.INFO.Printf("Task not registered with this worker. Requeing message: %s", delivery.Body)
		}
		delivery.Nack(multiple, requeue)
		return nil
	}

	log.INFO.Printf("Received new message: %s", delivery.Body)

	err := taskProcessor.Process(signature)
	delivery.Ack(multiple)
	return err
}
// delay a task by delayMs milliseconds. The way it works is a new queue
// is created without any consumers, the message is then published to this queue
// with appropriate ttl expiration headers, after the expiration, it is sent to
// the proper queue with consumers (via the dead-letter exchange/routing key).
func (b *Broker) delay(signature *tasks.Signature, delayMs int64) error {
	if delayMs <= 0 {
		return errors.New("Cannot delay task by 0ms")
	}

	message, err := json.Marshal(signature)
	if err != nil {
		return fmt.Errorf("JSON marshal error: %s", err)
	}

	// It's necessary to redeclare the queue each time (to zero its TTL timer).
	// The queue name encodes the delay and destination so concurrent delays
	// with different parameters get distinct queues.
	queueName := fmt.Sprintf(
		"delay.%d.%s.%s",
		delayMs, // delay duration in milliseconds
		b.GetConfig().AMQP.Exchange,
		signature.RoutingKey, // routing key
	)
	declareQueueArgs := amqp.Table{
		// Exchange where to send messages after TTL expiration.
		"x-dead-letter-exchange": b.GetConfig().AMQP.Exchange,
		// Routing key which use when resending expired messages.
		"x-dead-letter-routing-key": signature.RoutingKey,
		// Time in milliseconds
		// after that message will expire and be sent to destination.
		"x-message-ttl": delayMs,
		// Time after that the queue will be deleted.
		"x-expires": delayMs * 2,
	}
	conn, channel, _, _, _, err := b.Connect(
		b.GetConfig().Broker,
		b.GetConfig().TLSConfig,
		b.GetConfig().AMQP.Exchange,     // exchange name
		b.GetConfig().AMQP.ExchangeType, // exchange type
		queueName,                       // queue name
		true,                            // queue durable
		b.GetConfig().AMQP.AutoDelete,   // queue delete when unused
		queueName,                       // queue binding key
		nil,                             // exchange declare args
		declareQueueArgs,                // queue declare args
		amqp.Table(b.GetConfig().AMQP.QueueBindingArgs), // queue binding args
	)
	if err != nil {
		return err
	}

	defer b.Close(channel, conn)

	if err := channel.Publish(
		b.GetConfig().AMQP.Exchange, // exchange
		queueName,                   // routing key
		false,                       // mandatory
		false,                       // immediate
		amqp.Publishing{
			Headers:      amqp.Table(signature.Headers),
			ContentType:  "application/json",
			Body:         message,
			DeliveryMode: amqp.Persistent,
		},
	); err != nil {
		return err
	}

	return nil
}
| func (b *Broker) isDirectExchange() bool { | |||
| return b.GetConfig().AMQP != nil && b.GetConfig().AMQP.ExchangeType == "direct" | |||
| } | |||
| // AdjustRoutingKey makes sure the routing key is correct. | |||
| // If the routing key is an empty string: | |||
| // a) set it to binding key for direct exchange type | |||
| // b) set it to default queue name | |||
| func (b *Broker) AdjustRoutingKey(s *tasks.Signature) { | |||
| if s.RoutingKey != "" { | |||
| return | |||
| } | |||
| if b.isDirectExchange() { | |||
| // The routing algorithm behind a direct exchange is simple - a message goes | |||
| // to the queues whose binding key exactly matches the routing key of the message. | |||
| s.RoutingKey = b.GetConfig().AMQP.BindingKey | |||
| return | |||
| } | |||
| s.RoutingKey = b.GetConfig().DefaultQueue | |||
| } | |||
| @@ -0,0 +1,73 @@ | |||
| package eager | |||
| import ( | |||
| "bytes" | |||
| "context" | |||
| "encoding/json" | |||
| "errors" | |||
| "fmt" | |||
| "github.com/RichardKnop/machinery/v1/brokers/iface" | |||
| "github.com/RichardKnop/machinery/v1/common" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| ) | |||
// Broker represents an "eager" in-memory broker: published tasks are
// executed synchronously by the assigned worker instead of being queued.
type Broker struct {
	worker iface.TaskProcessor // processor that runs tasks inline; set via AssignWorker
	common.Broker
}
| // New creates new Broker instance | |||
| func New() iface.Broker { | |||
| return new(Broker) | |||
| } | |||
// Mode interface with methods specific for this broker.
// Callers type-assert an iface.Broker to Mode in order to attach the
// worker that will execute tasks synchronously.
type Mode interface {
	AssignWorker(p iface.TaskProcessor)
}
| // StartConsuming enters a loop and waits for incoming messages | |||
| func (eagerBroker *Broker) StartConsuming(consumerTag string, concurrency int, p iface.TaskProcessor) (bool, error) { | |||
| return true, nil | |||
| } | |||
| // StopConsuming quits the loop | |||
| func (eagerBroker *Broker) StopConsuming() { | |||
| // do nothing | |||
| } | |||
| // Publish places a new message on the default queue | |||
| func (eagerBroker *Broker) Publish(ctx context.Context, task *tasks.Signature) error { | |||
| if eagerBroker.worker == nil { | |||
| return errors.New("worker is not assigned in eager-mode") | |||
| } | |||
| // faking the behavior to marshal input into json | |||
| // and unmarshal it back | |||
| message, err := json.Marshal(task) | |||
| if err != nil { | |||
| return fmt.Errorf("JSON marshal error: %s", err) | |||
| } | |||
| signature := new(tasks.Signature) | |||
| decoder := json.NewDecoder(bytes.NewReader(message)) | |||
| decoder.UseNumber() | |||
| if err := decoder.Decode(signature); err != nil { | |||
| return fmt.Errorf("JSON unmarshal error: %s", err) | |||
| } | |||
| // blocking call to the task directly | |||
| return eagerBroker.worker.Process(signature) | |||
| } | |||
| // GetPendingTasks returns a slice of task.Signatures waiting in the queue | |||
| func (eagerBroker *Broker) GetPendingTasks(queue string) ([]*tasks.Signature, error) { | |||
| return []*tasks.Signature{}, errors.New("Not implemented") | |||
| } | |||
| // AssignWorker assigns a worker to the eager broker | |||
| func (eagerBroker *Broker) AssignWorker(w iface.TaskProcessor) { | |||
| eagerBroker.worker = w | |||
| } | |||
| @@ -0,0 +1,25 @@ | |||
| package errs | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| ) | |||
// ErrCouldNotUnmarshaTaskSignature is returned when a raw broker message
// cannot be decoded into a tasks.Signature. It keeps the offending payload
// and the decode error text for diagnostics.
// (The "Unmarsha" spelling is part of the exported API and kept as-is.)
type ErrCouldNotUnmarshaTaskSignature struct {
	msg    []byte // raw message body that failed to decode
	reason string // text of the underlying unmarshal error
}
| // Error implements the error interface | |||
| func (e ErrCouldNotUnmarshaTaskSignature) Error() string { | |||
| return fmt.Sprintf("Could not unmarshal '%s' into a task signature: %v", e.msg, e.reason) | |||
| } | |||
| // NewErrCouldNotUnmarshaTaskSignature returns new ErrCouldNotUnmarshaTaskSignature instance | |||
| func NewErrCouldNotUnmarshaTaskSignature(msg []byte, err error) ErrCouldNotUnmarshaTaskSignature { | |||
| return ErrCouldNotUnmarshaTaskSignature{msg: msg, reason: err.Error()} | |||
| } | |||
// ErrConsumerStopped indicates that the operation is now illegal because of the consumer being stopped.
// Brokers return it from StartConsuming when StopConsuming has been invoked.
var ErrConsumerStopped = errors.New("the server has been stopped")
| @@ -0,0 +1,196 @@ | |||
| package gcppubsub | |||
| import ( | |||
| "bytes" | |||
| "context" | |||
| "encoding/json" | |||
| "fmt" | |||
| "time" | |||
| "cloud.google.com/go/pubsub" | |||
| "github.com/RichardKnop/machinery/v1/brokers/iface" | |||
| "github.com/RichardKnop/machinery/v1/common" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| "github.com/RichardKnop/machinery/v1/log" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| ) | |||
// Broker represents a Google Cloud Pub/Sub broker
type Broker struct {
	common.Broker
	service          *pubsub.Client // Pub/Sub client (either injected via config or created in New)
	subscriptionName string         // subscription messages are consumed from
	MaxExtension     time.Duration  // max ack-deadline extension applied to the subscription's receive settings
	stopDone         chan struct{}  // closed when the consume loop has fully exited
}
// New creates new Broker instance.
// It reuses a Pub/Sub client from the config when one is provided,
// otherwise creates one (and stores it back into the config). It then
// verifies both the default-queue topic and the subscription exist before
// returning, so misconfiguration fails fast.
func New(cnf *config.Config, projectID, subscriptionName string) (iface.Broker, error) {
	b := &Broker{Broker: common.NewBroker(cnf), stopDone: make(chan struct{})}
	b.subscriptionName = subscriptionName

	ctx := context.Background()

	if cnf.GCPPubSub != nil {
		b.MaxExtension = cnf.GCPPubSub.MaxExtension
	}

	if cnf.GCPPubSub != nil && cnf.GCPPubSub.Client != nil {
		b.service = cnf.GCPPubSub.Client
	} else {
		pubsubClient, err := pubsub.NewClient(ctx, projectID)
		if err != nil {
			return nil, err
		}
		b.service = pubsubClient
		// Cache the client in the config so other components can share it.
		cnf.GCPPubSub = &config.GCPPubSubConfig{
			Client: pubsubClient,
		}
	}

	// Validate topic exists
	defaultQueue := b.GetConfig().DefaultQueue
	topic := b.service.Topic(defaultQueue)
	defer topic.Stop()
	topicExists, err := topic.Exists(ctx)
	if err != nil {
		return nil, err
	}
	if !topicExists {
		return nil, fmt.Errorf("topic does not exist, instead got %s", defaultQueue)
	}

	// Validate subscription exists
	sub := b.service.Subscription(b.subscriptionName)

	if b.MaxExtension != 0 {
		sub.ReceiveSettings.MaxExtension = b.MaxExtension
	}

	subscriptionExists, err := sub.Exists(ctx)
	if err != nil {
		return nil, err
	}
	if !subscriptionExists {
		return nil, fmt.Errorf("subscription does not exist, instead got %s", b.subscriptionName)
	}

	return b, nil
}
// StartConsuming enters a loop and waits for incoming messages.
// Receive blocks until the context is cancelled (triggered by the broker's
// stop channel); on receive errors it logs and retries. stopDone is closed
// on exit so StopConsuming can wait for a clean shutdown.
func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) {
	b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor)

	sub := b.service.Subscription(b.subscriptionName)

	if b.MaxExtension != 0 {
		sub.ReceiveSettings.MaxExtension = b.MaxExtension
	}

	// Bound receive parallelism by the requested worker concurrency.
	sub.ReceiveSettings.NumGoroutines = concurrency
	log.INFO.Print("[*] Waiting for messages. To exit press CTRL+C")

	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		// Translate the broker stop signal into context cancellation,
		// which makes sub.Receive return.
		<-b.GetStopChan()
		cancel()
	}()

	for {
		err := sub.Receive(ctx, func(_ctx context.Context, msg *pubsub.Message) {
			b.consumeOne(msg, taskProcessor)
		})
		if err == nil {
			break
		}

		log.ERROR.Printf("Error when receiving messages. Error: %v", err)
		continue
	}

	close(b.stopDone)

	return b.GetRetry(), nil
}
| // StopConsuming quits the loop | |||
| func (b *Broker) StopConsuming() { | |||
| b.Broker.StopConsuming() | |||
| // Waiting for any tasks being processed to finish | |||
| <-b.stopDone | |||
| } | |||
// Publish places a new message on the default queue or the queue pointed to
// by the routing key. Note that for a future ETA this only sets the topic's
// publish DelayThreshold (batching delay) rather than scheduling delivery.
func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error {
	// Adjust routing key (this decides which queue the message will be published to)
	b.AdjustRoutingKey(signature)

	msg, err := json.Marshal(signature)
	if err != nil {
		return fmt.Errorf("JSON marshal error: %s", err)
	}

	topic := b.service.Topic(signature.RoutingKey)
	defer topic.Stop()

	// Check the ETA signature field, if it is set and it is in the future,
	// delay the task
	if signature.ETA != nil {
		now := time.Now().UTC()

		if signature.ETA.After(now) {
			topic.PublishSettings.DelayThreshold = signature.ETA.Sub(now)
		}
	}

	result := topic.Publish(ctx, &pubsub.Message{
		Data: msg,
	})

	// Block until the publish result (server-assigned ID or error) is known.
	id, err := result.Get(ctx)
	if err != nil {
		log.ERROR.Printf("Error when sending a message: %v", err)
		return err
	}

	log.INFO.Printf("Sending a message successfully, server-generated message ID %v", id)
	return nil
}
| // consumeOne processes a single message using TaskProcessor | |||
| func (b *Broker) consumeOne(delivery *pubsub.Message, taskProcessor iface.TaskProcessor) { | |||
| if len(delivery.Data) == 0 { | |||
| delivery.Nack() | |||
| log.ERROR.Printf("received an empty message, the delivery was %v", delivery) | |||
| } | |||
| sig := new(tasks.Signature) | |||
| decoder := json.NewDecoder(bytes.NewBuffer(delivery.Data)) | |||
| decoder.UseNumber() | |||
| if err := decoder.Decode(sig); err != nil { | |||
| delivery.Nack() | |||
| log.ERROR.Printf("unmarshal error. the delivery is %v", delivery) | |||
| } | |||
| // If the task is not registered return an error | |||
| // and leave the message in the queue | |||
| if !b.IsTaskRegistered(sig.Name) { | |||
| delivery.Nack() | |||
| log.ERROR.Printf("task %s is not registered", sig.Name) | |||
| } | |||
| err := taskProcessor.Process(sig) | |||
| if err != nil { | |||
| delivery.Nack() | |||
| log.ERROR.Printf("Failed process of task", err) | |||
| } | |||
| // Call Ack() after successfully consuming and processing the message | |||
| delivery.Ack() | |||
| } | |||
| @@ -0,0 +1,27 @@ | |||
| package iface | |||
| import ( | |||
| "context" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| ) | |||
// Broker - a common interface for all brokers
type Broker interface {
	// GetConfig returns the broker's configuration.
	GetConfig() *config.Config
	// SetRegisteredTaskNames tells the broker which task names this worker handles.
	SetRegisteredTaskNames(names []string)
	// IsTaskRegistered reports whether the named task was registered.
	IsTaskRegistered(name string) bool
	// StartConsuming blocks, consuming messages and dispatching them to p.
	// The returned bool tells the caller whether consuming should be retried.
	StartConsuming(consumerTag string, concurrency int, p TaskProcessor) (bool, error)
	// StopConsuming signals the consume loop to exit.
	StopConsuming()
	// Publish places a task signature on the queue chosen by its routing key.
	Publish(ctx context.Context, task *tasks.Signature) error
	// GetPendingTasks returns tasks waiting in the given queue
	// (not supported by every broker implementation).
	GetPendingTasks(queue string) ([]*tasks.Signature, error)
	// AdjustRoutingKey fills in a signature's routing key when it is empty.
	AdjustRoutingKey(s *tasks.Signature)
}
// TaskProcessor - can process a delivered task
// This will probably always be a worker instance
type TaskProcessor interface {
	// Process executes a single task signature to completion.
	Process(signature *tasks.Signature) error
	// CustomQueue returns the processor's queue name, or "" to use the default.
	CustomQueue() string
}
| @@ -0,0 +1,418 @@ | |||
| package redis | |||
| import ( | |||
| "bytes" | |||
| "context" | |||
| "encoding/json" | |||
| "fmt" | |||
| "sync" | |||
| "time" | |||
| "github.com/RichardKnop/machinery/v1/brokers/errs" | |||
| "github.com/RichardKnop/machinery/v1/brokers/iface" | |||
| "github.com/RichardKnop/machinery/v1/common" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| "github.com/RichardKnop/machinery/v1/log" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| "github.com/RichardKnop/redsync" | |||
| "github.com/gomodule/redigo/redis" | |||
| ) | |||
// redisDelayedTasksKey is the Redis key from which the delayed-task watcher
// goroutine pops due tasks before republishing them for normal consumption.
var redisDelayedTasksKey = "delayed_tasks"
// Broker represents a Redis broker
type Broker struct {
	common.Broker
	common.RedisConnector
	host         string      // Redis host address (ignored when socketPath is set)
	password     string      // optional AUTH password
	db           int         // Redis database number
	pool         *redis.Pool // lazily initialized connection pool
	consumingWG  sync.WaitGroup // wait group to make sure whole consumption completes
	processingWG sync.WaitGroup // use wait group to make sure task processing completes
	delayedWG    sync.WaitGroup // tracks the delayed-task watcher goroutine
	// If set, path to a socket file overrides hostname
	socketPath string
	redsync    *redsync.Redsync // distributed lock manager
	redisOnce  sync.Once        // guards one-time pool/redsync initialization
}
| // New creates new Broker instance | |||
| func New(cnf *config.Config, host, password, socketPath string, db int) iface.Broker { | |||
| b := &Broker{Broker: common.NewBroker(cnf)} | |||
| b.host = host | |||
| b.db = db | |||
| b.password = password | |||
| b.socketPath = socketPath | |||
| return b | |||
| } | |||
| // StartConsuming enters a loop and waits for incoming messages | |||
| func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) { | |||
| b.consumingWG.Add(1) | |||
| defer b.consumingWG.Done() | |||
| if concurrency < 1 { | |||
| concurrency = 1 | |||
| } | |||
| b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor) | |||
| conn := b.open() | |||
| defer conn.Close() | |||
| // Ping the server to make sure connection is live | |||
| _, err := conn.Do("PING") | |||
| if err != nil { | |||
| b.GetRetryFunc()(b.GetRetryStopChan()) | |||
| // Return err if retry is still true. | |||
| // If retry is false, broker.StopConsuming() has been called and | |||
| // therefore Redis might have been stopped. Return nil exit | |||
| // StartConsuming() | |||
| if b.GetRetry() { | |||
| return b.GetRetry(), err | |||
| } | |||
| return b.GetRetry(), errs.ErrConsumerStopped | |||
| } | |||
| // Channel to which we will push tasks ready for processing by worker | |||
| deliveries := make(chan []byte, concurrency) | |||
| pool := make(chan struct{}, concurrency) | |||
| // initialize worker pool with maxWorkers workers | |||
| for i := 0; i < concurrency; i++ { | |||
| pool <- struct{}{} | |||
| } | |||
| // A receiving goroutine keeps popping messages from the queue by BLPOP | |||
| // If the message is valid and can be unmarshaled into a proper structure | |||
| // we send it to the deliveries channel | |||
| go func() { | |||
| log.INFO.Print("[*] Waiting for messages. To exit press CTRL+C") | |||
| for { | |||
| select { | |||
| // A way to stop this goroutine from b.StopConsuming | |||
| case <-b.GetStopChan(): | |||
| close(deliveries) | |||
| return | |||
| case <-pool: | |||
| task, _ := b.nextTask(getQueue(b.GetConfig(), taskProcessor)) | |||
| //TODO: should this error be ignored? | |||
| if len(task) > 0 { | |||
| deliveries <- task | |||
| } | |||
| pool <- struct{}{} | |||
| } | |||
| } | |||
| }() | |||
| // A goroutine to watch for delayed tasks and push them to deliveries | |||
| // channel for consumption by the worker | |||
| b.delayedWG.Add(1) | |||
| go func() { | |||
| defer b.delayedWG.Done() | |||
| for { | |||
| select { | |||
| // A way to stop this goroutine from b.StopConsuming | |||
| case <-b.GetStopChan(): | |||
| return | |||
| default: | |||
| task, err := b.nextDelayedTask(redisDelayedTasksKey) | |||
| if err != nil { | |||
| continue | |||
| } | |||
| signature := new(tasks.Signature) | |||
| decoder := json.NewDecoder(bytes.NewReader(task)) | |||
| decoder.UseNumber() | |||
| if err := decoder.Decode(signature); err != nil { | |||
| log.ERROR.Print(errs.NewErrCouldNotUnmarshaTaskSignature(task, err)) | |||
| } | |||
| if err := b.Publish(context.Background(), signature); err != nil { | |||
| log.ERROR.Print(err) | |||
| } | |||
| } | |||
| } | |||
| }() | |||
| if err := b.consume(deliveries, concurrency, taskProcessor); err != nil { | |||
| return b.GetRetry(), err | |||
| } | |||
| // Waiting for any tasks being processed to finish | |||
| b.processingWG.Wait() | |||
| return b.GetRetry(), nil | |||
| } | |||
// StopConsuming quits the loop
//
// It signals the base broker to stop, waits for the delayed-tasks watcher and
// the main consuming loop to finish, then closes the connection pool.
func (b *Broker) StopConsuming() {
	b.Broker.StopConsuming()
	// Waiting for the delayed tasks goroutine to have stopped
	b.delayedWG.Wait()
	// Waiting for consumption to finish
	b.consumingWG.Wait()

	// Pool may still be nil if StartConsuming/Publish never opened a connection.
	if b.pool != nil {
		b.pool.Close()
	}
}
| // Publish places a new message on the default queue | |||
| func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error { | |||
| // Adjust routing key (this decides which queue the message will be published to) | |||
| b.Broker.AdjustRoutingKey(signature) | |||
| msg, err := json.Marshal(signature) | |||
| if err != nil { | |||
| return fmt.Errorf("JSON marshal error: %s", err) | |||
| } | |||
| conn := b.open() | |||
| defer conn.Close() | |||
| // Check the ETA signature field, if it is set and it is in the future, | |||
| // delay the task | |||
| if signature.ETA != nil { | |||
| now := time.Now().UTC() | |||
| if signature.ETA.After(now) { | |||
| score := signature.ETA.UnixNano() | |||
| _, err = conn.Do("ZADD", redisDelayedTasksKey, score, msg) | |||
| return err | |||
| } | |||
| } | |||
| _, err = conn.Do("RPUSH", signature.RoutingKey, msg) | |||
| return err | |||
| } | |||
| // GetPendingTasks returns a slice of task signatures waiting in the queue | |||
| func (b *Broker) GetPendingTasks(queue string) ([]*tasks.Signature, error) { | |||
| conn := b.open() | |||
| defer conn.Close() | |||
| if queue == "" { | |||
| queue = b.GetConfig().DefaultQueue | |||
| } | |||
| dataBytes, err := conn.Do("LRANGE", queue, 0, -1) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| results, err := redis.ByteSlices(dataBytes, err) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| taskSignatures := make([]*tasks.Signature, len(results)) | |||
| for i, result := range results { | |||
| signature := new(tasks.Signature) | |||
| decoder := json.NewDecoder(bytes.NewReader(result)) | |||
| decoder.UseNumber() | |||
| if err := decoder.Decode(signature); err != nil { | |||
| return nil, err | |||
| } | |||
| taskSignatures[i] = signature | |||
| } | |||
| return taskSignatures, nil | |||
| } | |||
// consume takes delivered messages from the channel and manages a worker pool
// to process tasks concurrently
//
// It returns when deliveries is closed (clean shutdown) or when a worker
// reports an error, whichever happens first.
func (b *Broker) consume(deliveries <-chan []byte, concurrency int, taskProcessor iface.TaskProcessor) error {
	// Buffered so in-flight worker goroutines can report errors without
	// blocking even after this function has returned.
	errorsChan := make(chan error, concurrency*2)
	pool := make(chan struct{}, concurrency)

	// init pool for Worker tasks execution, as many slots as Worker concurrency param
	// (filled asynchronously so the select loop below can start immediately)
	go func() {
		for i := 0; i < concurrency; i++ {
			pool <- struct{}{}
		}
	}()

	for {
		select {
		case err := <-errorsChan:
			return err
		case d, open := <-deliveries:
			if !open {
				// Deliveries channel closed by the receiving goroutine: stop.
				return nil
			}
			if concurrency > 0 {
				// get execution slot from pool (blocks until one is available)
				<-pool
			}

			b.processingWG.Add(1)

			// Consume the task inside a goroutine so multiple tasks
			// can be processed concurrently
			go func() {
				if err := b.consumeOne(d, taskProcessor); err != nil {
					errorsChan <- err
				}

				b.processingWG.Done()

				if concurrency > 0 {
					// give slot back to pool
					pool <- struct{}{}
				}
			}()
		}
	}
}
| // consumeOne processes a single message using TaskProcessor | |||
| func (b *Broker) consumeOne(delivery []byte, taskProcessor iface.TaskProcessor) error { | |||
| signature := new(tasks.Signature) | |||
| decoder := json.NewDecoder(bytes.NewReader(delivery)) | |||
| decoder.UseNumber() | |||
| if err := decoder.Decode(signature); err != nil { | |||
| return errs.NewErrCouldNotUnmarshaTaskSignature(delivery, err) | |||
| } | |||
| // If the task is not registered, we requeue it, | |||
| // there might be different workers for processing specific tasks | |||
| if !b.IsTaskRegistered(signature.Name) { | |||
| log.INFO.Printf("Task not registered with this worker. Requeing message: %s", delivery) | |||
| conn := b.open() | |||
| defer conn.Close() | |||
| conn.Do("RPUSH", getQueue(b.GetConfig(), taskProcessor), delivery) | |||
| return nil | |||
| } | |||
| log.DEBUG.Printf("Received new message: %s", delivery) | |||
| return taskProcessor.Process(signature) | |||
| } | |||
| // nextTask pops next available task from the default queue | |||
| func (b *Broker) nextTask(queue string) (result []byte, err error) { | |||
| conn := b.open() | |||
| defer conn.Close() | |||
| pollPeriodMilliseconds := 1000 // default poll period for normal tasks | |||
| if b.GetConfig().Redis != nil { | |||
| configuredPollPeriod := b.GetConfig().Redis.NormalTasksPollPeriod | |||
| if configuredPollPeriod > 0 { | |||
| pollPeriodMilliseconds = configuredPollPeriod | |||
| } | |||
| } | |||
| pollPeriod := time.Duration(pollPeriodMilliseconds) * time.Millisecond | |||
| items, err := redis.ByteSlices(conn.Do("BLPOP", queue, pollPeriod.Seconds())) | |||
| if err != nil { | |||
| return []byte{}, err | |||
| } | |||
| // items[0] - the name of the key where an element was popped | |||
| // items[1] - the value of the popped element | |||
| if len(items) != 2 { | |||
| return []byte{}, redis.ErrNil | |||
| } | |||
| result = items[1] | |||
| return result, nil | |||
| } | |||
// nextDelayedTask pops a value from the ZSET key using WATCH/MULTI/EXEC commands.
// https://github.com/gomodule/redigo/blob/master/redis/zpop_example_test.go
//
// The WATCH/MULTI/EXEC sequence makes the pop atomic: if another worker
// removes the same member between ZRANGEBYSCORE and EXEC, EXEC replies nil
// and the loop retries. Returns redis.ErrNil when no task is due yet.
func (b *Broker) nextDelayedTask(key string) (result []byte, err error) {
	conn := b.open()
	defer conn.Close()

	defer func() {
		// Return connection to normal state on error.
		// https://redis.io/commands/discard
		if err != nil {
			conn.Do("DISCARD")
		}
	}()

	var (
		items [][]byte
		reply interface{}
	)

	pollPeriod := 500 // default poll period for delayed tasks
	if b.GetConfig().Redis != nil {
		configuredPollPeriod := b.GetConfig().Redis.DelayedTasksPollPeriod
		// the default period is 0, which bombards redis with requests, despite
		// our intention of doing the opposite
		if configuredPollPeriod > 0 {
			pollPeriod = configuredPollPeriod
		}
	}

	for {
		// Space out queries to ZSET so we don't bombard redis
		// server with relentless ZRANGEBYSCOREs
		time.Sleep(time.Duration(pollPeriod) * time.Millisecond)
		if _, err = conn.Do("WATCH", key); err != nil {
			return
		}

		now := time.Now().UTC().UnixNano()

		// https://redis.io/commands/zrangebyscore
		// Fetch at most one member whose score (the task's ETA in ns) is due.
		items, err = redis.ByteSlices(conn.Do(
			"ZRANGEBYSCORE",
			key,
			0,
			now,
			"LIMIT",
			0,
			1,
		))
		if err != nil {
			return
		}
		if len(items) != 1 {
			err = redis.ErrNil
			return
		}

		_ = conn.Send("MULTI")
		_ = conn.Send("ZREM", key, items[0])
		reply, err = conn.Do("EXEC")
		if err != nil {
			return
		}

		// A nil reply means the WATCHed key was modified by someone else and
		// the transaction was aborted; loop and try again.
		if reply != nil {
			result = items[0]
			break
		}
	}

	return
}
// open returns or creates instance of Redis connection
//
// The pool (and the redsync instance backed by it) is created lazily exactly
// once; subsequent calls just borrow a connection from the pool. The caller
// is responsible for closing the returned connection.
func (b *Broker) open() redis.Conn {
	b.redisOnce.Do(func() {
		b.pool = b.NewPool(b.socketPath, b.host, b.password, b.db, b.GetConfig().Redis, b.GetConfig().TLSConfig)
		b.redsync = redsync.New([]redsync.Pool{b.pool})
	})

	return b.pool.Get()
}
| func getQueue(config *config.Config, taskProcessor iface.TaskProcessor) string { | |||
| customQueue := taskProcessor.CustomQueue() | |||
| if customQueue == "" { | |||
| return config.DefaultQueue | |||
| } | |||
| return customQueue | |||
| } | |||
| @@ -0,0 +1,361 @@ | |||
| package sqs | |||
| import ( | |||
| "context" | |||
| "encoding/json" | |||
| "errors" | |||
| "fmt" | |||
| "strings" | |||
| "sync" | |||
| "time" | |||
| "github.com/RichardKnop/machinery/v1/brokers/iface" | |||
| "github.com/RichardKnop/machinery/v1/common" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| "github.com/RichardKnop/machinery/v1/log" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| "github.com/aws/aws-sdk-go/aws" | |||
| "github.com/aws/aws-sdk-go/aws/session" | |||
| "github.com/aws/aws-sdk-go/service/sqs/sqsiface" | |||
| awssqs "github.com/aws/aws-sdk-go/service/sqs" | |||
| ) | |||
const (
	// maxAWSSQSDelay caps DelaySeconds when publishing delayed tasks;
	// SQS rejects anything longer.
	maxAWSSQSDelay = time.Minute * 15 // Max supported SQS delay is 15 min: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html
)
// Broker represents a AWS SQS broker
// There are examples on: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sqs-example-create-queue.html
type Broker struct {
	common.Broker
	processingWG      sync.WaitGroup // use wait group to make sure task processing completes on interrupt signal
	receivingWG       sync.WaitGroup // waits for the receiving goroutine started by StartConsuming
	stopReceivingChan chan int       // signals the receiving goroutine to stop
	sess              *session.Session
	service           sqsiface.SQSAPI // SQS API client (SDK client or caller-provided via config)
	queueUrl          *string         // queue URL captured by StartConsuming; reused when deleting messages
}
| // New creates new Broker instance | |||
| func New(cnf *config.Config) iface.Broker { | |||
| b := &Broker{Broker: common.NewBroker(cnf)} | |||
| if cnf.SQS != nil && cnf.SQS.Client != nil { | |||
| // Use provided *SQS client | |||
| b.service = cnf.SQS.Client | |||
| } else { | |||
| // Initialize a session that the SDK will use to load credentials from the shared credentials file, ~/.aws/credentials. | |||
| // See details on: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html | |||
| // Also, env AWS_REGION is also required | |||
| b.sess = session.Must(session.NewSessionWithOptions(session.Options{ | |||
| SharedConfigState: session.SharedConfigEnable, | |||
| })) | |||
| b.service = awssqs.New(b.sess) | |||
| } | |||
| return b | |||
| } | |||
// GetPendingTasks returns a slice of task.Signatures waiting in the queue
//
// Not supported by the SQS broker; it always returns an error.
func (b *Broker) GetPendingTasks(queue string) ([]*tasks.Signature, error) {
	return nil, errors.New("Not implemented")
}
// StartConsuming enters a loop and waits for incoming messages
//
// A single receiving goroutine long-polls SQS — gated by the worker pool so
// it never fetches more than the workers can handle — and forwards non-empty
// batches to the deliveries channel, which consume() drains until an error
// or a stop signal occurs.
func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) (bool, error) {
	b.Broker.StartConsuming(consumerTag, concurrency, taskProcessor)
	qURL := b.getQueueURL(taskProcessor)
	//save it so that it can be used later when attempting to delete task
	b.queueUrl = qURL

	deliveries := make(chan *awssqs.ReceiveMessageOutput, concurrency)
	pool := make(chan struct{}, concurrency)

	// initialize worker pool with maxWorkers workers
	for i := 0; i < concurrency; i++ {
		pool <- struct{}{}
	}
	b.stopReceivingChan = make(chan int)
	b.receivingWG.Add(1)

	go func() {
		defer b.receivingWG.Done()

		log.INFO.Printf("[*] Waiting for messages on queue: %s. To exit press CTRL+C\n", *qURL)

		for {
			select {
			// A way to stop this goroutine from b.StopConsuming
			case <-b.stopReceivingChan:
				close(deliveries)
				return
			case <-pool:
				// A worker slot is free: fetch at most one message.
				output, err := b.receiveMessage(qURL)
				if err == nil && len(output.Messages) > 0 {
					deliveries <- output
				} else {
					//return back to pool right away
					pool <- struct{}{}
					if err != nil {
						log.ERROR.Printf("Queue consume error: %s", err)
					}
				}
			}
		}
	}()

	if err := b.consume(deliveries, concurrency, taskProcessor, pool); err != nil {
		return b.GetRetry(), err
	}

	return b.GetRetry(), nil
}
// StopConsuming quits the loop
//
// Order matters: signal the receiver first so no new deliveries arrive,
// then wait for in-flight task processing and for the receiving goroutine
// to exit.
func (b *Broker) StopConsuming() {
	b.Broker.StopConsuming()

	b.stopReceiving()

	// Waiting for any tasks being processed to finish
	b.processingWG.Wait()

	// Waiting for the receiving goroutine to have stopped
	b.receivingWG.Wait()
}
// Publish places a new message on the default queue
//
// FIFO queues (routing key ending in ".fifo") additionally require a
// deduplication ID (taken from the task UUID) and a message group ID (taken
// from Signature.BrokerMessageGroupId). A future ETA is translated into an
// SQS delay for non-FIFO queues, capped at the 15-minute SQS maximum.
func (b *Broker) Publish(ctx context.Context, signature *tasks.Signature) error {
	msg, err := json.Marshal(signature)
	if err != nil {
		return fmt.Errorf("JSON marshal error: %s", err)
	}

	// Check that signature.RoutingKey is set, if not switch to DefaultQueue
	b.AdjustRoutingKey(signature)

	MsgInput := &awssqs.SendMessageInput{
		MessageBody: aws.String(string(msg)),
		QueueUrl:    aws.String(b.GetConfig().Broker + "/" + signature.RoutingKey),
	}

	// if this is a fifo queue, there needs to be some additional parameters.
	if strings.HasSuffix(signature.RoutingKey, ".fifo") {
		// Use Machinery's signature Task UUID as SQS Message Deduplication ID.
		MsgDedupID := signature.UUID
		MsgInput.MessageDeduplicationId = aws.String(MsgDedupID)

		// Do not use Machinery's signature Group UUID as SQS Message Group ID, instead use BrokerMessageGroupId
		MsgGroupID := signature.BrokerMessageGroupId
		if MsgGroupID == "" {
			return fmt.Errorf("please specify BrokerMessageGroupId attribute for task Signature when submitting a task to FIFO queue")
		}
		MsgInput.MessageGroupId = aws.String(MsgGroupID)
	}

	// Check the ETA signature field, if it is set and it is in the future,
	// and is not a fifo queue, set a delay in seconds for the task.
	if signature.ETA != nil && !strings.HasSuffix(signature.RoutingKey, ".fifo") {
		now := time.Now().UTC()
		delay := signature.ETA.Sub(now)
		if delay > 0 {
			if delay > maxAWSSQSDelay {
				return errors.New("Max AWS SQS delay exceeded")
			}
			MsgInput.DelaySeconds = aws.Int64(int64(delay.Seconds()))
		}
	}

	result, err := b.service.SendMessageWithContext(ctx, MsgInput)
	if err != nil {
		log.ERROR.Printf("Error when sending a message: %v", err)
		return err
	}

	log.INFO.Printf("Sending a message successfully, the messageId is %v", *result.MessageId)
	return nil
}
| // consume is a method which keeps consuming deliveries from a channel, until there is an error or a stop signal | |||
| func (b *Broker) consume(deliveries <-chan *awssqs.ReceiveMessageOutput, concurrency int, taskProcessor iface.TaskProcessor, pool chan struct{}) error { | |||
| errorsChan := make(chan error) | |||
| for { | |||
| whetherContinue, err := b.consumeDeliveries(deliveries, concurrency, taskProcessor, pool, errorsChan) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if whetherContinue == false { | |||
| return nil | |||
| } | |||
| } | |||
| } | |||
// consumeOne is a method consumes a delivery. If a delivery was consumed successfully, it will be deleted from AWS SQS
//
// Only the first message of the batch is handled (receiveMessage requests at
// most one). Unregistered tasks are left in the queue, to become visible
// again after the visibility timeout, so a worker that knows them can pick
// them up.
func (b *Broker) consumeOne(delivery *awssqs.ReceiveMessageOutput, taskProcessor iface.TaskProcessor) error {
	if len(delivery.Messages) == 0 {
		log.ERROR.Printf("received an empty message, the delivery was %v", delivery)
		return errors.New("received empty message, the delivery is " + delivery.GoString())
	}

	sig := new(tasks.Signature)
	decoder := json.NewDecoder(strings.NewReader(*delivery.Messages[0].Body))
	decoder.UseNumber()
	if err := decoder.Decode(sig); err != nil {
		log.ERROR.Printf("unmarshal error. the delivery is %v", delivery)
		return err
	}

	// Carry the receipt handle on the signature so this specific receive of
	// the message can be referenced later.
	if delivery.Messages[0].ReceiptHandle != nil {
		sig.SQSReceiptHandle = *delivery.Messages[0].ReceiptHandle
	}

	// If the task is not registered return an error
	// and leave the message in the queue
	if !b.IsTaskRegistered(sig.Name) {
		return fmt.Errorf("task %s is not registered", sig.Name)
	}

	err := taskProcessor.Process(sig)
	if err != nil {
		return err
	}

	// Delete message after successfully consuming and processing the message
	if err = b.deleteOne(delivery); err != nil {
		log.ERROR.Printf("error when deleting the delivery. delivery is %v, Error=%s", delivery, err)
	}
	return err
}
| // deleteOne is a method delete a delivery from AWS SQS | |||
| func (b *Broker) deleteOne(delivery *awssqs.ReceiveMessageOutput) error { | |||
| qURL := b.defaultQueueURL() | |||
| _, err := b.service.DeleteMessage(&awssqs.DeleteMessageInput{ | |||
| QueueUrl: qURL, | |||
| ReceiptHandle: delivery.Messages[0].ReceiptHandle, | |||
| }) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| // defaultQueueURL is a method returns the default queue url | |||
| func (b *Broker) defaultQueueURL() *string { | |||
| if b.queueUrl != nil { | |||
| return b.queueUrl | |||
| } else { | |||
| return aws.String(b.GetConfig().Broker + "/" + b.GetConfig().DefaultQueue) | |||
| } | |||
| } | |||
| // receiveMessage is a method receives a message from specified queue url | |||
| func (b *Broker) receiveMessage(qURL *string) (*awssqs.ReceiveMessageOutput, error) { | |||
| var waitTimeSeconds int | |||
| var visibilityTimeout *int | |||
| if b.GetConfig().SQS != nil { | |||
| waitTimeSeconds = b.GetConfig().SQS.WaitTimeSeconds | |||
| visibilityTimeout = b.GetConfig().SQS.VisibilityTimeout | |||
| } else { | |||
| waitTimeSeconds = 0 | |||
| } | |||
| input := &awssqs.ReceiveMessageInput{ | |||
| AttributeNames: []*string{ | |||
| aws.String(awssqs.MessageSystemAttributeNameSentTimestamp), | |||
| }, | |||
| MessageAttributeNames: []*string{ | |||
| aws.String(awssqs.QueueAttributeNameAll), | |||
| }, | |||
| QueueUrl: qURL, | |||
| MaxNumberOfMessages: aws.Int64(1), | |||
| WaitTimeSeconds: aws.Int64(int64(waitTimeSeconds)), | |||
| } | |||
| if visibilityTimeout != nil { | |||
| input.VisibilityTimeout = aws.Int64(int64(*visibilityTimeout)) | |||
| } | |||
| result, err := b.service.ReceiveMessage(input) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return result, err | |||
| } | |||
| // initializePool is a method which initializes concurrency pool | |||
| func (b *Broker) initializePool(pool chan struct{}, concurrency int) { | |||
| for i := 0; i < concurrency; i++ { | |||
| pool <- struct{}{} | |||
| } | |||
| } | |||
// consumeDeliveries is a method consuming deliveries from deliveries channel
//
// Returns (false, err) on a worker error, (false, nil) on a stop signal and
// (true, nil) when the caller should keep looping. The pool slot taken for a
// dispatched delivery is returned by the worker goroutine itself.
func (b *Broker) consumeDeliveries(deliveries <-chan *awssqs.ReceiveMessageOutput, concurrency int, taskProcessor iface.TaskProcessor, pool chan struct{}, errorsChan chan error) (bool, error) {
	select {
	case err := <-errorsChan:
		return false, err
	case d := <-deliveries:

		b.processingWG.Add(1)

		// Consume the task inside a goroutine so multiple tasks
		// can be processed concurrently
		go func() {

			if err := b.consumeOne(d, taskProcessor); err != nil {
				errorsChan <- err
			}

			b.processingWG.Done()

			if concurrency > 0 {
				// give worker back to pool
				pool <- struct{}{}
			}
		}()
	case <-b.GetStopChan():
		return false, nil
	}
	return true, nil
}
// continueReceivingMessages is a method returns a continue signal
//
// It performs a single receive attempt; errors and empty receives still
// signal "continue" so the caller keeps polling.
//
// NOTE(review): the send to deliveries happens in a fire-and-forget
// goroutine; if the consumer has already stopped, that goroutine could block
// indefinitely — presumably acceptable at shutdown, but worth confirming.
func (b *Broker) continueReceivingMessages(qURL *string, deliveries chan *awssqs.ReceiveMessageOutput) (bool, error) {
	select {
	// A way to stop this goroutine from b.StopConsuming
	case <-b.stopReceivingChan:
		return false, nil
	default:
		output, err := b.receiveMessage(qURL)
		if err != nil {
			return true, err
		}
		if len(output.Messages) == 0 {
			return true, nil
		}
		go func() { deliveries <- output }()
	}
	return true, nil
}
// stopReceiving is a method sending a signal to stopReceivingChan
//
// The send is unbuffered, so this blocks until the receiving goroutine
// consumes the signal.
func (b *Broker) stopReceiving() {
	// Stop the receiving goroutine
	b.stopReceivingChan <- 1
}
| // getQueueURL is a method returns that returns queueURL first by checking if custom queue was set and usign it | |||
| // otherwise using default queueName from config | |||
| func (b *Broker) getQueueURL(taskProcessor iface.TaskProcessor) *string { | |||
| queueName := b.GetConfig().DefaultQueue | |||
| if taskProcessor.CustomQueue() != "" { | |||
| queueName = taskProcessor.CustomQueue() | |||
| } | |||
| return aws.String(b.GetConfig().Broker + "/" + queueName) | |||
| } | |||
| @@ -0,0 +1,129 @@ | |||
| package common | |||
| import ( | |||
| "crypto/tls" | |||
| "fmt" | |||
| "github.com/streadway/amqp" | |||
| ) | |||
// AMQPConnector groups helper methods for opening, configuring and closing
// RabbitMQ connections and channels; it holds no state of its own.
type AMQPConnector struct{}
// Connect opens a connection to RabbitMQ, declares an exchange, opens a channel,
// declares and binds the queue and enables publish notifications
//
// The exchange/queue declaration steps are skipped when the corresponding
// name is empty. On success it also returns a publish-confirmation channel
// and a connection-close notification channel. On failure after Open
// succeeded, the live conn/channel are returned alongside the error so the
// caller can clean them up.
func (ac *AMQPConnector) Connect(url string, tlsConfig *tls.Config, exchange, exchangeType, queueName string, queueDurable, queueDelete bool, queueBindingKey string, exchangeDeclareArgs, queueDeclareArgs, queueBindingArgs amqp.Table) (*amqp.Connection, *amqp.Channel, amqp.Queue, <-chan amqp.Confirmation, <-chan *amqp.Error, error) {
	// Connect to server
	conn, channel, err := ac.Open(url, tlsConfig)
	if err != nil {
		return nil, nil, amqp.Queue{}, nil, nil, err
	}

	if exchange != "" {
		// Declare an exchange
		if err = channel.ExchangeDeclare(
			exchange,            // name of the exchange
			exchangeType,        // type
			true,                // durable
			false,               // delete when complete
			false,               // internal
			false,               // noWait
			exchangeDeclareArgs, // arguments
		); err != nil {
			return conn, channel, amqp.Queue{}, nil, nil, fmt.Errorf("Exchange declare error: %s", err)
		}
	}

	var queue amqp.Queue
	if queueName != "" {
		// Declare a queue
		queue, err = channel.QueueDeclare(
			queueName,        // name
			queueDurable,     // durable
			queueDelete,      // delete when unused
			false,            // exclusive
			false,            // no-wait
			queueDeclareArgs, // arguments
		)
		if err != nil {
			return conn, channel, amqp.Queue{}, nil, nil, fmt.Errorf("Queue declare error: %s", err)
		}

		// Bind the queue
		if err = channel.QueueBind(
			queue.Name,       // name of the queue
			queueBindingKey,  // binding key
			exchange,         // source exchange
			false,            // noWait
			queueBindingArgs, // arguments
		); err != nil {
			return conn, channel, queue, nil, nil, fmt.Errorf("Queue bind error: %s", err)
		}
	}

	// Enable publish confirmations
	if err = channel.Confirm(false); err != nil {
		return conn, channel, queue, nil, nil, fmt.Errorf("Channel could not be put into confirm mode: %s", err)
	}

	return conn, channel, queue, channel.NotifyPublish(make(chan amqp.Confirmation, 1)), conn.NotifyClose(make(chan *amqp.Error, 1)), nil
}
// DeleteQueue deletes a queue by name
//
// Deletion is unconditional (ifUnused and ifEmpty are both false); the count
// of purged messages is discarded.
func (ac *AMQPConnector) DeleteQueue(channel *amqp.Channel, queueName string) error {
	// First return value is number of messages removed
	_, err := channel.QueueDelete(
		queueName, // name
		false,     // ifUnused
		false,     // ifEmpty
		false,     // noWait
	)

	return err
}
| // InspectQueue provides information about a specific queue | |||
| func (*AMQPConnector) InspectQueue(channel *amqp.Channel, queueName string) (*amqp.Queue, error) { | |||
| queueState, err := channel.QueueInspect(queueName) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("Queue inspect error: %s", err) | |||
| } | |||
| return &queueState, nil | |||
| } | |||
| // Open new RabbitMQ connection | |||
| func (ac *AMQPConnector) Open(url string, tlsConfig *tls.Config) (*amqp.Connection, *amqp.Channel, error) { | |||
| // Connect | |||
| // From amqp docs: DialTLS will use the provided tls.Config when it encounters an amqps:// scheme | |||
| // and will dial a plain connection when it encounters an amqp:// scheme. | |||
| conn, err := amqp.DialTLS(url, tlsConfig) | |||
| if err != nil { | |||
| return nil, nil, fmt.Errorf("Dial error: %s", err) | |||
| } | |||
| // Open a channel | |||
| channel, err := conn.Channel() | |||
| if err != nil { | |||
| return nil, nil, fmt.Errorf("Open channel error: %s", err) | |||
| } | |||
| return conn, channel, nil | |||
| } | |||
// Close connection
//
// The channel is closed before its parent connection; nil arguments are
// skipped, and the first failure aborts the sequence.
func (ac *AMQPConnector) Close(channel *amqp.Channel, conn *amqp.Connection) error {
	if channel != nil {
		if err := channel.Close(); err != nil {
			return fmt.Errorf("Close channel error: %s", err)
		}
	}

	if conn != nil {
		if err := conn.Close(); err != nil {
			return fmt.Errorf("Close connection error: %s", err)
		}
	}

	return nil
}
| @@ -0,0 +1,25 @@ | |||
| package common | |||
| import ( | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| ) | |||
// Backend represents a base backend structure
type Backend struct {
	cnf *config.Config // shared machinery configuration
}

// NewBackend creates new Backend instance
func NewBackend(cnf *config.Config) Backend {
	return Backend{cnf: cnf}
}

// GetConfig returns config
func (b *Backend) GetConfig() *config.Config {
	return b.cnf
}

// IsAMQP reports whether this backend is AMQP-based; the base implementation
// always returns false.
func (b *Backend) IsAMQP() bool {
	return false
}
| @@ -0,0 +1,121 @@ | |||
| package common | |||
| import ( | |||
| "errors" | |||
| "github.com/RichardKnop/machinery/v1/brokers/iface" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| "github.com/RichardKnop/machinery/v1/log" | |||
| "github.com/RichardKnop/machinery/v1/retry" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| ) | |||
// Broker represents a base broker structure
type Broker struct {
	cnf                 *config.Config
	registeredTaskNames []string       // task names this worker knows how to process
	retry               bool           // whether consuming should be retried after a failure
	retryFunc           func(chan int) // retry closure; set lazily in StartConsuming
	retryStopChan       chan int       // used to abort the retry closure early
	stopChan            chan int       // closed by StopConsuming to stop consumers
}
| // NewBroker creates new Broker instance | |||
| func NewBroker(cnf *config.Config) Broker { | |||
| return Broker{ | |||
| cnf: cnf, | |||
| retry: true, | |||
| stopChan: make(chan int), | |||
| retryStopChan: make(chan int), | |||
| } | |||
| } | |||
// GetConfig returns config
func (b *Broker) GetConfig() *config.Config {
	return b.cnf
}

// GetRetry reports whether the broker should reconnect and retry consuming.
func (b *Broker) GetRetry() bool {
	return b.retry
}

// GetRetryFunc returns the retry closure invoked between reconnection attempts.
func (b *Broker) GetRetryFunc() func(chan int) {
	return b.retryFunc
}

// GetRetryStopChan returns the channel used to abort the retry closure early.
func (b *Broker) GetRetryStopChan() chan int {
	return b.retryStopChan
}

// GetStopChan returns the channel closed by StopConsuming to signal shutdown.
func (b *Broker) GetStopChan() chan int {
	return b.stopChan
}
// Publish places a new message on the default queue
//
// Base implementation: always errors; concrete brokers provide their own.
func (b *Broker) Publish(signature *tasks.Signature) error {
	return errors.New("Not implemented")
}

// SetRegisteredTaskNames sets registered task names
func (b *Broker) SetRegisteredTaskNames(names []string) {
	b.registeredTaskNames = names
}

// IsTaskRegistered returns true if the task is registered with this broker
func (b *Broker) IsTaskRegistered(name string) bool {
	// Linear scan; presumably the registered-name list is short.
	for _, registeredTaskName := range b.registeredTaskNames {
		if registeredTaskName == name {
			return true
		}
	}
	return false
}

// GetPendingTasks returns a slice of task.Signatures waiting in the queue
//
// Base implementation: always errors; brokers able to inspect their queue
// provide their own.
func (b *Broker) GetPendingTasks(queue string) ([]*tasks.Signature, error) {
	return nil, errors.New("Not implemented")
}
// StartConsuming is a common part of StartConsuming method
//
// It only ensures a retry closure exists; concrete brokers implement the
// actual consume loop.
func (b *Broker) StartConsuming(consumerTag string, concurrency int, taskProcessor iface.TaskProcessor) {
	if b.retryFunc == nil {
		b.retryFunc = retry.Closure()
	}
}
// StopConsuming is a common part of StopConsuming
func (b *Broker) StopConsuming() {
	// Do not retry from now on
	b.retry = false
	// Stop the retry closure earlier
	// (non-blocking: skipped if nothing is currently waiting on the channel)
	select {
	case b.retryStopChan <- 1:
		log.WARNING.Print("Stopping retry closure.")
	default:
	}
	// Notifying the stop channel stops consuming of messages
	close(b.stopChan)
	log.WARNING.Print("Stop channel")
}
// GetRegisteredTaskNames returns registered tasks names
//
// The returned slice is the broker's internal one, not a copy; callers
// should not mutate it.
func (b *Broker) GetRegisteredTaskNames() []string {
	return b.registeredTaskNames
}
| // AdjustRoutingKey makes sure the routing key is correct. | |||
| // If the routing key is an empty string: | |||
| // a) set it to binding key for direct exchange type | |||
| // b) set it to default queue name | |||
| func (b *Broker) AdjustRoutingKey(s *tasks.Signature) { | |||
| if s.RoutingKey != "" { | |||
| return | |||
| } | |||
| s.RoutingKey = b.GetConfig().DefaultQueue | |||
| } | |||
| @@ -0,0 +1,84 @@ | |||
| package common | |||
| import ( | |||
| "crypto/tls" | |||
| "time" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| "github.com/gomodule/redigo/redis" | |||
| ) | |||
var (
	// defaultConfig is used when the caller passes a nil *config.RedisConfig.
	// Timeouts are in seconds, poll periods in milliseconds (matching the
	// field documentation on config.RedisConfig).
	defaultConfig = &config.RedisConfig{
		MaxIdle:                3,
		IdleTimeout:            240,
		ReadTimeout:            15,
		WriteTimeout:           15,
		ConnectTimeout:         15,
		NormalTasksPollPeriod:  1000,
		DelayedTasksPollPeriod: 20,
	}
)

// RedisConnector is a stateless helper that builds redigo connection pools.
type RedisConnector struct{}
| // NewPool returns a new pool of Redis connections | |||
| func (rc *RedisConnector) NewPool(socketPath, host, password string, db int, cnf *config.RedisConfig, tlsConfig *tls.Config) *redis.Pool { | |||
| if cnf == nil { | |||
| cnf = defaultConfig | |||
| } | |||
| return &redis.Pool{ | |||
| MaxIdle: cnf.MaxIdle, | |||
| IdleTimeout: time.Duration(cnf.IdleTimeout) * time.Second, | |||
| MaxActive: cnf.MaxActive, | |||
| Wait: cnf.Wait, | |||
| Dial: func() (redis.Conn, error) { | |||
| c, err := rc.open(socketPath, host, password, db, cnf, tlsConfig) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if db != 0 { | |||
| _, err = c.Do("SELECT", db) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| } | |||
| return c, err | |||
| }, | |||
| // PINGs connections that have been idle more than 10 seconds | |||
| TestOnBorrow: func(c redis.Conn, t time.Time) error { | |||
| if time.Since(t) < time.Duration(10*time.Second) { | |||
| return nil | |||
| } | |||
| _, err := c.Do("PING") | |||
| return err | |||
| }, | |||
| } | |||
| } | |||
| // Open a new Redis connection | |||
| func (rc *RedisConnector) open(socketPath, host, password string, db int, cnf *config.RedisConfig, tlsConfig *tls.Config) (redis.Conn, error) { | |||
| var opts = []redis.DialOption{ | |||
| redis.DialDatabase(db), | |||
| redis.DialReadTimeout(time.Duration(cnf.ReadTimeout) * time.Second), | |||
| redis.DialWriteTimeout(time.Duration(cnf.WriteTimeout) * time.Second), | |||
| redis.DialConnectTimeout(time.Duration(cnf.ConnectTimeout) * time.Second), | |||
| } | |||
| if tlsConfig != nil { | |||
| opts = append(opts, redis.DialTLSConfig(tlsConfig), redis.DialUseTLS(true)) | |||
| } | |||
| if password != "" { | |||
| opts = append(opts, redis.DialPassword(password)) | |||
| } | |||
| if socketPath != "" { | |||
| return redis.Dial("unix", socketPath, opts...) | |||
| } | |||
| return redis.Dial("tcp", host, opts...) | |||
| } | |||
| @@ -0,0 +1,161 @@ | |||
| package config | |||
| import ( | |||
| "crypto/tls" | |||
| "fmt" | |||
| "strings" | |||
| "time" | |||
| "cloud.google.com/go/pubsub" | |||
| "github.com/aws/aws-sdk-go/service/dynamodb" | |||
| "github.com/aws/aws-sdk-go/service/sqs" | |||
| "go.mongodb.org/mongo-driver/mongo" | |||
| ) | |||
const (
	// DefaultResultsExpireIn is a default time used to expire task states and group metadata from the backend
	// (presumably seconds, i.e. one hour — TODO confirm against backend usage).
	DefaultResultsExpireIn = 3600
)

var (
	// Start with sensible default values
	defaultCnf = &Config{
		Broker:          "amqp://guest:guest@localhost:5672/",
		DefaultQueue:    "machinery_tasks",
		ResultBackend:   "amqp://guest:guest@localhost:5672/",
		ResultsExpireIn: DefaultResultsExpireIn,
		AMQP: &AMQPConfig{
			Exchange:      "machinery_exchange",
			ExchangeType:  "direct",
			BindingKey:    "machinery_task",
			PrefetchCount: 3,
		},
		DynamoDB: &DynamoDBConfig{
			TaskStatesTable: "task_states",
			GroupMetasTable: "group_metas",
		},
		Redis: &RedisConfig{
			MaxIdle:                3,
			IdleTimeout:            240,
			ReadTimeout:            15,
			WriteTimeout:           15,
			ConnectTimeout:         15,
			NormalTasksPollPeriod:  1000,
			DelayedTasksPollPeriod: 20,
		},
		GCPPubSub: &GCPPubSubConfig{
			Client: nil,
		},
	}

	// reloadDelay is how long the reload goroutines sleep between attempts
	// (see NewFromEnvironment and NewFromYaml).
	reloadDelay = time.Second * 10
)
// Config holds all configuration for our program
type Config struct {
	Broker          string       `yaml:"broker" envconfig:"BROKER"`
	DefaultQueue    string       `yaml:"default_queue" envconfig:"DEFAULT_QUEUE"`
	ResultBackend   string       `yaml:"result_backend" envconfig:"RESULT_BACKEND"`
	ResultsExpireIn int          `yaml:"results_expire_in" envconfig:"RESULTS_EXPIRE_IN"`
	AMQP            *AMQPConfig  `yaml:"amqp"`
	SQS             *SQSConfig   `yaml:"sqs"`
	Redis           *RedisConfig `yaml:"redis"`
	// GCPPubSub and MongoDB carry live client handles, so they are excluded
	// from both YAML and envconfig processing.
	GCPPubSub *GCPPubSubConfig `yaml:"-" ignored:"true"`
	MongoDB   *MongoDBConfig   `yaml:"-" ignored:"true"`
	// TLSConfig is not loaded from YAML/env; it must be set programmatically.
	TLSConfig *tls.Config
	// NoUnixSignals - when set disables signal handling in machinery
	NoUnixSignals bool            `yaml:"no_unix_signals" envconfig:"NO_UNIX_SIGNALS"`
	DynamoDB      *DynamoDBConfig `yaml:"dynamodb"`
}

// QueueBindingArgs arguments which are used when binding to the exchange
type QueueBindingArgs map[string]interface{}

// AMQPConfig wraps RabbitMQ related configuration
type AMQPConfig struct {
	Exchange         string           `yaml:"exchange" envconfig:"AMQP_EXCHANGE"`
	ExchangeType     string           `yaml:"exchange_type" envconfig:"AMQP_EXCHANGE_TYPE"`
	QueueBindingArgs QueueBindingArgs `yaml:"queue_binding_args" envconfig:"AMQP_QUEUE_BINDING_ARGS"`
	BindingKey       string           `yaml:"binding_key" envconfig:"AMQP_BINDING_KEY"`
	PrefetchCount    int              `yaml:"prefetch_count" envconfig:"AMQP_PREFETCH_COUNT"`
	AutoDelete       bool             `yaml:"auto_delete" envconfig:"AMQP_AUTO_DELETE"`
}

// DynamoDBConfig wraps DynamoDB related configuration
type DynamoDBConfig struct {
	// Client is an optional pre-built DynamoDB client, injected programmatically.
	Client          *dynamodb.DynamoDB
	TaskStatesTable string `yaml:"task_states_table" envconfig:"TASK_STATES_TABLE"`
	GroupMetasTable string `yaml:"group_metas_table" envconfig:"GROUP_METAS_TABLE"`
}

// SQSConfig wraps SQS related configuration
type SQSConfig struct {
	// Client is an optional pre-built SQS client, injected programmatically.
	Client          *sqs.SQS
	WaitTimeSeconds int `yaml:"receive_wait_time_seconds" envconfig:"SQS_WAIT_TIME_SECONDS"`
	// https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html
	// visibility timeout should default to nil to use the overall visibility timeout for the queue
	VisibilityTimeout *int `yaml:"receive_visibility_timeout" envconfig:"SQS_VISIBILITY_TIMEOUT"`
}

// RedisConfig ...
type RedisConfig struct {
	// Maximum number of idle connections in the pool.
	MaxIdle int `yaml:"max_idle" envconfig:"REDIS_MAX_IDLE"`
	// Maximum number of connections allocated by the pool at a given time.
	// When zero, there is no limit on the number of connections in the pool.
	MaxActive int `yaml:"max_active" envconfig:"REDIS_MAX_ACTIVE"`
	// Close connections after remaining idle for this duration in seconds. If the value
	// is zero, then idle connections are not closed. Applications should set
	// the timeout to a value less than the server's timeout.
	IdleTimeout int `yaml:"max_idle_timeout" envconfig:"REDIS_IDLE_TIMEOUT"`
	// If Wait is true and the pool is at the MaxActive limit, then Get() waits
	// for a connection to be returned to the pool before returning.
	Wait bool `yaml:"wait" envconfig:"REDIS_WAIT"`
	// ReadTimeout specifies the timeout in seconds for reading a single command reply.
	ReadTimeout int `yaml:"read_timeout" envconfig:"REDIS_READ_TIMEOUT"`
	// WriteTimeout specifies the timeout in seconds for writing a single command.
	WriteTimeout int `yaml:"write_timeout" envconfig:"REDIS_WRITE_TIMEOUT"`
	// ConnectTimeout specifies the timeout in seconds for connecting to the Redis server when
	// no DialNetDial option is specified.
	ConnectTimeout int `yaml:"connect_timeout" envconfig:"REDIS_CONNECT_TIMEOUT"`
	// NormalTasksPollPeriod specifies the period in milliseconds when polling redis for normal tasks
	NormalTasksPollPeriod int `yaml:"normal_tasks_poll_period" envconfig:"REDIS_NORMAL_TASKS_POLL_PERIOD"`
	// DelayedTasksPollPeriod specifies the period in milliseconds when polling redis for delayed tasks
	DelayedTasksPollPeriod int `yaml:"delayed_tasks_poll_period" envconfig:"REDIS_DELAYED_TASKS_POLL_PERIOD"`
}

// GCPPubSubConfig wraps GCP PubSub related configuration
type GCPPubSubConfig struct {
	// Client is an optional pre-built Pub/Sub client, injected programmatically.
	Client       *pubsub.Client
	MaxExtension time.Duration
}

// MongoDBConfig ...
type MongoDBConfig struct {
	// Client is an optional pre-built MongoDB client, injected programmatically.
	Client   *mongo.Client
	Database string
}
| // Decode from yaml to map (any field whose type or pointer-to-type implements | |||
| // envconfig.Decoder can control its own deserialization) | |||
| func (args *QueueBindingArgs) Decode(value string) error { | |||
| pairs := strings.Split(value, ",") | |||
| mp := make(map[string]interface{}, len(pairs)) | |||
| for _, pair := range pairs { | |||
| kvpair := strings.Split(pair, ":") | |||
| if len(kvpair) != 2 { | |||
| return fmt.Errorf("invalid map item: %q", pair) | |||
| } | |||
| mp[kvpair[0]] = kvpair[1] | |||
| } | |||
| *args = QueueBindingArgs(mp) | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,58 @@ | |||
| package config | |||
| import ( | |||
| "time" | |||
| "github.com/RichardKnop/machinery/v1/log" | |||
| "github.com/kelseyhightower/envconfig" | |||
| ) | |||
// NewFromEnvironment creates a config object from environment variables.
// When keepReloading is true, a background goroutine re-reads the environment
// every reloadDelay and updates the returned config in place.
func NewFromEnvironment(keepReloading bool) (*Config, error) {
	cnf, err := fromEnvironment()
	if err != nil {
		return nil, err
	}
	log.INFO.Print("Successfully loaded config from the environment")
	if keepReloading {
		// Open a goroutine to watch remote changes forever
		go func() {
			for {
				// Delay after each request
				time.Sleep(reloadDelay)
				// Attempt to reload the config
				newCnf, newErr := fromEnvironment()
				if newErr != nil {
					log.WARNING.Printf("Failed to reload config from the environment: %v", newErr)
					continue
				}
				// NOTE(review): this write is not synchronized with readers of
				// *cnf — a concurrent reader may observe a torn config. TODO
				// confirm whether this race is acceptable.
				*cnf = *newCnf
				// log.INFO.Printf("Successfully reloaded config from the environment")
			}
		}()
	}
	return cnf, nil
}
// fromEnvironment builds a Config by layering environment variables on top
// of the package defaults.
func fromEnvironment() (*Config, error) {
	// cnf starts from the defaults and has env vars applied on top;
	// loadedCnf starts from a zero Config so we can tell which sections the
	// environment actually provided.
	loadedCnf, cnf := new(Config), new(Config)
	*cnf = *defaultCnf
	if err := envconfig.Process("", cnf); err != nil {
		return nil, err
	}
	if err := envconfig.Process("", loadedCnf); err != nil {
		return nil, err
	}
	// If the environment set no AMQP values, drop the AMQP defaults entirely
	// (presumably so only an explicitly configured AMQP section survives —
	// verify against the brokers' expectations).
	if loadedCnf.AMQP == nil {
		cnf.AMQP = nil
	}
	return cnf, nil
}
| @@ -0,0 +1,83 @@ | |||
| package config | |||
| import ( | |||
| "fmt" | |||
| "os" | |||
| "time" | |||
| "github.com/RichardKnop/machinery/v1/log" | |||
| "gopkg.in/yaml.v2" | |||
| ) | |||
// NewFromYaml creates a config object from YAML file.
// When keepReloading is true, a background goroutine re-reads the file every
// reloadDelay and updates the returned config in place.
func NewFromYaml(cnfPath string, keepReloading bool) (*Config, error) {
	cnf, err := fromFile(cnfPath)
	if err != nil {
		return nil, err
	}
	log.INFO.Printf("Successfully loaded config from file %s", cnfPath)
	if keepReloading {
		// Open a goroutine to watch remote changes forever
		go func() {
			for {
				// Delay after each request
				time.Sleep(reloadDelay)
				// Attempt to reload the config
				newCnf, newErr := fromFile(cnfPath)
				if newErr != nil {
					log.WARNING.Printf("Failed to reload config from file %s: %v", cnfPath, newErr)
					continue
				}
				// NOTE(review): unsynchronized write; concurrent readers of
				// *cnf may observe a torn config — TODO confirm acceptable.
				*cnf = *newCnf
				// log.INFO.Printf("Successfully reloaded config from file %s", cnfPath)
			}
		}()
	}
	return cnf, nil
}
| // ReadFromFile reads data from a file | |||
| func ReadFromFile(cnfPath string) ([]byte, error) { | |||
| file, err := os.Open(cnfPath) | |||
| // Config file not found | |||
| if err != nil { | |||
| return nil, fmt.Errorf("Open file error: %s", err) | |||
| } | |||
| // Config file found, let's try to read it | |||
| data := make([]byte, 1000) | |||
| count, err := file.Read(data) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("Read from file error: %s", err) | |||
| } | |||
| return data[:count], nil | |||
| } | |||
// fromFile builds a Config by layering the YAML file at cnfPath on top of
// the package defaults.
func fromFile(cnfPath string) (*Config, error) {
	// cnf starts from the defaults and has the YAML applied on top;
	// loadedCnf starts from a zero Config so we can tell whether the file
	// actually provided an amqp section.
	loadedCnf, cnf := new(Config), new(Config)
	*cnf = *defaultCnf
	data, err := ReadFromFile(cnfPath)
	if err != nil {
		return nil, err
	}
	if err := yaml.Unmarshal(data, cnf); err != nil {
		return nil, fmt.Errorf("Unmarshal YAML error: %s", err)
	}
	if err := yaml.Unmarshal(data, loadedCnf); err != nil {
		return nil, fmt.Errorf("Unmarshal YAML error: %s", err)
	}
	// If the file had no amqp section, drop the AMQP defaults entirely
	// (presumably so only an explicitly configured AMQP section survives —
	// verify against the brokers' expectations).
	if loadedCnf.AMQP == nil {
		cnf.AMQP = nil
	}
	return cnf, nil
}
| @@ -0,0 +1,9 @@ | |||
| BROKER=broker | |||
| DEFAULT_QUEUE=default_queue | |||
| RESULT_BACKEND=result_backend | |||
| RESULTS_EXPIRE_IN=123456 | |||
| AMQP_BINDING_KEY=binding_key | |||
| AMQP_EXCHANGE=exchange | |||
| AMQP_EXCHANGE_TYPE=exchange_type | |||
| AMQP_PREFETCH_COUNT=123 | |||
| AMQP_QUEUE_BINDING_ARGS=image-type:png,x-match:any | |||
| @@ -0,0 +1,13 @@ | |||
| --- | |||
| broker: broker | |||
| default_queue: default_queue | |||
| result_backend: result_backend | |||
| results_expire_in: 123456 | |||
| amqp: | |||
| binding_key: binding_key | |||
| exchange: exchange | |||
| exchange_type: exchange_type | |||
| prefetch_count: 123 | |||
| queue_binding_args: | |||
| image-type: png | |||
| x-match: any | |||
| @@ -0,0 +1,268 @@ | |||
| package machinery | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| neturl "net/url" | |||
| "os" | |||
| "strconv" | |||
| "strings" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| amqpbroker "github.com/RichardKnop/machinery/v1/brokers/amqp" | |||
| eagerbroker "github.com/RichardKnop/machinery/v1/brokers/eager" | |||
| gcppubsubbroker "github.com/RichardKnop/machinery/v1/brokers/gcppubsub" | |||
| brokeriface "github.com/RichardKnop/machinery/v1/brokers/iface" | |||
| redisbroker "github.com/RichardKnop/machinery/v1/brokers/redis" | |||
| sqsbroker "github.com/RichardKnop/machinery/v1/brokers/sqs" | |||
| amqpbackend "github.com/RichardKnop/machinery/v1/backends/amqp" | |||
| dynamobackend "github.com/RichardKnop/machinery/v1/backends/dynamodb" | |||
| eagerbackend "github.com/RichardKnop/machinery/v1/backends/eager" | |||
| backendiface "github.com/RichardKnop/machinery/v1/backends/iface" | |||
| memcachebackend "github.com/RichardKnop/machinery/v1/backends/memcache" | |||
| mongobackend "github.com/RichardKnop/machinery/v1/backends/mongo" | |||
| nullbackend "github.com/RichardKnop/machinery/v1/backends/null" | |||
| redisbackend "github.com/RichardKnop/machinery/v1/backends/redis" | |||
| ) | |||
// BrokerFactory creates a new object of iface.Broker
// Currently only AMQP/S broker is supported
// NOTE(review): the sentence above is stale — redis, redis+socket, eager,
// SQS and GCP Pub/Sub brokers are all constructed below. Dispatch is by
// URL-scheme prefix of cnf.Broker.
func BrokerFactory(cnf *config.Config) (brokeriface.Broker, error) {
	if strings.HasPrefix(cnf.Broker, "amqp://") {
		return amqpbroker.New(cnf), nil
	}
	if strings.HasPrefix(cnf.Broker, "amqps://") {
		return amqpbroker.New(cnf), nil
	}
	if strings.HasPrefix(cnf.Broker, "redis://") {
		// Shape check before handing the URL to ParseRedisURL.
		parts := strings.Split(cnf.Broker, "redis://")
		if len(parts) != 2 {
			return nil, fmt.Errorf(
				"Redis broker connection string should be in format redis://host:port, instead got %s",
				cnf.Broker,
			)
		}
		redisHost, redisPassword, redisDB, err := ParseRedisURL(cnf.Broker)
		if err != nil {
			return nil, err
		}
		return redisbroker.New(cnf, redisHost, redisPassword, "", redisDB), nil
	}
	if strings.HasPrefix(cnf.Broker, "redis+socket://") {
		redisSocket, redisPassword, redisDB, err := ParseRedisSocketURL(cnf.Broker)
		if err != nil {
			return nil, err
		}
		return redisbroker.New(cnf, "", redisPassword, redisSocket, redisDB), nil
	}
	if strings.HasPrefix(cnf.Broker, "eager") {
		return eagerbroker.New(), nil
	}
	if _, ok := os.LookupEnv("DISABLE_STRICT_SQS_CHECK"); ok {
		//disable SQS name check, so that users can use this with local simulated SQS
		//where sql broker url might not start with https://sqs
		//even when disabling strict SQS naming check, make sure its still a valid http URL
		if strings.HasPrefix(cnf.Broker, "https://") || strings.HasPrefix(cnf.Broker, "http://") {
			return sqsbroker.New(cnf), nil
		}
	} else {
		if strings.HasPrefix(cnf.Broker, "https://sqs") {
			return sqsbroker.New(cnf), nil
		}
	}
	if strings.HasPrefix(cnf.Broker, "gcppubsub://") {
		projectID, subscriptionName, err := ParseGCPPubSubURL(cnf.Broker)
		if err != nil {
			return nil, err
		}
		return gcppubsubbroker.New(cnf, projectID, subscriptionName)
	}
	// No prefix matched: unsupported broker URL.
	return nil, fmt.Errorf("Factory failed with broker URL: %v", cnf.Broker)
}
// BackendFactory creates a new object of backends.Interface
// Currently supported backends are AMQP/S and Memcache
// NOTE(review): the sentence above is stale — redis, redis+socket, mongodb,
// eager, null and DynamoDB backends are also constructed below. Dispatch is
// by URL-scheme prefix of cnf.ResultBackend.
func BackendFactory(cnf *config.Config) (backendiface.Backend, error) {
	if strings.HasPrefix(cnf.ResultBackend, "amqp://") {
		return amqpbackend.New(cnf), nil
	}
	if strings.HasPrefix(cnf.ResultBackend, "amqps://") {
		return amqpbackend.New(cnf), nil
	}
	if strings.HasPrefix(cnf.ResultBackend, "memcache://") {
		// Everything after the scheme is a comma-separated server list.
		parts := strings.Split(cnf.ResultBackend, "memcache://")
		if len(parts) != 2 {
			return nil, fmt.Errorf(
				"Memcache result backend connection string should be in format memcache://server1:port,server2:port, instead got %s",
				cnf.ResultBackend,
			)
		}
		servers := strings.Split(parts[1], ",")
		return memcachebackend.New(cnf, servers), nil
	}
	if strings.HasPrefix(cnf.ResultBackend, "redis://") {
		redisHost, redisPassword, redisDB, err := ParseRedisURL(cnf.ResultBackend)
		if err != nil {
			return nil, err
		}
		return redisbackend.New(cnf, redisHost, redisPassword, "", redisDB), nil
	}
	if strings.HasPrefix(cnf.ResultBackend, "redis+socket://") {
		redisSocket, redisPassword, redisDB, err := ParseRedisSocketURL(cnf.ResultBackend)
		if err != nil {
			return nil, err
		}
		return redisbackend.New(cnf, "", redisPassword, redisSocket, redisDB), nil
	}
	if strings.HasPrefix(cnf.ResultBackend, "mongodb://") ||
		strings.HasPrefix(cnf.ResultBackend, "mongodb+srv://") {
		return mongobackend.New(cnf)
	}
	if strings.HasPrefix(cnf.ResultBackend, "eager") {
		return eagerbackend.New(), nil
	}
	if strings.HasPrefix(cnf.ResultBackend, "null") {
		return nullbackend.New(), nil
	}
	if strings.HasPrefix(cnf.ResultBackend, "https://dynamodb") {
		return dynamobackend.New(cnf), nil
	}
	// No prefix matched: unsupported result backend URL.
	return nil, fmt.Errorf("Factory failed with result backend: %v", cnf.ResultBackend)
}
// ParseRedisURL extracts the host, password and database number from a URL
// of the form redis://password@host:port/db. A missing or non-numeric db
// segment selects database 0.
func ParseRedisURL(url string) (host, password string, db int, err error) {
	var u *neturl.URL
	u, err = neturl.Parse(url)
	if err != nil {
		return
	}
	if u.Scheme != "redis" {
		err = errors.New("No redis scheme found")
		return
	}
	// The password may appear as either the password or the username part
	// of the userinfo section.
	if u.User != nil {
		if pw, ok := u.User.Password(); ok {
			password = pw
		} else {
			password = u.User.Username()
		}
	}
	host = u.Host
	// First path segment (if any) selects the database; anything that is
	// not an integer falls back to the default database 0.
	segment := strings.SplitN(strings.TrimPrefix(u.Path, "/"), "/", 2)[0]
	if n, convErr := strconv.Atoi(segment); convErr == nil {
		db = n
	}
	return
}
// ParseRedisSocketURL extracts Redis connection options from a URL with the
// redis+socket:// scheme. This scheme is not standard (or even de facto) and
// is used as a transitional mechanism until the the config package gains the
// proper facilities to support socket-based connections.
func ParseRedisSocketURL(url string) (path, password string, db int, err error) {
	const badFormat = "Redis socket connection string should be in format redis+socket://password@/path/to/file.sock:/db, instead got %s"
	pieces := strings.Split(url, "redis+socket://")
	if pieces[0] != "" {
		err = errors.New("No redis scheme found")
		return
	}
	// Expected shape: redis+socket://password@/path/to/file.soc:/db
	if len(pieces) != 2 {
		err = fmt.Errorf(badFormat, url)
		return
	}
	rest := pieces[1]
	// Strip an optional leading "password@" prefix.
	if credentials := strings.SplitN(rest, "@", 2); len(credentials) == 2 {
		password, rest = credentials[0], credentials[1]
	} else {
		rest = credentials[0]
	}
	// The socket path runs up to the first ":".
	pathAndDB := strings.SplitN(rest, ":", 2)
	path = pathAndDB[0]
	if path == "" {
		err = fmt.Errorf(badFormat, url)
		return
	}
	if len(pathAndDB) == 2 {
		rest = pathAndDB[1]
	}
	// An optional "/db" suffix selects the database number.
	if dbParts := strings.SplitN(rest, "/", 2); len(dbParts) == 2 {
		db, _ = strconv.Atoi(dbParts[1])
	}
	return
}
// ParseGCPPubSubURL Parse GCP Pub/Sub URL
// url: gcppubsub://YOUR_GCP_PROJECT_ID/YOUR_PUBSUB_SUBSCRIPTION_NAME
func ParseGCPPubSubURL(url string) (string, string, error) {
	badFormat := fmt.Errorf("gcppubsub scheme should be in format gcppubsub://YOUR_GCP_PROJECT_ID/YOUR_PUBSUB_SUBSCRIPTION_NAME, instead got %s", url)
	pieces := strings.Split(url, "gcppubsub://")
	if pieces[0] != "" {
		return "", "", errors.New("No gcppubsub scheme found")
	}
	if len(pieces) != 2 {
		return "", "", badFormat
	}
	// Expect exactly "<project>/<subscription>", both segments non-empty.
	segments := strings.Split(pieces[1], "/")
	if len(segments) != 2 || segments[0] == "" || segments[1] == "" {
		return "", "", badFormat
	}
	return segments[0], segments[1], nil
}
| @@ -0,0 +1,54 @@ | |||
| package log | |||
| import ( | |||
| "github.com/RichardKnop/logging" | |||
| ) | |||
var (
	// logger is the package-wide leveled logger backing the exported
	// per-level handles below.
	logger = logging.New(nil, nil, new(logging.ColouredFormatter))

	// DEBUG is the logger used for DEBUG level logs.
	DEBUG = logger[logging.DEBUG]
	// INFO is the logger used for INFO level logs.
	INFO = logger[logging.INFO]
	// WARNING is the logger used for WARNING level logs.
	WARNING = logger[logging.WARNING]
	// ERROR is the logger used for ERROR level logs.
	ERROR = logger[logging.ERROR]
	// FATAL is the logger used for FATAL level logs.
	FATAL = logger[logging.FATAL]
)
| // Set sets a custom logger for all log levels | |||
| func Set(l logging.LoggerInterface) { | |||
| DEBUG = l | |||
| INFO = l | |||
| WARNING = l | |||
| ERROR = l | |||
| FATAL = l | |||
| } | |||
// SetDebug sets a custom logger for DEBUG level logs.
// NOTE(review): these setters swap package-level variables without
// synchronization — call them during start-up, before logging begins.
func SetDebug(l logging.LoggerInterface) {
	DEBUG = l
}

// SetInfo sets a custom logger for INFO level logs.
func SetInfo(l logging.LoggerInterface) {
	INFO = l
}

// SetWarning sets a custom logger for WARNING level logs.
func SetWarning(l logging.LoggerInterface) {
	WARNING = l
}

// SetError sets a custom logger for ERROR level logs.
func SetError(l logging.LoggerInterface) {
	ERROR = l
}

// SetFatal sets a custom logger for FATAL level logs.
func SetFatal(l logging.LoggerInterface) {
	FATAL = l
}
| @@ -0,0 +1 @@ | |||
| package machinery | |||
| @@ -0,0 +1,20 @@ | |||
| package retry | |||
// Fibonacci returns a stateful generator producing successive Fibonacci
// numbers starting from 1 (1, 1, 2, 3, 5, ...).
func Fibonacci() func() int {
	prev, curr := 0, 1
	return func() int {
		prev, curr = curr, prev+curr
		return prev
	}
}

// FibonacciNext returns next number in Fibonacci sequence greater than start.
func FibonacciNext(start int) int {
	next := Fibonacci()
	n := next()
	for n <= start {
		n = next()
	}
	return n
}
| @@ -0,0 +1,31 @@ | |||
| package retry | |||
| import ( | |||
| "fmt" | |||
| "time" | |||
| "github.com/RichardKnop/machinery/v1/log" | |||
| ) | |||
// Closure - a useful closure we can use when there is a problem
// connecting to the broker. It uses Fibonacci sequence to space out retry attempts.
// The returned function sleeps for the current backoff (in seconds) and then
// advances the backoff to the next Fibonacci number; the very first call
// returns immediately because retryIn starts at 0. A send on stopChan aborts
// the sleep early.
var Closure = func() func(chan int) {
	retryIn := 0
	fibonacci := Fibonacci()
	return func(stopChan chan int) {
		if retryIn > 0 {
			durationString := fmt.Sprintf("%vs", retryIn)
			duration, _ := time.ParseDuration(durationString)
			log.WARNING.Printf("Retrying in %v seconds", retryIn)
			// Wait for whichever comes first: the backoff elapsing or a
			// stop signal.
			select {
			case <-stopChan:
				break
			case <-time.After(duration):
				break
			}
		}
		retryIn = fibonacci()
	}
}
| @@ -0,0 +1,329 @@ | |||
| package machinery | |||
| import ( | |||
| "context" | |||
| "errors" | |||
| "fmt" | |||
| "sync" | |||
| "github.com/RichardKnop/machinery/v1/backends/result" | |||
| "github.com/RichardKnop/machinery/v1/brokers/eager" | |||
| "github.com/RichardKnop/machinery/v1/config" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| "github.com/RichardKnop/machinery/v1/tracing" | |||
| "github.com/google/uuid" | |||
| backendsiface "github.com/RichardKnop/machinery/v1/backends/iface" | |||
| brokersiface "github.com/RichardKnop/machinery/v1/brokers/iface" | |||
| opentracing "github.com/opentracing/opentracing-go" | |||
| ) | |||
// Server is the main Machinery object and stores all configuration
// All the tasks workers process are registered against the server
type Server struct {
	config            *config.Config
	registeredTasks   map[string]interface{}
	broker            brokersiface.Broker
	backend           backendsiface.Backend
	// prePublishHandler, when set, is invoked with each signature just
	// before it is published (see SendTaskWithContext).
	prePublishHandler func(*tasks.Signature)
}

// NewServerWithBrokerBackend returns a Server wired to the given broker and
// backend without consulting the factories.
func NewServerWithBrokerBackend(cnf *config.Config, brokerServer brokersiface.Broker, backendServer backendsiface.Backend) *Server {
	return &Server{
		config:          cnf,
		registeredTasks: make(map[string]interface{}),
		broker:          brokerServer,
		backend:         backendServer,
	}
}

// NewServer creates Server instance, building the broker and backend from
// the configured connection URLs.
func NewServer(cnf *config.Config) (*Server, error) {
	broker, err := BrokerFactory(cnf)
	if err != nil {
		return nil, err
	}
	// Backend is optional so we ignore the error
	backend, _ := BackendFactory(cnf)
	srv := NewServerWithBrokerBackend(cnf, broker, backend)
	// init for eager-mode
	eager, ok := broker.(eager.Mode)
	if ok {
		// we don't have to call worker.Launch in eager mode
		eager.AssignWorker(srv.NewWorker("eager", 0))
	}
	return srv, nil
}
| // NewWorker creates Worker instance | |||
| func (server *Server) NewWorker(consumerTag string, concurrency int) *Worker { | |||
| return &Worker{ | |||
| server: server, | |||
| ConsumerTag: consumerTag, | |||
| Concurrency: concurrency, | |||
| Queue: "", | |||
| } | |||
| } | |||
// NewCustomQueueWorker creates Worker instance with Custom Queue.
// The queue argument presumably overrides the configured default queue for
// this worker — verify against Worker's consume logic.
func (server *Server) NewCustomQueueWorker(consumerTag string, concurrency int, queue string) *Worker {
	return &Worker{
		server:      server,
		ConsumerTag: consumerTag,
		Concurrency: concurrency,
		Queue:       queue,
	}
}
// GetBroker returns broker
func (server *Server) GetBroker() brokersiface.Broker {
	return server.broker
}

// SetBroker sets broker
func (server *Server) SetBroker(broker brokersiface.Broker) {
	server.broker = broker
}

// GetBackend returns backend
func (server *Server) GetBackend() backendsiface.Backend {
	return server.backend
}

// SetBackend sets backend
func (server *Server) SetBackend(backend backendsiface.Backend) {
	server.backend = backend
}

// GetConfig returns connection object
func (server *Server) GetConfig() *config.Config {
	return server.config
}

// SetConfig sets config
func (server *Server) SetConfig(cnf *config.Config) {
	server.config = cnf
}

// SetPreTaskHandler Sets pre publish handler.
// The handler is invoked with each signature just before it is published
// (see SendTaskWithContext).
func (server *Server) SetPreTaskHandler(handler func(*tasks.Signature)) {
	server.prePublishHandler = handler
}
| // RegisterTasks registers all tasks at once | |||
| func (server *Server) RegisterTasks(namedTaskFuncs map[string]interface{}) error { | |||
| for _, task := range namedTaskFuncs { | |||
| if err := tasks.ValidateTask(task); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| server.registeredTasks = namedTaskFuncs | |||
| server.broker.SetRegisteredTaskNames(server.GetRegisteredTaskNames()) | |||
| return nil | |||
| } | |||
// RegisterTask registers a single task
func (server *Server) RegisterTask(name string, taskFunc interface{}) error {
	if err := tasks.ValidateTask(taskFunc); err != nil {
		return err
	}
	server.registeredTasks[name] = taskFunc
	// Keep the broker's view of registered task names in sync.
	server.broker.SetRegisteredTaskNames(server.GetRegisteredTaskNames())
	return nil
}

// IsTaskRegistered returns true if the task name is registered with this broker
func (server *Server) IsTaskRegistered(name string) bool {
	_, ok := server.registeredTasks[name]
	return ok
}

// GetRegisteredTask returns registered task by name
func (server *Server) GetRegisteredTask(name string) (interface{}, error) {
	taskFunc, ok := server.registeredTasks[name]
	if !ok {
		return nil, fmt.Errorf("Task not registered error: %s", name)
	}
	return taskFunc, nil
}
// SendTaskWithContext will inject the trace context in the signature headers before publishing it
func (server *Server) SendTaskWithContext(ctx context.Context, signature *tasks.Signature) (*result.AsyncResult, error) {
	span, _ := opentracing.StartSpanFromContext(ctx, "SendTask", tracing.ProducerOption(), tracing.MachineryTag)
	defer span.Finish()

	// tag the span with some info about the signature
	signature.Headers = tracing.HeadersWithSpan(signature.Headers, span)

	// Make sure result backend is defined
	if server.backend == nil {
		return nil, errors.New("Result backend required")
	}

	// Auto generate a UUID if not set already
	if signature.UUID == "" {
		taskID := uuid.New().String()
		signature.UUID = fmt.Sprintf("task_%v", taskID)
	}

	// Set initial task state to PENDING; this must happen before Publish so the
	// state exists by the time a worker can pick the task up.
	if err := server.backend.SetStatePending(signature); err != nil {
		return nil, fmt.Errorf("Set state pending error: %s", err)
	}

	// Give the caller's hook a last chance to inspect/mutate the signature
	// just before it goes onto the broker.
	if server.prePublishHandler != nil {
		server.prePublishHandler(signature)
	}

	if err := server.broker.Publish(ctx, signature); err != nil {
		return nil, fmt.Errorf("Publish message error: %s", err)
	}

	return result.NewAsyncResult(signature, server.backend), nil
}
| // SendTask publishes a task to the default queue | |||
| func (server *Server) SendTask(signature *tasks.Signature) (*result.AsyncResult, error) { | |||
| return server.SendTaskWithContext(context.Background(), signature) | |||
| } | |||
// SendChainWithContext will inject the trace context in all the signature headers before publishing it
//
// NOTE(review): ctx is only used to start the tracing span here; SendChain
// publishes via SendTask, which uses context.Background(), so the incoming ctx
// is not propagated to the broker publish — confirm whether that is intended.
func (server *Server) SendChainWithContext(ctx context.Context, chain *tasks.Chain) (*result.ChainAsyncResult, error) {
	span, _ := opentracing.StartSpanFromContext(ctx, "SendChain", tracing.ProducerOption(), tracing.MachineryTag, tracing.WorkflowChainTag)
	defer span.Finish()

	tracing.AnnotateSpanWithChainInfo(span, chain)

	return server.SendChain(chain)
}
| // SendChain triggers a chain of tasks | |||
| func (server *Server) SendChain(chain *tasks.Chain) (*result.ChainAsyncResult, error) { | |||
| _, err := server.SendTask(chain.Tasks[0]) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return result.NewChainAsyncResult(chain.Tasks, server.backend), nil | |||
| } | |||
// SendGroupWithContext will inject the trace context in all the signature headers before publishing it
func (server *Server) SendGroupWithContext(ctx context.Context, group *tasks.Group, sendConcurrency int) ([]*result.AsyncResult, error) {
	span, _ := opentracing.StartSpanFromContext(ctx, "SendGroup", tracing.ProducerOption(), tracing.MachineryTag, tracing.WorkflowGroupTag)
	defer span.Finish()

	tracing.AnnotateSpanWithGroupInfo(span, group, sendConcurrency)

	// Make sure result backend is defined
	if server.backend == nil {
		return nil, errors.New("Result backend required")
	}

	asyncResults := make([]*result.AsyncResult, len(group.Tasks))

	var wg sync.WaitGroup
	wg.Add(len(group.Tasks))
	// Buffered to 2x the task count so both the pending-state loop below and
	// the publish goroutines can report errors without blocking.
	errorsChan := make(chan error, len(group.Tasks)*2)

	// Init group
	server.backend.InitGroup(group.GroupUUID, group.GetUUIDs())

	// Init the tasks Pending state first
	// NOTE(review): a SetStatePending failure is reported on errorsChan but the
	// task is still published below — confirm that is intended.
	for _, signature := range group.Tasks {
		if err := server.backend.SetStatePending(signature); err != nil {
			errorsChan <- err
			continue
		}
	}

	// Token pool bounding concurrent publishes when sendConcurrency > 0.
	// It is filled asynchronously so this function never blocks here; with
	// sendConcurrency == 0 the pool is simply never used (unbounded).
	pool := make(chan struct{}, sendConcurrency)
	go func() {
		for i := 0; i < sendConcurrency; i++ {
			pool <- struct{}{}
		}
	}()

	for i, signature := range group.Tasks {
		// Acquire a token before launching the next publisher.
		if sendConcurrency > 0 {
			<-pool
		}

		go func(s *tasks.Signature, index int) {
			defer wg.Done()

			// Publish task
			err := server.broker.Publish(ctx, s)

			// Return the token so the next publish may start.
			if sendConcurrency > 0 {
				pool <- struct{}{}
			}

			if err != nil {
				errorsChan <- fmt.Errorf("Publish message error: %s", err)
				return
			}

			asyncResults[index] = result.NewAsyncResult(s, server.backend)
		}(signature, i)
	}

	done := make(chan int)
	go func() {
		wg.Wait()
		done <- 1
	}()

	// Return on the first error, or once every publish goroutine has finished.
	// NOTE(review): on the error path the remaining publish goroutines keep
	// running in the background and may still write into asyncResults.
	select {
	case err := <-errorsChan:
		return asyncResults, err
	case <-done:
		return asyncResults, nil
	}
}
| // SendGroup triggers a group of parallel tasks | |||
| func (server *Server) SendGroup(group *tasks.Group, sendConcurrency int) ([]*result.AsyncResult, error) { | |||
| return server.SendGroupWithContext(context.Background(), group, sendConcurrency) | |||
| } | |||
| // SendChordWithContext will inject the trace context in all the signature headers before publishing it | |||
| func (server *Server) SendChordWithContext(ctx context.Context, chord *tasks.Chord, sendConcurrency int) (*result.ChordAsyncResult, error) { | |||
| span, _ := opentracing.StartSpanFromContext(ctx, "SendChord", tracing.ProducerOption(), tracing.MachineryTag, tracing.WorkflowChordTag) | |||
| defer span.Finish() | |||
| tracing.AnnotateSpanWithChordInfo(span, chord, sendConcurrency) | |||
| _, err := server.SendGroupWithContext(ctx, chord.Group, sendConcurrency) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return result.NewChordAsyncResult( | |||
| chord.Group.Tasks, | |||
| chord.Callback, | |||
| server.backend, | |||
| ), nil | |||
| } | |||
| // SendChord triggers a group of parallel tasks with a callback | |||
| func (server *Server) SendChord(chord *tasks.Chord, sendConcurrency int) (*result.ChordAsyncResult, error) { | |||
| return server.SendChordWithContext(context.Background(), chord, sendConcurrency) | |||
| } | |||
| // GetRegisteredTaskNames returns slice of registered task names | |||
| func (server *Server) GetRegisteredTaskNames() []string { | |||
| taskNames := make([]string, len(server.registeredTasks)) | |||
| var i = 0 | |||
| for name := range server.registeredTasks { | |||
| taskNames[i] = name | |||
| i++ | |||
| } | |||
| return taskNames | |||
| } | |||
| @@ -0,0 +1,32 @@ | |||
| package tasks | |||
| import ( | |||
| "fmt" | |||
| "time" | |||
| ) | |||
// ErrRetryTaskLater is returned from a task func to signal that the task
// should not be treated as failed but re-attempted after the given delay.
type ErrRetryTaskLater struct {
	name, msg string
	retryIn   time.Duration
}

// NewErrRetryTaskLater returns new ErrRetryTaskLater instance
func NewErrRetryTaskLater(msg string, retryIn time.Duration) ErrRetryTaskLater {
	return ErrRetryTaskLater{msg: msg, retryIn: retryIn}
}

// RetryIn returns time.Duration from now when task should be retried
func (e ErrRetryTaskLater) RetryIn() time.Duration {
	return e.retryIn
}

// Error implements the error interface
func (e ErrRetryTaskLater) Error() string {
	return fmt.Sprintf("Task error: %s Will retry in: %s", e.msg, e.retryIn)
}

// Retriable is interface that retriable errors should implement
type Retriable interface {
	RetryIn() time.Duration
}
| @@ -0,0 +1,352 @@ | |||
| package tasks | |||
| import ( | |||
| "context" | |||
| "encoding/base64" | |||
| "encoding/json" | |||
| "fmt" | |||
| "reflect" | |||
| "strings" | |||
| ) | |||
var (
	// typesMap maps the type-name strings accepted in task signatures to the
	// corresponding reflect.Type used when decoding argument values.
	typesMap = map[string]reflect.Type{
		// base types
		"bool":    reflect.TypeOf(true),
		"int":     reflect.TypeOf(int(1)),
		"int8":    reflect.TypeOf(int8(1)),
		"int16":   reflect.TypeOf(int16(1)),
		"int32":   reflect.TypeOf(int32(1)),
		"int64":   reflect.TypeOf(int64(1)),
		"uint":    reflect.TypeOf(uint(1)),
		"uint8":   reflect.TypeOf(uint8(1)),
		"uint16":  reflect.TypeOf(uint16(1)),
		"uint32":  reflect.TypeOf(uint32(1)),
		"uint64":  reflect.TypeOf(uint64(1)),
		"float32": reflect.TypeOf(float32(0.5)),
		"float64": reflect.TypeOf(float64(0.5)),
		"string":  reflect.TypeOf(string("")),
		// slices
		"[]bool":    reflect.TypeOf(make([]bool, 0)),
		"[]int":     reflect.TypeOf(make([]int, 0)),
		"[]int8":    reflect.TypeOf(make([]int8, 0)),
		"[]int16":   reflect.TypeOf(make([]int16, 0)),
		"[]int32":   reflect.TypeOf(make([]int32, 0)),
		"[]int64":   reflect.TypeOf(make([]int64, 0)),
		"[]uint":    reflect.TypeOf(make([]uint, 0)),
		"[]uint8":   reflect.TypeOf(make([]uint8, 0)),
		"[]uint16":  reflect.TypeOf(make([]uint16, 0)),
		"[]uint32":  reflect.TypeOf(make([]uint32, 0)),
		"[]uint64":  reflect.TypeOf(make([]uint64, 0)),
		"[]float32": reflect.TypeOf(make([]float32, 0)),
		"[]float64": reflect.TypeOf(make([]float64, 0)),
		"[]byte":    reflect.TypeOf(make([]byte, 0)),
		"[]string":  reflect.TypeOf([]string{""}),
	}

	// ctxType is the context.Context interface type, used to detect
	// context-aware task funcs (see IsContextType).
	ctxType = reflect.TypeOf((*context.Context)(nil)).Elem()

	// typeConversionError builds the error returned when a raw value cannot
	// be converted to the requested type.
	typeConversionError = func(argValue interface{}, argTypeStr string) error {
		return fmt.Errorf("%v is not %v", argValue, argTypeStr)
	}
)
// ErrUnsupportedType signals that a type-name string is not present in typesMap.
type ErrUnsupportedType struct {
	valueType string
}

// NewErrUnsupportedType returns new ErrUnsupportedType
func NewErrUnsupportedType(valueType string) ErrUnsupportedType {
	return ErrUnsupportedType{valueType: valueType}
}

// Error method so we implement the error interface
func (e ErrUnsupportedType) Error() string {
	return fmt.Sprintf("%v is not one of supported types", e.valueType)
}
| // ReflectValue converts interface{} to reflect.Value based on string type | |||
| func ReflectValue(valueType string, value interface{}) (reflect.Value, error) { | |||
| if strings.HasPrefix(valueType, "[]") { | |||
| return reflectValues(valueType, value) | |||
| } | |||
| return reflectValue(valueType, value) | |||
| } | |||
| // reflectValue converts interface{} to reflect.Value based on string type | |||
| // representing a base type (not a slice) | |||
| func reflectValue(valueType string, value interface{}) (reflect.Value, error) { | |||
| theType, ok := typesMap[valueType] | |||
| if !ok { | |||
| return reflect.Value{}, NewErrUnsupportedType(valueType) | |||
| } | |||
| theValue := reflect.New(theType) | |||
| // Booleans | |||
| if theType.String() == "bool" { | |||
| boolValue, err := getBoolValue(theType.String(), value) | |||
| if err != nil { | |||
| return reflect.Value{}, err | |||
| } | |||
| theValue.Elem().SetBool(boolValue) | |||
| return theValue.Elem(), nil | |||
| } | |||
| // Integers | |||
| if strings.HasPrefix(theType.String(), "int") { | |||
| intValue, err := getIntValue(theType.String(), value) | |||
| if err != nil { | |||
| return reflect.Value{}, err | |||
| } | |||
| theValue.Elem().SetInt(intValue) | |||
| return theValue.Elem(), err | |||
| } | |||
| // Unsigned integers | |||
| if strings.HasPrefix(theType.String(), "uint") { | |||
| uintValue, err := getUintValue(theType.String(), value) | |||
| if err != nil { | |||
| return reflect.Value{}, err | |||
| } | |||
| theValue.Elem().SetUint(uintValue) | |||
| return theValue.Elem(), err | |||
| } | |||
| // Floating point numbers | |||
| if strings.HasPrefix(theType.String(), "float") { | |||
| floatValue, err := getFloatValue(theType.String(), value) | |||
| if err != nil { | |||
| return reflect.Value{}, err | |||
| } | |||
| theValue.Elem().SetFloat(floatValue) | |||
| return theValue.Elem(), err | |||
| } | |||
| // Strings | |||
| if theType.String() == "string" { | |||
| stringValue, err := getStringValue(theType.String(), value) | |||
| if err != nil { | |||
| return reflect.Value{}, err | |||
| } | |||
| theValue.Elem().SetString(stringValue) | |||
| return theValue.Elem(), nil | |||
| } | |||
| return reflect.Value{}, NewErrUnsupportedType(valueType) | |||
| } | |||
// reflectValues converts interface{} to reflect.Value based on string type
// representing a slice of values
func reflectValues(valueType string, value interface{}) (reflect.Value, error) {
	theType, ok := typesMap[valueType]
	if !ok {
		return reflect.Value{}, NewErrUnsupportedType(valueType)
	}

	// For NULL we return an empty slice
	if value == nil {
		return reflect.MakeSlice(theType, 0, 0), nil
	}

	var theValue reflect.Value

	// Booleans
	if theType.String() == "[]bool" {
		bools := reflect.ValueOf(value)

		theValue = reflect.MakeSlice(theType, bools.Len(), bools.Len())
		for i := 0; i < bools.Len(); i++ {
			// strings.Split(...)[1] extracts the element type name, e.g. "bool"
			boolValue, err := getBoolValue(strings.Split(theType.String(), "[]")[1], bools.Index(i).Interface())
			if err != nil {
				return reflect.Value{}, err
			}

			theValue.Index(i).SetBool(boolValue)
		}

		return theValue, nil
	}

	// Integers
	if strings.HasPrefix(theType.String(), "[]int") {
		ints := reflect.ValueOf(value)

		theValue = reflect.MakeSlice(theType, ints.Len(), ints.Len())
		for i := 0; i < ints.Len(); i++ {
			intValue, err := getIntValue(strings.Split(theType.String(), "[]")[1], ints.Index(i).Interface())
			if err != nil {
				return reflect.Value{}, err
			}

			theValue.Index(i).SetInt(intValue)
		}

		return theValue, nil
	}

	// Unsigned integers
	if strings.HasPrefix(theType.String(), "[]uint") || theType.String() == "[]byte" {
		// Decode the base64 string if the value type is []uint8 or it's alias []byte
		// See: https://golang.org/pkg/encoding/json/#Marshal
		// > Array and slice values encode as JSON arrays, except that []byte encodes as a base64-encoded string
		if reflect.TypeOf(value).String() == "string" {
			output, err := base64.StdEncoding.DecodeString(value.(string))
			if err != nil {
				return reflect.Value{}, err
			}
			value = output
		}

		uints := reflect.ValueOf(value)

		theValue = reflect.MakeSlice(theType, uints.Len(), uints.Len())
		for i := 0; i < uints.Len(); i++ {
			uintValue, err := getUintValue(strings.Split(theType.String(), "[]")[1], uints.Index(i).Interface())
			if err != nil {
				return reflect.Value{}, err
			}

			theValue.Index(i).SetUint(uintValue)
		}

		return theValue, nil
	}

	// Floating point numbers
	if strings.HasPrefix(theType.String(), "[]float") {
		floats := reflect.ValueOf(value)

		theValue = reflect.MakeSlice(theType, floats.Len(), floats.Len())
		for i := 0; i < floats.Len(); i++ {
			floatValue, err := getFloatValue(strings.Split(theType.String(), "[]")[1], floats.Index(i).Interface())
			if err != nil {
				return reflect.Value{}, err
			}

			theValue.Index(i).SetFloat(floatValue)
		}

		return theValue, nil
	}

	// Strings
	if theType.String() == "[]string" {
		strs := reflect.ValueOf(value)

		theValue = reflect.MakeSlice(theType, strs.Len(), strs.Len())
		for i := 0; i < strs.Len(); i++ {
			strValue, err := getStringValue(strings.Split(theType.String(), "[]")[1], strs.Index(i).Interface())
			if err != nil {
				return reflect.Value{}, err
			}

			theValue.Index(i).SetString(strValue)
		}

		return theValue, nil
	}

	return reflect.Value{}, NewErrUnsupportedType(valueType)
}
| func getBoolValue(theType string, value interface{}) (bool, error) { | |||
| b, ok := value.(bool) | |||
| if !ok { | |||
| return false, typeConversionError(value, typesMap[theType].String()) | |||
| } | |||
| return b, nil | |||
| } | |||
| func getIntValue(theType string, value interface{}) (int64, error) { | |||
| // We use https://golang.org/pkg/encoding/json/#Decoder.UseNumber when unmarshaling signatures. | |||
| // This is because JSON only supports 64-bit floating point numbers and we could lose precision | |||
| // when converting from float64 to signed integer | |||
| if strings.HasPrefix(fmt.Sprintf("%T", value), "json.Number") { | |||
| n, ok := value.(json.Number) | |||
| if !ok { | |||
| return 0, typeConversionError(value, typesMap[theType].String()) | |||
| } | |||
| return n.Int64() | |||
| } | |||
| n, ok := value.(int64) | |||
| if !ok { | |||
| return 0, typeConversionError(value, typesMap[theType].String()) | |||
| } | |||
| return n, nil | |||
| } | |||
| func getUintValue(theType string, value interface{}) (uint64, error) { | |||
| // We use https://golang.org/pkg/encoding/json/#Decoder.UseNumber when unmarshaling signatures. | |||
| // This is because JSON only supports 64-bit floating point numbers and we could lose precision | |||
| // when converting from float64 to unsigned integer | |||
| if strings.HasPrefix(fmt.Sprintf("%T", value), "json.Number") { | |||
| n, ok := value.(json.Number) | |||
| if !ok { | |||
| return 0, typeConversionError(value, typesMap[theType].String()) | |||
| } | |||
| intVal, err := n.Int64() | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| return uint64(intVal), nil | |||
| } | |||
| var n uint64 | |||
| switch value.(type) { | |||
| case uint64: | |||
| n = value.(uint64) | |||
| case uint8: | |||
| n = uint64(value.(uint8)) | |||
| default: | |||
| return 0, typeConversionError(value, typesMap[theType].String()) | |||
| } | |||
| return n, nil | |||
| } | |||
| func getFloatValue(theType string, value interface{}) (float64, error) { | |||
| // We use https://golang.org/pkg/encoding/json/#Decoder.UseNumber when unmarshaling signatures. | |||
| // This is because JSON only supports 64-bit floating point numbers and we could lose precision | |||
| if strings.HasPrefix(fmt.Sprintf("%T", value), "json.Number") { | |||
| n, ok := value.(json.Number) | |||
| if !ok { | |||
| return 0, typeConversionError(value, typesMap[theType].String()) | |||
| } | |||
| return n.Float64() | |||
| } | |||
| f, ok := value.(float64) | |||
| if !ok { | |||
| return 0, typeConversionError(value, typesMap[theType].String()) | |||
| } | |||
| return f, nil | |||
| } | |||
| func getStringValue(theType string, value interface{}) (string, error) { | |||
| s, ok := value.(string) | |||
| if !ok { | |||
| return "", typeConversionError(value, typesMap[theType].String()) | |||
| } | |||
| return s, nil | |||
| } | |||
// IsContextType checks to see if the type is a context.Context.
// The comparison is against the context.Context interface type itself, so
// only a parameter declared exactly as context.Context matches.
func IsContextType(t reflect.Type) bool {
	return t == ctxType
}
| @@ -0,0 +1,40 @@ | |||
| package tasks | |||
| import ( | |||
| "fmt" | |||
| "reflect" | |||
| "strings" | |||
| ) | |||
// TaskResult represents an actual return value of a processed task
type TaskResult struct {
	// Type is the string name of the Go type (e.g. "int64"), used by
	// ReflectValue to decode Value back into a real Go value.
	Type string `bson:"type"`
	// Value holds the raw result value as decoded from the wire format.
	Value interface{} `bson:"value"`
}
| // ReflectTaskResults ... | |||
| func ReflectTaskResults(taskResults []*TaskResult) ([]reflect.Value, error) { | |||
| resultValues := make([]reflect.Value, len(taskResults)) | |||
| for i, taskResult := range taskResults { | |||
| resultValue, err := ReflectValue(taskResult.Type, taskResult.Value) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| resultValues[i] = resultValue | |||
| } | |||
| return resultValues, nil | |||
| } | |||
// HumanReadableResults formats a slice of reflected return values for display:
// a single value is printed bare, multiple values as "[a, b, ...]".
func HumanReadableResults(results []reflect.Value) string {
	if len(results) == 1 {
		return fmt.Sprintf("%v", results[0].Interface())
	}

	parts := make([]string, 0, len(results))
	for _, r := range results {
		parts = append(parts, fmt.Sprintf("%v", r.Interface()))
	}
	return fmt.Sprintf("[%s]", strings.Join(parts, ", "))
}
| @@ -0,0 +1,74 @@ | |||
| package tasks | |||
| import ( | |||
| "fmt" | |||
| "time" | |||
| "github.com/google/uuid" | |||
| ) | |||
// Arg represents a single argument passed to invocation of a task
type Arg struct {
	// Name is an optional label for the argument.
	Name string `bson:"name"`
	// Type is the string type name (e.g. "int64", "[]string") used by
	// ReflectValue to decode Value into a real Go value.
	Type string `bson:"type"`
	// Value holds the raw argument value as decoded from the wire format.
	Value interface{} `bson:"value"`
}
// Headers represents the headers which should be used to direct the task
type Headers map[string]interface{}

// Set on Headers implements opentracing.TextMapWriter for trace propagation
func (h Headers) Set(key, val string) {
	h[key] = val
}

// ForeachKey on Headers implements opentracing.TextMapReader for trace propagation.
// It is essentially the same as the opentracing.TextMapReader implementation except
// for the added casting from interface{} to string.
func (h Headers) ForeachKey(handler func(key, val string) error) error {
	for key, value := range h {
		str, ok := value.(string)
		if !ok {
			// Skip any non string values
			continue
		}
		if err := handler(key, str); err != nil {
			return err
		}
	}
	return nil
}
// Signature represents a single task invocation
type Signature struct {
	// UUID uniquely identifies the task; auto-generated as "task_<uuid>"
	// when left empty (see NewSignature and Server.SendTaskWithContext).
	UUID string
	// Name is the registered task name the worker looks up the task func by.
	Name string
	// RoutingKey directs the message on the broker — presumably the queue /
	// routing key; confirm against the broker implementation.
	RoutingKey string
	// ETA, when set, is presumably the earliest time the task should run —
	// TODO confirm semantics against the broker/worker code.
	ETA *time.Time
	// GroupUUID links the task to its Group, if any.
	GroupUUID string
	// GroupTaskCount is the number of tasks in the enclosing group.
	GroupTaskCount int
	// Args are the positional arguments decoded and passed to the task func.
	Args []Arg
	// Headers carry broker message headers; also used to propagate the
	// opentracing span context (see Headers.Set / Headers.ForeachKey).
	Headers Headers
	// Immutable — presumably prevents predecessor results from being appended
	// to Args in a chain; confirm against the worker code.
	Immutable bool
	// RetryCount / RetryTimeout — presumably control retry behavior on
	// failure; confirm against the worker code.
	RetryCount   int
	RetryTimeout int
	// OnSuccess tasks are triggered after this task succeeds; chains are
	// built by wiring each task's successor here (see NewChain).
	OnSuccess []*Signature
	// OnError tasks are presumably triggered if this task fails — confirm
	// against the worker code.
	OnError []*Signature
	// ChordCallback is the callback signature of the enclosing chord, if any.
	ChordCallback *Signature
	//MessageGroupId for Broker, e.g. SQS
	BrokerMessageGroupId string
	//ReceiptHandle of SQS Message
	SQSReceiptHandle string
}
| // NewSignature creates a new task signature | |||
| func NewSignature(name string, args []Arg) (*Signature, error) { | |||
| signatureID := uuid.New().String() | |||
| return &Signature{ | |||
| UUID: fmt.Sprintf("task_%v", signatureID), | |||
| Name: name, | |||
| Args: args, | |||
| }, nil | |||
| } | |||
| @@ -0,0 +1,107 @@ | |||
| package tasks | |||
| import "time" | |||
// Lifecycle state names stored on a TaskState.
const (
	// StatePending - initial state of a task
	StatePending = "PENDING"
	// StateReceived - when task is received by a worker
	StateReceived = "RECEIVED"
	// StateStarted - when the worker starts processing the task
	StateStarted = "STARTED"
	// StateRetry - when failed task has been scheduled for retry
	StateRetry = "RETRY"
	// StateSuccess - when the task is processed successfully
	StateSuccess = "SUCCESS"
	// StateFailure - when processing of the task fails
	StateFailure = "FAILURE"
)
// TaskState represents a state of a task
type TaskState struct {
	// TaskUUID doubles as the document ID ("_id") in the backend store.
	TaskUUID string `bson:"_id"`
	TaskName string `bson:"task_name"`
	// State is one of the State* constants above.
	State string `bson:"state"`
	// Results holds the task's return values (populated on SUCCESS).
	Results []*TaskResult `bson:"results"`
	// Error holds the failure message (populated on FAILURE).
	Error     string    `bson:"error"`
	CreatedAt time.Time `bson:"created_at"`
}

// GroupMeta stores useful metadata about tasks within the same group
// E.g. UUIDs of all tasks which are used in order to check if all tasks
// completed successfully or not and thus whether to trigger chord callback
type GroupMeta struct {
	GroupUUID string   `bson:"_id"`
	TaskUUIDs []string `bson:"task_uuids"`
	// ChordTriggered records that the chord callback has already fired.
	ChordTriggered bool `bson:"chord_triggered"`
	// Lock — presumably guards against concurrent chord triggering; confirm
	// against the backend implementations.
	Lock      bool      `bson:"lock"`
	CreatedAt time.Time `bson:"created_at"`
}
| // NewPendingTaskState ... | |||
| func NewPendingTaskState(signature *Signature) *TaskState { | |||
| return &TaskState{ | |||
| TaskUUID: signature.UUID, | |||
| TaskName: signature.Name, | |||
| State: StatePending, | |||
| CreatedAt: time.Now().UTC(), | |||
| } | |||
| } | |||
| // NewReceivedTaskState ... | |||
| func NewReceivedTaskState(signature *Signature) *TaskState { | |||
| return &TaskState{ | |||
| TaskUUID: signature.UUID, | |||
| State: StateReceived, | |||
| } | |||
| } | |||
| // NewStartedTaskState ... | |||
| func NewStartedTaskState(signature *Signature) *TaskState { | |||
| return &TaskState{ | |||
| TaskUUID: signature.UUID, | |||
| State: StateStarted, | |||
| } | |||
| } | |||
| // NewSuccessTaskState ... | |||
| func NewSuccessTaskState(signature *Signature, results []*TaskResult) *TaskState { | |||
| return &TaskState{ | |||
| TaskUUID: signature.UUID, | |||
| State: StateSuccess, | |||
| Results: results, | |||
| } | |||
| } | |||
| // NewFailureTaskState ... | |||
| func NewFailureTaskState(signature *Signature, err string) *TaskState { | |||
| return &TaskState{ | |||
| TaskUUID: signature.UUID, | |||
| State: StateFailure, | |||
| Error: err, | |||
| } | |||
| } | |||
| // NewRetryTaskState ... | |||
| func NewRetryTaskState(signature *Signature) *TaskState { | |||
| return &TaskState{ | |||
| TaskUUID: signature.UUID, | |||
| State: StateRetry, | |||
| } | |||
| } | |||
| // IsCompleted returns true if state is SUCCESS or FAILURE, | |||
| // i.e. the task has finished processing and either succeeded or failed. | |||
| func (taskState *TaskState) IsCompleted() bool { | |||
| return taskState.IsSuccess() || taskState.IsFailure() | |||
| } | |||
| // IsSuccess returns true if state is SUCCESS | |||
| func (taskState *TaskState) IsSuccess() bool { | |||
| return taskState.State == StateSuccess | |||
| } | |||
| // IsFailure returns true if state is FAILURE | |||
| func (taskState *TaskState) IsFailure() bool { | |||
| return taskState.State == StateFailure | |||
| } | |||
| @@ -0,0 +1,201 @@ | |||
| package tasks | |||
| import ( | |||
| "context" | |||
| "errors" | |||
| "fmt" | |||
| "reflect" | |||
| "runtime/debug" | |||
| opentracing "github.com/opentracing/opentracing-go" | |||
| opentracing_ext "github.com/opentracing/opentracing-go/ext" | |||
| opentracing_log "github.com/opentracing/opentracing-go/log" | |||
| "github.com/RichardKnop/machinery/v1/log" | |||
| ) | |||
// ErrTaskPanicked is the error set when invoking the task func panicked with
// a value that is neither an error nor a string (see Task.Call's recover).
var ErrTaskPanicked = errors.New("Invoking task caused a panic")

// Task wraps a signature and methods used to reflect task arguments and
// return values after invoking the task
type Task struct {
	// TaskFunc is the reflected task function to invoke.
	TaskFunc reflect.Value
	// UseContext is true when the task func's first parameter is a context.Context.
	UseContext bool
	// Context is prepended as the first call argument when UseContext is true.
	Context context.Context
	// Args are the reflected positional arguments for the call.
	Args []reflect.Value
}
| type signatureCtxType struct{} | |||
| var signatureCtx signatureCtxType | |||
| // SignatureFromContext gets the signature from the context | |||
| func SignatureFromContext(ctx context.Context) *Signature { | |||
| if ctx == nil { | |||
| return nil | |||
| } | |||
| v := ctx.Value(signatureCtx) | |||
| if v == nil { | |||
| return nil | |||
| } | |||
| signature, _ := v.(*Signature) | |||
| return signature | |||
| } | |||
| // NewWithSignature is the same as New but injects the signature | |||
| func NewWithSignature(taskFunc interface{}, signature *Signature) (*Task, error) { | |||
| args := signature.Args | |||
| ctx := context.Background() | |||
| ctx = context.WithValue(ctx, signatureCtx, signature) | |||
| task := &Task{ | |||
| TaskFunc: reflect.ValueOf(taskFunc), | |||
| Context: ctx, | |||
| } | |||
| taskFuncType := reflect.TypeOf(taskFunc) | |||
| if taskFuncType.NumIn() > 0 { | |||
| arg0Type := taskFuncType.In(0) | |||
| if IsContextType(arg0Type) { | |||
| task.UseContext = true | |||
| } | |||
| } | |||
| if err := task.ReflectArgs(args); err != nil { | |||
| return nil, fmt.Errorf("Reflect task args error: %s", err) | |||
| } | |||
| return task, nil | |||
| } | |||
| // New tries to use reflection to convert the function and arguments | |||
| // into a reflect.Value and prepare it for invocation | |||
| func New(taskFunc interface{}, args []Arg) (*Task, error) { | |||
| task := &Task{ | |||
| TaskFunc: reflect.ValueOf(taskFunc), | |||
| Context: context.Background(), | |||
| } | |||
| taskFuncType := reflect.TypeOf(taskFunc) | |||
| if taskFuncType.NumIn() > 0 { | |||
| arg0Type := taskFuncType.In(0) | |||
| if IsContextType(arg0Type) { | |||
| task.UseContext = true | |||
| } | |||
| } | |||
| if err := task.ReflectArgs(args); err != nil { | |||
| return nil, fmt.Errorf("Reflect task args error: %s", err) | |||
| } | |||
| return task, nil | |||
| } | |||
// Call attempts to call the task with the supplied arguments.
//
// `err` is set in the return value in two cases:
// 1. The reflected function invocation panics (e.g. due to a mismatched
//    argument list).
// 2. The task func itself returns a non-nil error.
func (t *Task) Call() (taskResults []*TaskResult, err error) {
	// retrieve the span from the task's context and finish it as soon as this function returns
	if span := opentracing.SpanFromContext(t.Context); span != nil {
		defer span.Finish()
	}

	defer func() {
		// Recover from panic and set err: errors propagate as-is, strings are
		// wrapped, anything else becomes ErrTaskPanicked.
		if e := recover(); e != nil {
			switch e := e.(type) {
			default:
				err = ErrTaskPanicked
			case error:
				err = e
			case string:
				err = errors.New(e)
			}

			// mark the span as failed and dump the error and stack trace to the span
			if span := opentracing.SpanFromContext(t.Context); span != nil {
				opentracing_ext.Error.Set(span, true)
				span.LogFields(
					opentracing_log.Error(err),
					opentracing_log.Object("stack", string(debug.Stack())),
				)
			}

			// Print stack trace
			log.ERROR.Printf("%s", debug.Stack())
		}
	}()

	args := t.Args

	// Context-aware task funcs receive the task context as their first argument.
	if t.UseContext {
		ctxValue := reflect.ValueOf(t.Context)
		args = append([]reflect.Value{ctxValue}, args...)
	}

	// Invoke the task
	results := t.TaskFunc.Call(args)

	// Task must return at least a value
	if len(results) == 0 {
		return nil, ErrTaskReturnsNoValue
	}

	// Last returned value
	lastResult := results[len(results)-1]

	// If the last returned value is not nil, it has to be of error type, if that
	// is not the case, return error message, otherwise propagate the task error
	// to the caller
	if !lastResult.IsNil() {
		// If the result implements Retriable interface, return instance of Retriable
		// NOTE(review): this asserts the concrete type ErrRetryTaskLater; any
		// other implementation of Retriable would make this assertion panic
		// (the panic is then caught by the deferred recover above). Confirm
		// whether other Retriable implementations are expected here.
		retriableErrorInterface := reflect.TypeOf((*Retriable)(nil)).Elem()
		if lastResult.Type().Implements(retriableErrorInterface) {
			return nil, lastResult.Interface().(ErrRetryTaskLater)
		}

		// Otherwise, check that the result implements the standard error interface,
		// if not, return ErrLastReturnValueMustBeError error
		errorInterface := reflect.TypeOf((*error)(nil)).Elem()
		if !lastResult.Type().Implements(errorInterface) {
			return nil, ErrLastReturnValueMustBeError
		}

		// Return the standard error
		return nil, lastResult.Interface().(error)
	}

	// Convert reflect values to task results (all returns except the trailing error)
	taskResults = make([]*TaskResult, len(results)-1)
	for i := 0; i < len(results)-1; i++ {
		val := results[i].Interface()
		typeStr := reflect.TypeOf(val).String()
		taskResults[i] = &TaskResult{
			Type:  typeStr,
			Value: val,
		}
	}

	return taskResults, err
}
| // ReflectArgs converts []TaskArg to []reflect.Value | |||
| func (t *Task) ReflectArgs(args []Arg) error { | |||
| argValues := make([]reflect.Value, len(args)) | |||
| for i, arg := range args { | |||
| argValue, err := ReflectValue(arg.Type, arg.Value) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| argValues[i] = argValue | |||
| } | |||
| t.Args = argValues | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,42 @@ | |||
| package tasks | |||
| import ( | |||
| "errors" | |||
| "reflect" | |||
| ) | |||
var (
	// ErrTaskMustBeFunc is returned when the registered task is not a func.
	ErrTaskMustBeFunc = errors.New("Task must be a func type")
	// ErrTaskReturnsNoValue is returned when the task func has no return values.
	ErrTaskReturnsNoValue = errors.New("Task must return at least a single value")
	// ErrLastReturnValueMustBeError is returned when the task func's final
	// return value is not an error.
	ErrLastReturnValueMustBeError = errors.New("Last return value of a task must be error")
)

// ValidateTask validates task function using reflection and makes sure
// it has a proper signature. Functions used as tasks must return at least a
// single value and the last return type must be error
func ValidateTask(task interface{}) error {
	t := reflect.TypeOf(task)

	// Task must be a function. reflect.TypeOf returns nil for an untyped nil,
	// which previously made reflect.ValueOf(task).Type() panic; treat nil as
	// "not a func" instead.
	if t == nil || t.Kind() != reflect.Func {
		return ErrTaskMustBeFunc
	}

	// Task must return at least a single value
	if t.NumOut() < 1 {
		return ErrTaskReturnsNoValue
	}

	// Last return value must be error
	errorInterface := reflect.TypeOf((*error)(nil)).Elem()
	if !t.Out(t.NumOut() - 1).Implements(errorInterface) {
		return ErrLastReturnValueMustBeError
	}

	return nil
}
| @@ -0,0 +1,95 @@ | |||
| package tasks | |||
| import ( | |||
| "fmt" | |||
| "github.com/google/uuid" | |||
| ) | |||
// Chain creates a chain of tasks to be executed one after another
type Chain struct {
	// Tasks are executed sequentially; NewChain links each one to the next
	// via its OnSuccess callbacks.
	Tasks []*Signature
}

// Group creates a set of tasks to be executed in parallel
type Group struct {
	// GroupUUID identifies the group; all member signatures share it.
	GroupUUID string
	// Tasks are dispatched in parallel with no ordering guarantees.
	Tasks []*Signature
}

// Chord adds an optional callback to the group to be executed
// after all tasks in the group finished
type Chord struct {
	// Group is the set of tasks that must all complete first.
	Group *Group
	// Callback is fired once every task in Group has finished.
	Callback *Signature
}
| // GetUUIDs returns slice of task UUIDS | |||
| func (group *Group) GetUUIDs() []string { | |||
| taskUUIDs := make([]string, len(group.Tasks)) | |||
| for i, signature := range group.Tasks { | |||
| taskUUIDs[i] = signature.UUID | |||
| } | |||
| return taskUUIDs | |||
| } | |||
| // NewChain creates a new chain of tasks to be processed one by one, passing | |||
| // results unless task signatures are set to be immutable | |||
| func NewChain(signatures ...*Signature) (*Chain, error) { | |||
| // Auto generate task UUIDs if needed | |||
| for _, signature := range signatures { | |||
| if signature.UUID == "" { | |||
| signatureID := uuid.New().String() | |||
| signature.UUID = fmt.Sprintf("task_%v", signatureID) | |||
| } | |||
| } | |||
| for i := len(signatures) - 1; i > 0; i-- { | |||
| if i > 0 { | |||
| signatures[i-1].OnSuccess = []*Signature{signatures[i]} | |||
| } | |||
| } | |||
| chain := &Chain{Tasks: signatures} | |||
| return chain, nil | |||
| } | |||
| // NewGroup creates a new group of tasks to be processed in parallel | |||
| func NewGroup(signatures ...*Signature) (*Group, error) { | |||
| // Generate a group UUID | |||
| groupUUID := uuid.New().String() | |||
| groupID := fmt.Sprintf("group_%v", groupUUID) | |||
| // Auto generate task UUIDs if needed, group tasks by common group UUID | |||
| for _, signature := range signatures { | |||
| if signature.UUID == "" { | |||
| signatureID := uuid.New().String() | |||
| signature.UUID = fmt.Sprintf("task_%v", signatureID) | |||
| } | |||
| signature.GroupUUID = groupID | |||
| signature.GroupTaskCount = len(signatures) | |||
| } | |||
| return &Group{ | |||
| GroupUUID: groupID, | |||
| Tasks: signatures, | |||
| }, nil | |||
| } | |||
| // NewChord creates a new chord (a group of tasks with a single callback | |||
| // to be executed after all tasks in the group has completed) | |||
| func NewChord(group *Group, callback *Signature) (*Chord, error) { | |||
| if callback.UUID == "" { | |||
| // Generate a UUID for the chord callback | |||
| callbackUUID := uuid.New().String() | |||
| callback.UUID = fmt.Sprintf("chord_%v", callbackUUID) | |||
| } | |||
| // Add a chord callback to all tasks | |||
| for _, signature := range group.Tasks { | |||
| signature.ChordCallback = callback | |||
| } | |||
| return &Chord{Group: group, Callback: callback}, nil | |||
| } | |||
| @@ -0,0 +1,141 @@ | |||
| package tracing | |||
| import ( | |||
| "encoding/json" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| opentracing "github.com/opentracing/opentracing-go" | |||
| opentracing_ext "github.com/opentracing/opentracing-go/ext" | |||
| opentracing_log "github.com/opentracing/opentracing-go/log" | |||
| ) | |||
// opentracing tags attached to spans created by machinery
var (
	// MachineryTag marks a span with the standard opentracing "component"
	// key so traces can be filtered down to machinery activity.
	MachineryTag = opentracing.Tag{Key: string(opentracing_ext.Component), Value: "machinery"}
	// WorkflowGroupTag marks spans that represent a group workflow.
	WorkflowGroupTag = opentracing.Tag{Key: "machinery.workflow", Value: "group"}
	// WorkflowChordTag marks spans that represent a chord workflow.
	WorkflowChordTag = opentracing.Tag{Key: "machinery.workflow", Value: "chord"}
	// WorkflowChainTag marks spans that represent a chain workflow.
	WorkflowChainTag = opentracing.Tag{Key: "machinery.workflow", Value: "chain"}
)
| // StartSpanFromHeaders will extract a span from the signature headers | |||
| // and start a new span with the given operation name. | |||
| func StartSpanFromHeaders(headers tasks.Headers, operationName string) opentracing.Span { | |||
| // Try to extract the span context from the carrier. | |||
| spanContext, err := opentracing.GlobalTracer().Extract(opentracing.TextMap, headers) | |||
| // Create a new span from the span context if found or start a new trace with the function name. | |||
| // For clarity add the machinery component tag. | |||
| span := opentracing.StartSpan( | |||
| operationName, | |||
| ConsumerOption(spanContext), | |||
| MachineryTag, | |||
| ) | |||
| // Log any error but don't fail | |||
| if err != nil { | |||
| span.LogFields(opentracing_log.Error(err)) | |||
| } | |||
| return span | |||
| } | |||
| // HeadersWithSpan will inject a span into the signature headers | |||
| func HeadersWithSpan(headers tasks.Headers, span opentracing.Span) tasks.Headers { | |||
| // check if the headers aren't nil | |||
| if headers == nil { | |||
| headers = make(tasks.Headers) | |||
| } | |||
| if err := opentracing.GlobalTracer().Inject(span.Context(), opentracing.TextMap, headers); err != nil { | |||
| span.LogFields(opentracing_log.Error(err)) | |||
| } | |||
| return headers | |||
| } | |||
// consumerOption is a StartSpanOption that marks a span as a consumer span
// and, when present, links it to the producer span context it was
// extracted from.
type consumerOption struct {
	producerContext opentracing.SpanContext
}
| func (c consumerOption) Apply(o *opentracing.StartSpanOptions) { | |||
| if c.producerContext != nil { | |||
| opentracing.FollowsFrom(c.producerContext).Apply(o) | |||
| } | |||
| opentracing_ext.SpanKindConsumer.Apply(o) | |||
| } | |||
| // ConsumerOption ... | |||
| func ConsumerOption(producer opentracing.SpanContext) opentracing.StartSpanOption { | |||
| return consumerOption{producer} | |||
| } | |||
// producerOption is a StartSpanOption that tags a span as a producer span.
type producerOption struct{}

// Apply tags the span as a producer.
func (p producerOption) Apply(o *opentracing.StartSpanOptions) {
	opentracing_ext.SpanKindProducer.Apply(o)
}

// ProducerOption returns a StartSpanOption that tags the span as a producer.
func ProducerOption() opentracing.StartSpanOption {
	return producerOption{}
}
| // AnnotateSpanWithSignatureInfo ... | |||
| func AnnotateSpanWithSignatureInfo(span opentracing.Span, signature *tasks.Signature) { | |||
| // tag the span with some info about the signature | |||
| span.SetTag("signature.name", signature.Name) | |||
| span.SetTag("signature.uuid", signature.UUID) | |||
| if signature.GroupUUID != "" { | |||
| span.SetTag("signature.group.uuid", signature.UUID) | |||
| } | |||
| if signature.ChordCallback != nil { | |||
| span.SetTag("signature.chord.callback.uuid", signature.ChordCallback.UUID) | |||
| span.SetTag("signature.chord.callback.name", signature.ChordCallback.Name) | |||
| } | |||
| } | |||
| // AnnotateSpanWithChainInfo ... | |||
| func AnnotateSpanWithChainInfo(span opentracing.Span, chain *tasks.Chain) { | |||
| // tag the span with some info about the chain | |||
| span.SetTag("chain.tasks.length", len(chain.Tasks)) | |||
| // inject the tracing span into the tasks signature headers | |||
| for _, signature := range chain.Tasks { | |||
| signature.Headers = HeadersWithSpan(signature.Headers, span) | |||
| } | |||
| } | |||
| // AnnotateSpanWithGroupInfo ... | |||
| func AnnotateSpanWithGroupInfo(span opentracing.Span, group *tasks.Group, sendConcurrency int) { | |||
| // tag the span with some info about the group | |||
| span.SetTag("group.uuid", group.GroupUUID) | |||
| span.SetTag("group.tasks.length", len(group.Tasks)) | |||
| span.SetTag("group.concurrency", sendConcurrency) | |||
| // encode the task uuids to json, if that fails just dump it in | |||
| if taskUUIDs, err := json.Marshal(group.GetUUIDs()); err == nil { | |||
| span.SetTag("group.tasks", string(taskUUIDs)) | |||
| } else { | |||
| span.SetTag("group.tasks", group.GetUUIDs()) | |||
| } | |||
| // inject the tracing span into the tasks signature headers | |||
| for _, signature := range group.Tasks { | |||
| signature.Headers = HeadersWithSpan(signature.Headers, span) | |||
| } | |||
| } | |||
| // AnnotateSpanWithChordInfo ... | |||
| func AnnotateSpanWithChordInfo(span opentracing.Span, chord *tasks.Chord, sendConcurrency int) { | |||
| // tag the span with chord specific info | |||
| span.SetTag("chord.callback.uuid", chord.Callback.UUID) | |||
| // inject the tracing span into the callback signature | |||
| chord.Callback.Headers = HeadersWithSpan(chord.Callback.Headers, span) | |||
| // tag the span for the group part of the chord | |||
| AnnotateSpanWithGroupInfo(span, chord.Group, sendConcurrency) | |||
| } | |||
| @@ -0,0 +1,393 @@ | |||
| package machinery | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "os" | |||
| "os/signal" | |||
| "syscall" | |||
| "time" | |||
| "github.com/RichardKnop/machinery/v1/backends/amqp" | |||
| "github.com/RichardKnop/machinery/v1/log" | |||
| "github.com/RichardKnop/machinery/v1/retry" | |||
| "github.com/RichardKnop/machinery/v1/tasks" | |||
| "github.com/RichardKnop/machinery/v1/tracing" | |||
| "github.com/opentracing/opentracing-go" | |||
| ) | |||
// Worker represents a single worker process
type Worker struct {
	server *Server // parent server providing config, broker and result backend
	// ConsumerTag identifies this consumer to the broker (passed to
	// StartConsuming).
	ConsumerTag string
	// Concurrency is passed to the broker's StartConsuming; presumably the
	// number of tasks processed in parallel — confirm against broker impl.
	Concurrency int
	// Queue is the custom queue name; empty means the configured default
	// queue is used.
	Queue string
	// errorHandler, when set, replaces default error logging for broker
	// failures and failed tasks.
	errorHandler func(err error)
	// preTaskHandler runs immediately before each task invocation.
	preTaskHandler func(*tasks.Signature)
	// postTaskHandler runs after each task invocation (deferred in Process).
	postTaskHandler func(*tasks.Signature)
}
| // Launch starts a new worker process. The worker subscribes | |||
| // to the default queue and processes incoming registered tasks | |||
| func (worker *Worker) Launch() error { | |||
| errorsChan := make(chan error) | |||
| worker.LaunchAsync(errorsChan) | |||
| return <-errorsChan | |||
| } | |||
// LaunchAsync is a non blocking version of Launch. It starts broker
// consumption in a background goroutine and (unless disabled via config)
// installs SIGINT/SIGTERM handling for graceful shutdown. Terminal errors
// are delivered on errorsChan.
func (worker *Worker) LaunchAsync(errorsChan chan<- error) {
	cnf := worker.server.GetConfig()
	broker := worker.server.GetBroker()

	// Log some useful information about worker configuration
	log.INFO.Printf("Launching a worker with the following settings:")
	log.INFO.Printf("- Broker: %s", cnf.Broker)
	if worker.Queue == "" {
		log.INFO.Printf("- DefaultQueue: %s", cnf.DefaultQueue)
	} else {
		log.INFO.Printf("- CustomQueue: %s", worker.Queue)
	}
	log.INFO.Printf("- ResultBackend: %s", cnf.ResultBackend)
	if cnf.AMQP != nil {
		// NOTE(review): the "- AMQP:" line logs the exchange name, same as
		// the "- Exchange:" line below — possibly intended to log something
		// else; confirm.
		log.INFO.Printf("- AMQP: %s", cnf.AMQP.Exchange)
		log.INFO.Printf("  - Exchange: %s", cnf.AMQP.Exchange)
		log.INFO.Printf("  - ExchangeType: %s", cnf.AMQP.ExchangeType)
		log.INFO.Printf("  - BindingKey: %s", cnf.AMQP.BindingKey)
		log.INFO.Printf("  - PrefetchCount: %d", cnf.AMQP.PrefetchCount)
	}

	// Goroutine to start broker consumption and handle retries when broker connection dies
	go func() {
		for {
			retry, err := broker.StartConsuming(worker.ConsumerTag, worker.Concurrency, worker)

			if retry {
				// Recoverable failure: report it and loop to reconnect.
				if worker.errorHandler != nil {
					worker.errorHandler(err)
				} else {
					log.WARNING.Printf("Broker failed with error: %s", err)
				}
			} else {
				// Unrecoverable: surface the error and end the goroutine.
				errorsChan <- err // stop the goroutine
				return
			}
		}
	}()
	if !cnf.NoUnixSignals {
		sig := make(chan os.Signal, 1)
		signal.Notify(sig, os.Interrupt, syscall.SIGTERM)
		var signalsReceived uint

		// Goroutine Handle SIGINT and SIGTERM signals
		go func() {
			for {
				select {
				case s := <-sig:
					log.WARNING.Printf("Signal received: %v", s)
					signalsReceived++

					if signalsReceived < 2 {
						// After first Ctrl+C start quitting the worker gracefully
						log.WARNING.Print("Waiting for running tasks to finish before shutting down")
						// Quit() may block until consumption stops, so it
						// runs in its own goroutine to keep handling signals.
						go func() {
							worker.Quit()
							errorsChan <- errors.New("Worker quit gracefully")
						}()
					} else {
						// Abort the program when user hits Ctrl+C second time in a row
						errorsChan <- errors.New("Worker quit abruptly")
					}
				}
			}
		}()
	}
}
// CustomQueue returns the custom queue name of the running worker process;
// an empty string means the default queue is used.
func (worker *Worker) CustomQueue() string {
	return worker.Queue
}
// Quit tears down the running worker process by telling the broker to stop
// consuming messages.
func (worker *Worker) Quit() {
	worker.server.GetBroker().StopConsuming()
}
// Process handles received tasks and triggers success/error callbacks.
// It moves the task through RECEIVED -> STARTED and then delegates to
// taskSucceeded, taskRetry, retryTaskIn or taskFailed depending on the
// outcome of the call. A non-nil return value signals the broker that
// processing the delivery failed.
func (worker *Worker) Process(signature *tasks.Signature) error {
	// If the task is not registered with this worker, do not continue
	// but only return nil as we do not want to restart the worker process
	if !worker.server.IsTaskRegistered(signature.Name) {
		return nil
	}

	taskFunc, err := worker.server.GetRegisteredTask(signature.Name)
	if err != nil {
		// Same reasoning as above: an unknown task must not crash the worker.
		return nil
	}

	// Update task state to RECEIVED
	if err = worker.server.GetBackend().SetStateReceived(signature); err != nil {
		return fmt.Errorf("Set state to 'received' for task %s returned error: %s", signature.UUID, err)
	}

	// Prepare task for processing
	task, err := tasks.NewWithSignature(taskFunc, signature)
	// if this failed, it means the task is malformed, probably has invalid
	// signature, go directly to task failed without checking whether to retry
	if err != nil {
		worker.taskFailed(signature, err)
		return err
	}

	// try to extract trace span from headers and add it to the function context
	// so it can be used inside the function if it has context.Context as the first
	// argument. Start a new span if it isn't found.
	taskSpan := tracing.StartSpanFromHeaders(signature.Headers, signature.Name)
	tracing.AnnotateSpanWithSignatureInfo(taskSpan, signature)
	task.Context = opentracing.ContextWithSpan(task.Context, taskSpan)

	// Update task state to STARTED
	if err = worker.server.GetBackend().SetStateStarted(signature); err != nil {
		return fmt.Errorf("Set state to 'started' for task %s returned error: %s", signature.UUID, err)
	}

	//Run handler before the task is called
	if worker.preTaskHandler != nil {
		worker.preTaskHandler(signature)
	}

	//Defer run handler for the end of the task
	if worker.postTaskHandler != nil {
		defer worker.postTaskHandler(signature)
	}

	// Call the task
	results, err := task.Call()
	if err != nil {
		// If a tasks.ErrRetryTaskLater was returned from the task,
		// retry the task after specified duration
		// NOTE(review): the interface{}(err) conversion is redundant — a
		// plain err.(tasks.ErrRetryTaskLater) assertion would be equivalent.
		retriableErr, ok := interface{}(err).(tasks.ErrRetryTaskLater)
		if ok {
			return worker.retryTaskIn(signature, retriableErr.RetryIn())
		}

		// Otherwise, execute default retry logic based on signature.RetryCount
		// and signature.RetryTimeout values
		if signature.RetryCount > 0 {
			return worker.taskRetry(signature)
		}

		return worker.taskFailed(signature, err)
	}

	return worker.taskSucceeded(signature, results)
}
| // retryTask decrements RetryCount counter and republishes the task to the queue | |||
| func (worker *Worker) taskRetry(signature *tasks.Signature) error { | |||
| // Update task state to RETRY | |||
| if err := worker.server.GetBackend().SetStateRetry(signature); err != nil { | |||
| return fmt.Errorf("Set state to 'retry' for task %s returned error: %s", signature.UUID, err) | |||
| } | |||
| // Decrement the retry counter, when it reaches 0, we won't retry again | |||
| signature.RetryCount-- | |||
| // Increase retry timeout | |||
| signature.RetryTimeout = retry.FibonacciNext(signature.RetryTimeout) | |||
| // Delay task by signature.RetryTimeout seconds | |||
| eta := time.Now().UTC().Add(time.Second * time.Duration(signature.RetryTimeout)) | |||
| signature.ETA = &eta | |||
| log.WARNING.Printf("Task %s failed. Going to retry in %d seconds.", signature.UUID, signature.RetryTimeout) | |||
| // Send the task back to the queue | |||
| _, err := worker.server.SendTask(signature) | |||
| return err | |||
| } | |||
| // taskRetryIn republishes the task to the queue with ETA of now + retryIn.Seconds() | |||
| func (worker *Worker) retryTaskIn(signature *tasks.Signature, retryIn time.Duration) error { | |||
| // Update task state to RETRY | |||
| if err := worker.server.GetBackend().SetStateRetry(signature); err != nil { | |||
| return fmt.Errorf("Set state to 'retry' for task %s returned error: %s", signature.UUID, err) | |||
| } | |||
| // Delay task by retryIn duration | |||
| eta := time.Now().UTC().Add(retryIn) | |||
| signature.ETA = &eta | |||
| log.WARNING.Printf("Task %s failed. Going to retry in %.0f seconds.", signature.UUID, retryIn.Seconds()) | |||
| // Send the task back to the queue | |||
| _, err := worker.server.SendTask(signature) | |||
| return err | |||
| } | |||
// taskSucceeded updates the task state and triggers success callbacks or a
// chord callback if this was the last task of a group with a chord callback
func (worker *Worker) taskSucceeded(signature *tasks.Signature, taskResults []*tasks.TaskResult) error {
	// Update task state to SUCCESS
	if err := worker.server.GetBackend().SetStateSuccess(signature, taskResults); err != nil {
		return fmt.Errorf("Set state to 'success' for task %s returned error: %s", signature.UUID, err)
	}

	// Log human readable results of the processed task
	var debugResults = "[]"
	results, err := tasks.ReflectTaskResults(taskResults)
	if err != nil {
		log.WARNING.Print(err)
	} else {
		debugResults = tasks.HumanReadableResults(results)
	}
	log.DEBUG.Printf("Processed task %s. Results = %s", signature.UUID, debugResults)

	// Trigger success callbacks
	for _, successTask := range signature.OnSuccess {
		if signature.Immutable == false {
			// Pass results of the task to success callbacks
			for _, taskResult := range taskResults {
				successTask.Args = append(successTask.Args, tasks.Arg{
					Type:  taskResult.Type,
					Value: taskResult.Value,
				})
			}
		}

		// NOTE(review): the SendTask error is silently ignored here; a failed
		// callback publish is not reported anywhere.
		worker.server.SendTask(successTask)
	}

	// If the task was not part of a group, just return
	if signature.GroupUUID == "" {
		return nil
	}

	// Check if all task in the group has completed
	groupCompleted, err := worker.server.GetBackend().GroupCompleted(
		signature.GroupUUID,
		signature.GroupTaskCount,
	)
	if err != nil {
		return fmt.Errorf("Completed check for group %s returned error: %s", signature.GroupUUID, err)
	}

	// If the group has not yet completed, just return
	if !groupCompleted {
		return nil
	}

	// Defer purging of group meta queue if we are using AMQP backend
	if worker.hasAMQPBackend() {
		defer worker.server.GetBackend().PurgeGroupMeta(signature.GroupUUID)
	}

	// There is no chord callback, just return
	if signature.ChordCallback == nil {
		return nil
	}

	// Trigger chord callback; TriggerChord reports whether this worker won
	// the race to fire it.
	shouldTrigger, err := worker.server.GetBackend().TriggerChord(signature.GroupUUID)
	if err != nil {
		return fmt.Errorf("Triggering chord for group %s returned error: %s", signature.GroupUUID, err)
	}

	// Chord has already been triggered
	if !shouldTrigger {
		return nil
	}

	// Get task states
	taskStates, err := worker.server.GetBackend().GroupTaskStates(
		signature.GroupUUID,
		signature.GroupTaskCount,
	)
	if err != nil {
		// NOTE(review): the error is swallowed here and the chord callback is
		// silently skipped — confirm this is intentional.
		return nil
	}

	// Append group tasks' return values to chord task if it's not immutable;
	// bail out entirely if any group member did not succeed.
	for _, taskState := range taskStates {
		if !taskState.IsSuccess() {
			return nil
		}

		if signature.ChordCallback.Immutable == false {
			// Pass results of the task to the chord callback
			for _, taskResult := range taskState.Results {
				signature.ChordCallback.Args = append(signature.ChordCallback.Args, tasks.Arg{
					Type:  taskResult.Type,
					Value: taskResult.Value,
				})
			}
		}
	}

	// Send the chord task
	_, err = worker.server.SendTask(signature.ChordCallback)
	if err != nil {
		return err
	}

	return nil
}
| // taskFailed updates the task state and triggers error callbacks | |||
| func (worker *Worker) taskFailed(signature *tasks.Signature, taskErr error) error { | |||
| // Update task state to FAILURE | |||
| if err := worker.server.GetBackend().SetStateFailure(signature, taskErr.Error()); err != nil { | |||
| return fmt.Errorf("Set state to 'failure' for task %s returned error: %s", signature.UUID, err) | |||
| } | |||
| if worker.errorHandler != nil { | |||
| worker.errorHandler(taskErr) | |||
| } else { | |||
| log.ERROR.Printf("Failed processing task %s. Error = %v", signature.UUID, taskErr) | |||
| } | |||
| // Trigger error callbacks | |||
| for _, errorTask := range signature.OnError { | |||
| // Pass error as a first argument to error callbacks | |||
| args := append([]tasks.Arg{{ | |||
| Type: "string", | |||
| Value: taskErr.Error(), | |||
| }}, errorTask.Args...) | |||
| errorTask.Args = args | |||
| worker.server.SendTask(errorTask) | |||
| } | |||
| return nil | |||
| } | |||
// hasAMQPBackend returns true if the worker's result backend is the AMQP
// backend; taskSucceeded uses this to decide whether the group meta queue
// must be purged explicitly.
func (worker *Worker) hasAMQPBackend() bool {
	_, ok := worker.server.GetBackend().(*amqp.Backend)
	return ok
}
// SetErrorHandler sets a custom error handler for task errors.
// The default behavior is just to log the error after all the retry
// attempts fail.
func (worker *Worker) SetErrorHandler(handler func(err error)) {
	worker.errorHandler = handler
}

// SetPreTaskHandler sets a custom handler func that runs before a job is
// started.
func (worker *Worker) SetPreTaskHandler(handler func(*tasks.Signature)) {
	worker.preTaskHandler = handler
}

// SetPostTaskHandler sets a custom handler that runs at the end of a job.
func (worker *Worker) SetPostTaskHandler(handler func(*tasks.Signature)) {
	worker.postTaskHandler = handler
}

// GetServer returns the server this worker belongs to.
func (worker *Worker) GetServer() *Server {
	return worker.server
}
| @@ -0,0 +1,24 @@ | |||
| before_script: | |||
| - mkdir -p $GOPATH/src/github.com/redsync | |||
| - ln -s $CI_PROJECT_DIR $GOPATH/src/github.com/redsync/redsync | |||
| - cd $GOPATH/src/github.com/redsync/redsync | |||
| - apt-get update | |||
| - apt-get -y install redis-server | |||
| stages: | |||
| - build | |||
| - test | |||
| build-go-1.5: | |||
| image: golang:1.5 | |||
| stage: build | |||
| script: | |||
| - go get -v | |||
| - go build -v | |||
| test-go-1.5: | |||
| image: golang:1.5 | |||
| stage: test | |||
| script: | |||
| - go get -v -t | |||
| - go test -v | |||
| @@ -0,0 +1,27 @@ | |||
| Copyright (c) 2016, Mahmud Ridwan | |||
| All rights reserved. | |||
| Redistribution and use in source and binary forms, with or without | |||
| modification, are permitted provided that the following conditions are met: | |||
| * Redistributions of source code must retain the above copyright notice, this | |||
| list of conditions and the following disclaimer. | |||
| * Redistributions in binary form must reproduce the above copyright notice, | |||
| this list of conditions and the following disclaimer in the documentation | |||
| and/or other materials provided with the distribution. | |||
| * Neither the name of the Redsync nor the names of its | |||
| contributors may be used to endorse or promote products derived from | |||
| this software without specific prior written permission. | |||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |||
| AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |||
| IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | |||
| DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE | |||
| FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |||
| DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | |||
| SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | |||
| CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | |||
| OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| @@ -0,0 +1,29 @@ | |||
| # Redsync | |||
| [](https://drone.io/github.com/go-redsync/redsync/latest) | |||
| Redsync provides a Redis-based distributed mutual exclusion lock implementation for Go as described in [this post](http://redis.io/topics/distlock). A reference library (by [antirez](https://github.com/antirez)) for Ruby is available at [github.com/antirez/redlock-rb](https://github.com/antirez/redlock-rb). | |||
| ## Installation | |||
| Install Redsync using the go get command: | |||
| $ go get gopkg.in/redsync.v1 | |||
| The only dependencies are the Go distribution and [Redigo](https://github.com/gomodule/redigo). | |||
| ## Documentation | |||
| - [Reference](https://godoc.org/gopkg.in/redsync.v1) | |||
| ## Contributing | |||
| Contributions are welcome. | |||
| ## License | |||
| Redsync is available under the [BSD (3-Clause) License](https://opensource.org/licenses/BSD-3-Clause). | |||
| ## Disclaimer | |||
| This code implements an algorithm which is currently a proposal, it was not formally analyzed. Make sure to understand how it works before using it in production environments. | |||
| @@ -0,0 +1 @@ | |||
| v1 | |||
| @@ -0,0 +1,4 @@ | |||
| // Package redsync provides a Redis-based distributed mutual exclusion lock implementation as described in the post http://redis.io/topics/distlock. | |||
| // | |||
| // Values containing the types defined in this package should not be copied. | |||
| package redsync | |||
| @@ -0,0 +1,5 @@ | |||
| package redsync | |||
| import "errors" | |||
// ErrFailed is returned by Mutex.Lock when the lock could not be acquired
// within the configured number of tries.
var ErrFailed = errors.New("redsync: failed to acquire lock")
| @@ -0,0 +1,145 @@ | |||
| package redsync | |||
| import ( | |||
| "crypto/rand" | |||
| "encoding/base64" | |||
| "sync" | |||
| "time" | |||
| "github.com/gomodule/redigo/redis" | |||
| ) | |||
// A Mutex is a distributed mutual exclusion lock.
type Mutex struct {
	name   string        // Redis key the lock is held under
	expiry time.Duration // how long the lock is held before it auto-expires

	tries int           // number of acquisition attempts before giving up
	delay time.Duration // wait between consecutive acquisition attempts

	factor float64 // clock drift factor (see http://redis.io/topics/distlock)

	quorum int // number of pools that must agree (len(pools)/2+1 by default)

	value string    // random token identifying this lock holder
	until time.Time // instant after which the acquired lock is no longer valid

	nodem sync.Mutex // serializes Lock/Unlock/Extend across goroutines

	pools []Pool // connection pools, one per independent Redis node
}
| // Lock locks m. In case it returns an error on failure, you may retry to acquire the lock by calling this method again. | |||
| func (m *Mutex) Lock() error { | |||
| m.nodem.Lock() | |||
| defer m.nodem.Unlock() | |||
| value, err := m.genValue() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| for i := 0; i < m.tries; i++ { | |||
| if i != 0 { | |||
| time.Sleep(m.delay) | |||
| } | |||
| start := time.Now() | |||
| n := 0 | |||
| for _, pool := range m.pools { | |||
| ok := m.acquire(pool, value) | |||
| if ok { | |||
| n++ | |||
| } | |||
| } | |||
| until := time.Now().Add(m.expiry - time.Now().Sub(start) - time.Duration(int64(float64(m.expiry)*m.factor)) + 2*time.Millisecond) | |||
| if n >= m.quorum && time.Now().Before(until) { | |||
| m.value = value | |||
| m.until = until | |||
| return nil | |||
| } | |||
| for _, pool := range m.pools { | |||
| m.release(pool, value) | |||
| } | |||
| } | |||
| return ErrFailed | |||
| } | |||
| // Unlock unlocks m and returns the status of unlock. | |||
| func (m *Mutex) Unlock() bool { | |||
| m.nodem.Lock() | |||
| defer m.nodem.Unlock() | |||
| n := 0 | |||
| for _, pool := range m.pools { | |||
| ok := m.release(pool, m.value) | |||
| if ok { | |||
| n++ | |||
| } | |||
| } | |||
| return n >= m.quorum | |||
| } | |||
| // Extend resets the mutex's expiry and returns the status of expiry extension. | |||
| func (m *Mutex) Extend() bool { | |||
| m.nodem.Lock() | |||
| defer m.nodem.Unlock() | |||
| n := 0 | |||
| for _, pool := range m.pools { | |||
| ok := m.touch(pool, m.value, int(m.expiry/time.Millisecond)) | |||
| if ok { | |||
| n++ | |||
| } | |||
| } | |||
| return n >= m.quorum | |||
| } | |||
| func (m *Mutex) genValue() (string, error) { | |||
| b := make([]byte, 32) | |||
| _, err := rand.Read(b) | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| return base64.StdEncoding.EncodeToString(b), nil | |||
| } | |||
// acquire attempts to take the lock on a single Redis node using
// SET key value NX PX <expiry-ms>, which only succeeds when the key does
// not already exist. Returns true when the node granted the lock.
func (m *Mutex) acquire(pool Pool, value string) bool {
	conn := pool.Get()
	defer conn.Close()
	// "OK" is the reply to a successful SET; any error (including a nil
	// reply when NX fails) counts as an acquisition failure.
	reply, err := redis.String(conn.Do("SET", m.name, value, "NX", "PX", int(m.expiry/time.Millisecond)))
	return err == nil && reply == "OK"
}
// deleteScript atomically deletes the lock key, but only while it still
// holds the caller's value — this prevents releasing a lock that has
// expired and been re-acquired by another holder.
var deleteScript = redis.NewScript(1, `
	if redis.call("GET", KEYS[1]) == ARGV[1] then
		return redis.call("DEL", KEYS[1])
	else
		return 0
	end
`)
| func (m *Mutex) release(pool Pool, value string) bool { | |||
| conn := pool.Get() | |||
| defer conn.Close() | |||
| status, err := deleteScript.Do(conn, m.name, value) | |||
| return err == nil && status != 0 | |||
| } | |||
// touchScript atomically resets the key's TTL, but only while it still
// holds the caller's value; otherwise it replies "ERR" so the caller can
// tell the lock has been lost.
var touchScript = redis.NewScript(1, `
	if redis.call("GET", KEYS[1]) == ARGV[1] then
		return redis.call("SET", KEYS[1], ARGV[1], "XX", "PX", ARGV[2])
	else
		return "ERR"
	end
`)
// touch refreshes the lock's TTL (in milliseconds) on a single Redis node,
// guarded by the value check in touchScript. Returns true when the TTL was
// extended, false when the lock was lost or the call failed.
func (m *Mutex) touch(pool Pool, value string, expiry int) bool {
	conn := pool.Get()
	defer conn.Close()
	status, err := redis.String(touchScript.Do(conn, m.name, value, expiry))
	return err == nil && status != "ERR"
}
| @@ -0,0 +1,8 @@ | |||
| package redsync | |||
| import "github.com/gomodule/redigo/redis" | |||
// A Pool maintains a pool of Redis connections.
// It is typically satisfied by *redis.Pool from the redigo package.
type Pool interface {
	// Get returns a connection; the caller is responsible for closing it.
	Get() redis.Conn
}
| @@ -0,0 +1,73 @@ | |||
| package redsync | |||
| import "time" | |||
// Redsync provides a simple method for creating distributed mutexes using multiple Redis connection pools.
type Redsync struct {
	// pools holds one connection pool per independent Redis node.
	pools []Pool
}
| // New creates and returns a new Redsync instance from given Redis connection pools. | |||
| func New(pools []Pool) *Redsync { | |||
| return &Redsync{ | |||
| pools: pools, | |||
| } | |||
| } | |||
| // NewMutex returns a new distributed mutex with given name. | |||
| func (r *Redsync) NewMutex(name string, options ...Option) *Mutex { | |||
| m := &Mutex{ | |||
| name: name, | |||
| expiry: 8 * time.Second, | |||
| tries: 32, | |||
| delay: 500 * time.Millisecond, | |||
| factor: 0.01, | |||
| quorum: len(r.pools)/2 + 1, | |||
| pools: r.pools, | |||
| } | |||
| for _, o := range options { | |||
| o.Apply(m) | |||
| } | |||
| return m | |||
| } | |||
// An Option configures a mutex.
type Option interface {
	// Apply mutates the given mutex according to the option.
	Apply(*Mutex)
}
// OptionFunc is a function that configures a mutex. It adapts a plain
// function to the Option interface.
type OptionFunc func(*Mutex)

// Apply calls f(mutex), satisfying the Option interface.
func (f OptionFunc) Apply(mutex *Mutex) {
	f(mutex)
}
| // SetExpiry can be used to set the expiry of a mutex to the given value. | |||
| func SetExpiry(expiry time.Duration) Option { | |||
| return OptionFunc(func(m *Mutex) { | |||
| m.expiry = expiry | |||
| }) | |||
| } | |||
| // SetTries can be used to set the number of times lock acquire is attempted. | |||
| func SetTries(tries int) Option { | |||
| return OptionFunc(func(m *Mutex) { | |||
| m.tries = tries | |||
| }) | |||
| } | |||
| // SetRetryDelay can be used to set the amount of time to wait between retries. | |||
| func SetRetryDelay(delay time.Duration) Option { | |||
| return OptionFunc(func(m *Mutex) { | |||
| m.delay = delay | |||
| }) | |||
| } | |||
| // SetDriftFactor can be used to set the clock drift factor. | |||
| func SetDriftFactor(factor float64) Option { | |||
| return OptionFunc(func(m *Mutex) { | |||
| m.factor = factor | |||
| }) | |||
| } | |||
| @@ -0,0 +1,202 @@ | |||
| Apache License | |||
| Version 2.0, January 2004 | |||
| http://www.apache.org/licenses/ | |||
| TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
| 1. Definitions. | |||
| "License" shall mean the terms and conditions for use, reproduction, | |||
| and distribution as defined by Sections 1 through 9 of this document. | |||
| "Licensor" shall mean the copyright owner or entity authorized by | |||
| the copyright owner that is granting the License. | |||
| "Legal Entity" shall mean the union of the acting entity and all | |||
| other entities that control, are controlled by, or are under common | |||
| control with that entity. For the purposes of this definition, | |||
| "control" means (i) the power, direct or indirect, to cause the | |||
| direction or management of such entity, whether by contract or | |||
| otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
| outstanding shares, or (iii) beneficial ownership of such entity. | |||
| "You" (or "Your") shall mean an individual or Legal Entity | |||
| exercising permissions granted by this License. | |||
| "Source" form shall mean the preferred form for making modifications, | |||
| including but not limited to software source code, documentation | |||
| source, and configuration files. | |||
| "Object" form shall mean any form resulting from mechanical | |||
| transformation or translation of a Source form, including but | |||
| not limited to compiled object code, generated documentation, | |||
| and conversions to other media types. | |||
| "Work" shall mean the work of authorship, whether in Source or | |||
| Object form, made available under the License, as indicated by a | |||
| copyright notice that is included in or attached to the work | |||
| (an example is provided in the Appendix below). | |||
| "Derivative Works" shall mean any work, whether in Source or Object | |||
| form, that is based on (or derived from) the Work and for which the | |||
| editorial revisions, annotations, elaborations, or other modifications | |||
| represent, as a whole, an original work of authorship. For the purposes | |||
| of this License, Derivative Works shall not include works that remain | |||
| separable from, or merely link (or bind by name) to the interfaces of, | |||
| the Work and Derivative Works thereof. | |||
| "Contribution" shall mean any work of authorship, including | |||
| the original version of the Work and any modifications or additions | |||
| to that Work or Derivative Works thereof, that is intentionally | |||
| submitted to Licensor for inclusion in the Work by the copyright owner | |||
| or by an individual or Legal Entity authorized to submit on behalf of | |||
| the copyright owner. For the purposes of this definition, "submitted" | |||
| means any form of electronic, verbal, or written communication sent | |||
| to the Licensor or its representatives, including but not limited to | |||
| communication on electronic mailing lists, source code control systems, | |||
| and issue tracking systems that are managed by, or on behalf of, the | |||
| Licensor for the purpose of discussing and improving the Work, but | |||
| excluding communication that is conspicuously marked or otherwise | |||
| designated in writing by the copyright owner as "Not a Contribution." | |||
| "Contributor" shall mean Licensor and any individual or Legal Entity | |||
| on behalf of whom a Contribution has been received by Licensor and | |||
| subsequently incorporated within the Work. | |||
| 2. Grant of Copyright License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| copyright license to reproduce, prepare Derivative Works of, | |||
| publicly display, publicly perform, sublicense, and distribute the | |||
| Work and such Derivative Works in Source or Object form. | |||
| 3. Grant of Patent License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| (except as stated in this section) patent license to make, have made, | |||
| use, offer to sell, sell, import, and otherwise transfer the Work, | |||
| where such license applies only to those patent claims licensable | |||
| by such Contributor that are necessarily infringed by their | |||
| Contribution(s) alone or by combination of their Contribution(s) | |||
| with the Work to which such Contribution(s) was submitted. If You | |||
| institute patent litigation against any entity (including a | |||
| cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
| or a Contribution incorporated within the Work constitutes direct | |||
| or contributory patent infringement, then any patent licenses | |||
| granted to You under this License for that Work shall terminate | |||
| as of the date such litigation is filed. | |||
| 4. Redistribution. You may reproduce and distribute copies of the | |||
| Work or Derivative Works thereof in any medium, with or without | |||
| modifications, and in Source or Object form, provided that You | |||
| meet the following conditions: | |||
| (a) You must give any other recipients of the Work or | |||
| Derivative Works a copy of this License; and | |||
| (b) You must cause any modified files to carry prominent notices | |||
| stating that You changed the files; and | |||
| (c) You must retain, in the Source form of any Derivative Works | |||
| that You distribute, all copyright, patent, trademark, and | |||
| attribution notices from the Source form of the Work, | |||
| excluding those notices that do not pertain to any part of | |||
| the Derivative Works; and | |||
| (d) If the Work includes a "NOTICE" text file as part of its | |||
| distribution, then any Derivative Works that You distribute must | |||
| include a readable copy of the attribution notices contained | |||
| within such NOTICE file, excluding those notices that do not | |||
| pertain to any part of the Derivative Works, in at least one | |||
| of the following places: within a NOTICE text file distributed | |||
| as part of the Derivative Works; within the Source form or | |||
| documentation, if provided along with the Derivative Works; or, | |||
| within a display generated by the Derivative Works, if and | |||
| wherever such third-party notices normally appear. The contents | |||
| of the NOTICE file are for informational purposes only and | |||
| do not modify the License. You may add Your own attribution | |||
| notices within Derivative Works that You distribute, alongside | |||
| or as an addendum to the NOTICE text from the Work, provided | |||
| that such additional attribution notices cannot be construed | |||
| as modifying the License. | |||
| You may add Your own copyright statement to Your modifications and | |||
| may provide additional or different license terms and conditions | |||
| for use, reproduction, or distribution of Your modifications, or | |||
| for any such Derivative Works as a whole, provided Your use, | |||
| reproduction, and distribution of the Work otherwise complies with | |||
| the conditions stated in this License. | |||
| 5. Submission of Contributions. Unless You explicitly state otherwise, | |||
| any Contribution intentionally submitted for inclusion in the Work | |||
| by You to the Licensor shall be under the terms and conditions of | |||
| this License, without any additional terms or conditions. | |||
| Notwithstanding the above, nothing herein shall supersede or modify | |||
| the terms of any separate license agreement you may have executed | |||
| with Licensor regarding such Contributions. | |||
| 6. Trademarks. This License does not grant permission to use the trade | |||
| names, trademarks, service marks, or product names of the Licensor, | |||
| except as required for reasonable and customary use in describing the | |||
| origin of the Work and reproducing the content of the NOTICE file. | |||
| 7. Disclaimer of Warranty. Unless required by applicable law or | |||
| agreed to in writing, Licensor provides the Work (and each | |||
| Contributor provides its Contributions) on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
| implied, including, without limitation, any warranties or conditions | |||
| of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
| PARTICULAR PURPOSE. You are solely responsible for determining the | |||
| appropriateness of using or redistributing the Work and assume any | |||
| risks associated with Your exercise of permissions under this License. | |||
| 8. Limitation of Liability. In no event and under no legal theory, | |||
| whether in tort (including negligence), contract, or otherwise, | |||
| unless required by applicable law (such as deliberate and grossly | |||
| negligent acts) or agreed to in writing, shall any Contributor be | |||
| liable to You for damages, including any direct, indirect, special, | |||
| incidental, or consequential damages of any character arising as a | |||
| result of this License or out of the use or inability to use the | |||
| Work (including but not limited to damages for loss of goodwill, | |||
| work stoppage, computer failure or malfunction, or any and all | |||
| other commercial damages or losses), even if such Contributor | |||
| has been advised of the possibility of such damages. | |||
| 9. Accepting Warranty or Additional Liability. While redistributing | |||
| the Work or Derivative Works thereof, You may choose to offer, | |||
| and charge a fee for, acceptance of support, warranty, indemnity, | |||
| or other liability obligations and/or rights consistent with this | |||
| License. However, in accepting such obligations, You may act only | |||
| on Your own behalf and on Your sole responsibility, not on behalf | |||
| of any other Contributor, and only if You agree to indemnify, | |||
| defend, and hold each Contributor harmless for any liability | |||
| incurred by, or claims asserted against, such Contributor by reason | |||
| of your accepting any such warranty or additional liability. | |||
| END OF TERMS AND CONDITIONS | |||
| APPENDIX: How to apply the Apache License to your work. | |||
| To apply the Apache License to your work, attach the following | |||
| boilerplate notice, with the fields enclosed by brackets "[]" | |||
| replaced with your own identifying information. (Don't include | |||
| the brackets!) The text should be enclosed in the appropriate | |||
| comment syntax for the file format. We also recommend that a | |||
| file or class name and description of purpose be included on the | |||
| same "printed page" as the copyright notice for easier | |||
| identification within third-party archives. | |||
| Copyright [yyyy] [name of copyright owner] | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| @@ -0,0 +1,3 @@ | |||
| AWS SDK for Go | |||
| Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. | |||
| Copyright 2014-2015 Stripe, Inc. | |||
| @@ -0,0 +1,164 @@ | |||
| // Package awserr represents API error interface accessors for the SDK. | |||
| package awserr | |||
// An Error wraps lower level errors with code, message and an original error.
// The underlying concrete error type may also satisfy other interfaces which
// can be used to obtain more specific information about the error.
//
// Calling Error() or String() will always include the full information about
// an error based on its underlying type.
//
// Example:
//
//	output, err := s3manage.Upload(svc, input, opts)
//	if err != nil {
//		if awsErr, ok := err.(awserr.Error); ok {
//			// Get error details
//			log.Println("Error:", awsErr.Code(), awsErr.Message())
//
//			// Prints out full error message, including original error if there was one.
//			log.Println("Error:", awsErr.Error())
//
//			// Get original error
//			if origErr := awsErr.OrigErr(); origErr != nil {
//				// operate on original error.
//			}
//		} else {
//			fmt.Println(err.Error())
//		}
//	}
type Error interface {
	// Satisfy the generic error interface.
	error
	// Code returns the short phrase depicting the classification of the error.
	Code() string
	// Message returns the error details message.
	Message() string
	// OrigErr returns the original error if one was set. Nil is returned if
	// not set.
	OrigErr() error
}
// BatchError is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Deprecated: Replaced with BatchedErrors. Only defined for backwards
// compatibility.
type BatchError interface {
	// Satisfy the generic error interface.
	error
	// Code returns the short phrase depicting the classification of the error.
	Code() string
	// Message returns the error details message.
	Message() string
	// OrigErrs returns the original errors if any were set. An empty or nil
	// slice is returned if none were set.
	OrigErrs() []error
}
// BatchedErrors is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Replaces BatchError
type BatchedErrors interface {
	// Satisfy the base Error interface.
	Error
	// OrigErrs returns the original errors if any were set. An empty or nil
	// slice is returned if none were set.
	OrigErrs() []error
}
// New returns an Error object described by the code, message, and origErr.
//
// A non-nil origErr is wrapped as the single original error of the returned
// value, retrievable via OrigErr. (Note: origErr is always wrapped, even when
// it already satisfies the Error interface.)
func New(code, message string, origErr error) Error {
	var errs []error
	if origErr != nil {
		errs = append(errs, origErr)
	}
	return newBaseError(code, message, errs)
}
// NewBatchError returns a BatchedErrors described by the code and message,
// wrapping the given collection of errors.
func NewBatchError(code, message string, errs []error) BatchedErrors {
	return newBaseError(code, message, errs)
}
// A RequestFailure is an interface to extract request failure information from
// an Error such as the request ID of the failed request returned by a service.
// RequestFailures may not always have a requestID value if the request failed
// prior to reaching the service such as a connection error.
//
// Example:
//
//	output, err := s3manage.Upload(svc, input, opts)
//	if err != nil {
//		if reqerr, ok := err.(RequestFailure); ok {
//			log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
//		} else {
//			log.Println("Error:", err.Error())
//		}
//	}
//
// Combined with awserr.Error:
//
//	output, err := s3manage.Upload(svc, input, opts)
//	if err != nil {
//		if awsErr, ok := err.(awserr.Error); ok {
//			// Generic AWS Error with Code, Message, and original error (if any)
//			fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
//
//			if reqErr, ok := err.(awserr.RequestFailure); ok {
//				// A service error occurred
//				fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
//			}
//		} else {
//			fmt.Println(err.Error())
//		}
//	}
type RequestFailure interface {
	Error
	// StatusCode returns the status code of the HTTP response.
	StatusCode() int
	// RequestID returns the request ID returned by the service for a request
	// failure. This will be empty if no request ID is available such as the
	// request failed due to a connection error.
	RequestID() string
}
// NewRequestFailure returns a wrapped error with additional information for
// request status code, and service requestID.
//
// Should be used to wrap all requests which involve service requests. Even if
// the request failed without a service response, but had an HTTP status code
// that may be meaningful.
func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
	return newRequestError(err, statusCode, reqID)
}
// UnmarshalError provides the interface for the SDK failing to unmarshal data.
// It embeds awsError (rather than Error directly) so implementations can also
// embed it without a method-name conflict with error.Error().
type UnmarshalError interface {
	awsError
	// Bytes returns the bytes that failed to unmarshal.
	Bytes() []byte
}
| // NewUnmarshalError returns an initialized UnmarshalError error wrapper adding | |||
| // the bytes that fail to unmarshal to the error. | |||
| func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { | |||
| return &unmarshalError{ | |||
| awsError: New("UnmarshalError", msg, err), | |||
| bytes: bytes, | |||
| } | |||
| } | |||
| @@ -0,0 +1,221 @@ | |||
| package awserr | |||
| import ( | |||
| "encoding/hex" | |||
| "fmt" | |||
| ) | |||
// SprintError returns a string of the formatted error code.
//
// Both extra and origErr are optional. If they are included their lines
// will be added, but if they are not included their lines will be ignored.
// The layout is "code: message", then "\n\t" + extra, then
// "\ncaused by: " + origErr.Error().
func SprintError(code, message, extra string, origErr error) string {
	out := fmt.Sprintf("%s: %s", code, message)
	if extra != "" {
		out += "\n\t" + extra
	}
	if origErr != nil {
		out += "\ncaused by: " + origErr.Error()
	}
	return out
}
// A baseError wraps the code and message which defines an error. It also
// can be used to wrap an original error object.
//
// Should be used as the root for errors satisfying the awserr.Error. Also
// for any error which does not fit into a specific error wrapper type.
type baseError struct {
	// Classification of error
	code string
	// Detailed information about error
	message string
	// Optional original error this error is based off of. Allows building
	// chained errors. May hold zero, one, or many nested errors.
	errs []error
}
| // newBaseError returns an error object for the code, message, and errors. | |||
| // | |||
| // code is a short no whitespace phrase depicting the classification of | |||
| // the error that is being created. | |||
| // | |||
| // message is the free flow string containing detailed information about the | |||
| // error. | |||
| // | |||
| // origErrs is the error objects which will be nested under the new errors to | |||
| // be returned. | |||
| func newBaseError(code, message string, origErrs []error) *baseError { | |||
| b := &baseError{ | |||
| code: code, | |||
| message: message, | |||
| errs: origErrs, | |||
| } | |||
| return b | |||
| } | |||
| // Error returns the string representation of the error. | |||
| // | |||
| // See ErrorWithExtra for formatting. | |||
| // | |||
| // Satisfies the error interface. | |||
| func (b baseError) Error() string { | |||
| size := len(b.errs) | |||
| if size > 0 { | |||
| return SprintError(b.code, b.message, "", errorList(b.errs)) | |||
| } | |||
| return SprintError(b.code, b.message, "", nil) | |||
| } | |||
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (b baseError) String() string {
	return b.Error()
}
// Code returns the short phrase depicting the classification of the error.
func (b baseError) Code() string {
	return b.code
}
// Message returns the error details message.
func (b baseError) Message() string {
	return b.message
}
| // OrigErr returns the original error if one was set. Nil is returned if no | |||
| // error was set. This only returns the first element in the list. If the full | |||
| // list is needed, use BatchedErrors. | |||
| func (b baseError) OrigErr() error { | |||
| switch len(b.errs) { | |||
| case 0: | |||
| return nil | |||
| case 1: | |||
| return b.errs[0] | |||
| default: | |||
| if err, ok := b.errs[0].(Error); ok { | |||
| return NewBatchError(err.Code(), err.Message(), b.errs[1:]) | |||
| } | |||
| return NewBatchError("BatchedErrors", | |||
| "multiple errors occurred", b.errs) | |||
| } | |||
| } | |||
// OrigErrs returns the original errors if one was set. An empty slice is
// returned if no error was set. The internal slice is returned directly,
// not a copy.
func (b baseError) OrigErrs() []error {
	return b.errs
}
// awsError aliases the Error interface so that it can be included as an
// anonymous field in the requestError struct and not conflict with the
// error.Error() method.
type awsError Error
// A requestError wraps a request or service error.
//
// Composed of baseError for code, message, and original error.
type requestError struct {
	awsError
	// HTTP status code of the response, if any.
	statusCode int
	// Service-assigned request ID; empty if unavailable.
	requestID string
	// Raw payload bytes. NOTE(review): not populated by newRequestError in
	// this file — presumably set elsewhere; confirm before relying on it.
	bytes []byte
}
// newRequestError returns a wrapped error with additional information for
// request status code, and service requestID.
//
// Should be used to wrap all request which involve service requests. Even if
// the request failed without a service response, but had an HTTP status code
// that may be meaningful.
//
// Also wraps original errors via the baseError.
func newRequestError(err Error, statusCode int, requestID string) *requestError {
	return &requestError{
		awsError:   err,
		statusCode: statusCode,
		requestID:  requestID,
	}
}
// Error returns the string representation of the error.
// Satisfies the error interface.
// Appends "status code: ..., request id: ..." as the extra line of the
// formatted message.
func (r requestError) Error() string {
	extra := fmt.Sprintf("status code: %d, request id: %s",
		r.statusCode, r.requestID)
	return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (r requestError) String() string {
	return r.Error()
}
// StatusCode returns the wrapped status code for the error.
func (r requestError) StatusCode() int {
	return r.statusCode
}
// RequestID returns the wrapped requestID.
func (r requestError) RequestID() string {
	return r.requestID
}
| // OrigErrs returns the original errors if one was set. An empty slice is | |||
| // returned if no error was set. | |||
| func (r requestError) OrigErrs() []error { | |||
| if b, ok := r.awsError.(BatchedErrors); ok { | |||
| return b.OrigErrs() | |||
| } | |||
| return []error{r.OrigErr()} | |||
| } | |||
// unmarshalError wraps an unmarshal failure together with the raw bytes
// that could not be unmarshaled.
type unmarshalError struct {
	awsError
	// The payload that failed to unmarshal.
	bytes []byte
}
// Error returns the string representation of the error.
// Satisfies the error interface.
// A hex dump of the offending bytes is included as the extra detail line.
func (e unmarshalError) Error() string {
	extra := hex.Dump(e.bytes)
	return SprintError(e.Code(), e.Message(), extra, e.OrigErr())
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (e unmarshalError) String() string {
	return e.Error()
}
// Bytes returns the bytes that failed to unmarshal. The internal slice is
// returned directly, not a copy.
func (e unmarshalError) Bytes() []byte {
	return e.bytes
}
// An error list that satisfies the golang error interface.
type errorList []error

// Error returns the string representation of the error.
//
// The messages of all contained errors are joined with single newlines and
// no trailing newline (matching the historical output format, which some
// unit tests depend on).
//
// Satisfies the error interface.
func (e errorList) Error() string {
	var msg string
	for i, err := range e {
		if i > 0 {
			msg += "\n"
		}
		msg += err.Error()
	}
	return msg
}
| @@ -0,0 +1,108 @@ | |||
| package awsutil | |||
| import ( | |||
| "io" | |||
| "reflect" | |||
| "time" | |||
| ) | |||
// Copy deeply copies a src structure to dst. Useful for copying request and
// response structures.
//
// Can copy between structs of different type, but will only copy fields which
// are assignable, and exist in both structs. Fields which are not assignable,
// or do not exist in both structs are ignored.
//
// Panics if dst is an untyped nil (reflect.ValueOf returns an invalid Value).
func Copy(dst, src interface{}) {
	dstval := reflect.ValueOf(dst)
	if !dstval.IsValid() {
		panic("Copy dst cannot be nil")
	}
	// root=true: top-of-tree copy semantics apply in rcopy.
	rcopy(dstval, reflect.ValueOf(src), true)
}
// CopyOf returns a copy of src while also allocating the memory for dst.
// src must be a pointer type or this operation will fail (TypeOf(src).Elem()
// panics for non-pointer kinds).
func CopyOf(src interface{}) (dst interface{}) {
	// Allocate a fresh value of src's pointee type, then deep copy into it.
	dsti := reflect.New(reflect.TypeOf(src).Elem())
	dst = dsti.Interface()
	rcopy(dsti, reflect.ValueOf(src), true)
	return
}
// rcopy performs a recursive copy of values from the source to destination.
//
// root is used to skip certain aspects of the copy which are not valid
// for the root node of a object.
func rcopy(dst, src reflect.Value, root bool) {
	if !src.IsValid() {
		return
	}
	switch src.Kind() {
	case reflect.Ptr:
		// io.Reader values are shared by reference, not deep copied —
		// a reader's internal position/state cannot be meaningfully cloned.
		if _, ok := src.Interface().(io.Reader); ok {
			if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
				dst.Elem().Set(src)
			} else if dst.CanSet() {
				dst.Set(src)
			}
		} else {
			e := src.Type().Elem()
			if dst.CanSet() && !src.IsNil() {
				if _, ok := src.Interface().(*time.Time); !ok {
					// Allocate a fresh zero pointee; the recursive call
					// below fills in its contents field by field.
					dst.Set(reflect.New(e))
				} else {
					// time.Time has unexported fields, so copy the whole
					// value at once rather than field by field.
					tempValue := reflect.New(e)
					tempValue.Elem().Set(src.Elem())
					// Sets time.Time's unexported values
					dst.Set(tempValue)
				}
			}
			if src.Elem().IsValid() {
				// Keep the current root state since the depth hasn't changed
				rcopy(dst.Elem(), src.Elem(), root)
			}
		}
	case reflect.Struct:
		// Match fields by name against dst's type, so copying between
		// different struct types copies only the fields they share.
		t := dst.Type()
		for i := 0; i < t.NumField(); i++ {
			name := t.Field(i).Name
			srcVal := src.FieldByName(name)
			dstVal := dst.FieldByName(name)
			if srcVal.IsValid() && dstVal.CanSet() {
				rcopy(dstVal, srcVal, false)
			}
		}
	case reflect.Slice:
		// A nil src slice leaves dst untouched (stays nil/zero).
		if src.IsNil() {
			break
		}
		s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
		dst.Set(s)
		for i := 0; i < src.Len(); i++ {
			rcopy(dst.Index(i), src.Index(i), false)
		}
	case reflect.Map:
		// A nil src map leaves dst untouched (stays nil/zero).
		if src.IsNil() {
			break
		}
		s := reflect.MakeMap(src.Type())
		dst.Set(s)
		for _, k := range src.MapKeys() {
			v := src.MapIndex(k)
			// Map values are not addressable; copy via a fresh value.
			v2 := reflect.New(v.Type()).Elem()
			rcopy(v2, v, false)
			dst.SetMapIndex(k, v2)
		}
	default:
		// Assign the value if possible. If its not assignable, the value would
		// need to be converted and the impact of that may be unexpected, or is
		// not compatible with the dst type.
		if src.Type().AssignableTo(dst.Type()) {
			dst.Set(src)
		}
	}
}
| @@ -0,0 +1,27 @@ | |||
| package awsutil | |||
| import ( | |||
| "reflect" | |||
| ) | |||
// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
// In addition to this, this method will also dereference the input values if
// possible so the DeepEqual performed will not fail if one parameter is a
// pointer and the other is not.
//
// DeepEqual will not perform indirection of nested values of the input parameters.
func DeepEqual(a, b interface{}) bool {
	ra := reflect.Indirect(reflect.ValueOf(a))
	rb := reflect.Indirect(reflect.ValueOf(b))

	raValid, rbValid := ra.IsValid(), rb.IsValid()
	switch {
	case !raValid && !rbValid:
		// Both dereference to nothing (nil / nil pointer). They are equal
		// exactly when their dynamic types match.
		return reflect.TypeOf(a) == reflect.TypeOf(b)
	case raValid != rbValid:
		// One side is a nil/invalid value and the other is not.
		return false
	}
	return reflect.DeepEqual(ra.Interface(), rb.Interface())
}
| @@ -0,0 +1,221 @@ | |||
| package awsutil | |||
| import ( | |||
| "reflect" | |||
| "regexp" | |||
| "strconv" | |||
| "strings" | |||
| "github.com/jmespath/go-jmespath" | |||
| ) | |||
| var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) | |||
// rValuesAtPath returns a slice of values found in value v. The values
// in v are explored recursively so all nested values are collected.
//
// path is a dot-separated lexical path; "||" separates alternative paths
// (first alternative producing values wins). "*" selects all struct
// fields, and a component may carry a trailing "[n]" / "[]" index.
// createPath materializes nil intermediate pointers so the path can be
// written to; caseSensitive controls field-name matching; nilTerm makes a
// terminal pointer component be zeroed and returned (used when setting a
// value to nil).
func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
	// Try each "||"-separated alternative in order; first non-empty
	// result wins.
	pathparts := strings.Split(path, "||")
	if len(pathparts) > 1 {
		for _, pathpart := range pathparts {
			vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
			if len(vals) > 0 {
				return vals
			}
		}
		return nil
	}

	// Walk one dot-separated component at a time; values holds every
	// match at the current depth.
	values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
	components := strings.Split(path, ".")
	for len(values) > 0 && len(components) > 0 {
		var index *int64
		var indexStar bool
		c := strings.TrimSpace(components[0])
		if c == "" { // no actual component, illegal syntax
			return nil
		} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
			// TODO normalize case for user
			return nil // don't support unexported fields
		}

		// parse this component: strip a trailing "[n]" / "[]" index
		// expression into index / indexStar.
		if m := indexRe.FindStringSubmatch(c); m != nil {
			c = m[1]
			if m[2] == "" {
				index = nil
				indexStar = true
			} else {
				// Error ignored: the regexp guarantees m[2] is an integer.
				i, _ := strconv.ParseInt(m[2], 10, 32)
				index = &i
				indexStar = false
			}
		}

		nextvals := []reflect.Value{}
		for _, value := range values {
			// pull component name out of struct member
			if value.Kind() != reflect.Struct {
				continue
			}

			if c == "*" { // pull all members
				for i := 0; i < value.NumField(); i++ {
					if f := reflect.Indirect(value.Field(i)); f.IsValid() {
						nextvals = append(nextvals, f)
					}
				}
				continue
			}

			value = value.FieldByNameFunc(func(name string) bool {
				if c == name {
					return true
				} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
					return true
				}
				return false
			})

			// Terminal pointer component with nilTerm set: zero it and
			// return it so the caller can assign through it.
			if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
				if !value.IsNil() {
					value.Set(reflect.Zero(value.Type()))
				}
				return []reflect.Value{value}
			}

			if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
				// TODO if the value is the terminus it should not be created
				// if the value to be set to its position is nil.
				value.Set(reflect.New(value.Type().Elem()))
				value = value.Elem()
			} else {
				value = reflect.Indirect(value)
			}

			// A nil slice/map is treated as "no value" unless we are
			// creating the path.
			if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
				if !createPath && value.IsNil() {
					value = reflect.ValueOf(nil)
				}
			}

			if value.IsValid() {
				nextvals = append(nextvals, value)
			}
		}
		values = nextvals

		// Apply the "[n]" / "[]" index expression, if any, to each
		// slice value gathered above.
		if indexStar || index != nil {
			nextvals = []reflect.Value{}
			for _, valItem := range values {
				value := reflect.Indirect(valItem)
				if value.Kind() != reflect.Slice {
					continue
				}

				if indexStar { // grab all indices
					for i := 0; i < value.Len(); i++ {
						idx := reflect.Indirect(value.Index(i))
						if idx.IsValid() {
							nextvals = append(nextvals, idx)
						}
					}
					continue
				}

				// pull out index
				i := int(*index)
				if i >= value.Len() { // check out of bounds
					if createPath {
						// TODO resize slice
					} else {
						continue
					}
				} else if i < 0 { // support negative indexing
					// NOTE(review): an index more negative than -Len is not
					// re-checked here and would panic in Index(i) below —
					// TODO confirm callers never pass such an index.
					i = value.Len() + i
				}
				value = reflect.Indirect(value.Index(i))

				if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
					if !createPath && value.IsNil() {
						value = reflect.ValueOf(nil)
					}
				}

				if value.IsValid() {
					nextvals = append(nextvals, value)
				}
			}
			values = nextvals
		}

		components = components[1:]
	}
	return values
}
| // ValuesAtPath returns a list of values at the case insensitive lexical | |||
| // path inside of a structure. | |||
| func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { | |||
| result, err := jmespath.Search(path, i) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| v := reflect.ValueOf(result) | |||
| if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { | |||
| return nil, nil | |||
| } | |||
| if s, ok := result.([]interface{}); ok { | |||
| return s, err | |||
| } | |||
| if v.Kind() == reflect.Map && v.Len() == 0 { | |||
| return nil, nil | |||
| } | |||
| if v.Kind() == reflect.Slice { | |||
| out := make([]interface{}, v.Len()) | |||
| for i := 0; i < v.Len(); i++ { | |||
| out[i] = v.Index(i).Interface() | |||
| } | |||
| return out, nil | |||
| } | |||
| return []interface{}{result}, nil | |||
| } | |||
| // SetValueAtPath sets a value at the case insensitive lexical path inside | |||
| // of a structure. | |||
| func SetValueAtPath(i interface{}, path string, v interface{}) { | |||
| rvals := rValuesAtPath(i, path, true, false, v == nil) | |||
| for _, rval := range rvals { | |||
| if rval.Kind() == reflect.Ptr && rval.IsNil() { | |||
| continue | |||
| } | |||
| setValue(rval, v) | |||
| } | |||
| } | |||
| func setValue(dstVal reflect.Value, src interface{}) { | |||
| if dstVal.Kind() == reflect.Ptr { | |||
| dstVal = reflect.Indirect(dstVal) | |||
| } | |||
| srcVal := reflect.ValueOf(src) | |||
| if !srcVal.IsValid() { // src is literal nil | |||
| if dstVal.CanAddr() { | |||
| // Convert to pointer so that pointer's value can be nil'ed | |||
| // dstVal = dstVal.Addr() | |||
| } | |||
| dstVal.Set(reflect.Zero(dstVal.Type())) | |||
| } else if srcVal.Kind() == reflect.Ptr { | |||
| if srcVal.IsNil() { | |||
| srcVal = reflect.Zero(dstVal.Type()) | |||
| } else { | |||
| srcVal = reflect.ValueOf(src).Elem() | |||
| } | |||
| dstVal.Set(srcVal) | |||
| } else { | |||
| dstVal.Set(srcVal) | |||
| } | |||
| } | |||
| @@ -0,0 +1,113 @@ | |||
| package awsutil | |||
| import ( | |||
| "bytes" | |||
| "fmt" | |||
| "io" | |||
| "reflect" | |||
| "strings" | |||
| ) | |||
| // Prettify returns the string representation of a value. | |||
| func Prettify(i interface{}) string { | |||
| var buf bytes.Buffer | |||
| prettify(reflect.ValueOf(i), 0, &buf) | |||
| return buf.String() | |||
| } | |||
// prettify will recursively walk value v to build a textual
// representation of the value, written to buf at the given indent level.
func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
	// Fully dereference pointers before inspecting the kind.
	for v.Kind() == reflect.Ptr {
		v = v.Elem()
	}

	switch v.Kind() {
	case reflect.Struct:
		strtype := v.Type().String()
		if strtype == "time.Time" {
			fmt.Fprintf(buf, "%s", v.Interface())
			break
		} else if strings.HasPrefix(strtype, "io.") {
			// io.* structs are opaque streams; don't dump their internals.
			buf.WriteString("<buffer>")
			break
		}

		buf.WriteString("{\n")

		// Collect printable (exported, set) field names first so the
		// trailing-comma logic below knows the final count.
		names := []string{}
		for i := 0; i < v.Type().NumField(); i++ {
			name := v.Type().Field(i).Name
			f := v.Field(i)
			if name[0:1] == strings.ToLower(name[0:1]) {
				continue // ignore unexported fields
			}
			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
				continue // ignore unset fields
			}
			names = append(names, name)
		}

		for i, n := range names {
			val := v.FieldByName(n)
			buf.WriteString(strings.Repeat(" ", indent+2))
			buf.WriteString(n + ": ")
			prettify(val, indent+2, buf)

			if i < len(names)-1 {
				buf.WriteString(",\n")
			}
		}

		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
	case reflect.Slice:
		strtype := v.Type().String()
		if strtype == "[]uint8" {
			// Raw bytes: print only the length, not the contents.
			fmt.Fprintf(buf, "<binary> len %d", v.Len())
			break
		}

		// Slices of up to 3 elements print on one line; longer ones get
		// one element per line at the next indent level.
		nl, id, id2 := "", "", ""
		if v.Len() > 3 {
			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
		}
		buf.WriteString("[" + nl)
		for i := 0; i < v.Len(); i++ {
			buf.WriteString(id2)
			prettify(v.Index(i), indent+2, buf)

			if i < v.Len()-1 {
				buf.WriteString("," + nl)
			}
		}

		buf.WriteString(nl + id + "]")
	case reflect.Map:
		// NOTE(review): map iteration order is not deterministic, so the
		// rendered key order can vary between calls.
		buf.WriteString("{\n")

		for i, k := range v.MapKeys() {
			buf.WriteString(strings.Repeat(" ", indent+2))
			buf.WriteString(k.String() + ": ")
			prettify(v.MapIndex(k), indent+2, buf)

			if i < v.Len()-1 {
				buf.WriteString(",\n")
			}
		}

		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
	default:
		if !v.IsValid() {
			fmt.Fprint(buf, "<invalid value>")
			return
		}
		// Quote strings, print readers by address, %v everything else.
		format := "%v"
		switch v.Interface().(type) {
		case string:
			format = "%q"
		case io.ReadSeeker, io.Reader:
			format = "buffer(%p)"
		}
		fmt.Fprintf(buf, format, v.Interface())
	}
}
| @@ -0,0 +1,88 @@ | |||
| package awsutil | |||
| import ( | |||
| "bytes" | |||
| "fmt" | |||
| "reflect" | |||
| "strings" | |||
| ) | |||
| // StringValue returns the string representation of a value. | |||
| func StringValue(i interface{}) string { | |||
| var buf bytes.Buffer | |||
| stringValue(reflect.ValueOf(i), 0, &buf) | |||
| return buf.String() | |||
| } | |||
// stringValue recursively walks value v and writes a textual
// representation to buf at the given indent level, masking any struct
// field tagged `sensitive:"true"`.
func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
	// Fully dereference pointers before inspecting the kind.
	for v.Kind() == reflect.Ptr {
		v = v.Elem()
	}

	switch v.Kind() {
	case reflect.Struct:
		buf.WriteString("{\n")

		for i := 0; i < v.Type().NumField(); i++ {
			ft := v.Type().Field(i)
			fv := v.Field(i)

			if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
				continue // ignore unexported fields
			}
			if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
				continue // ignore unset fields
			}

			buf.WriteString(strings.Repeat(" ", indent+2))
			buf.WriteString(ft.Name + ": ")

			if tag := ft.Tag.Get("sensitive"); tag == "true" {
				// Mask sensitive fields rather than printing their value.
				buf.WriteString("<sensitive>")
			} else {
				stringValue(fv, indent+2, buf)
			}

			// NOTE(review): unlike prettify, every field — including the
			// last — gets a trailing ",\n" here, so struct output carries a
			// dangling comma before "}". Confirm whether this is intended.
			buf.WriteString(",\n")
		}

		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
	case reflect.Slice:
		// Slices of up to 3 elements print on one line; longer ones get
		// one element per line at the next indent level.
		nl, id, id2 := "", "", ""
		if v.Len() > 3 {
			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
		}
		buf.WriteString("[" + nl)
		for i := 0; i < v.Len(); i++ {
			buf.WriteString(id2)
			stringValue(v.Index(i), indent+2, buf)

			if i < v.Len()-1 {
				buf.WriteString("," + nl)
			}
		}

		buf.WriteString(nl + id + "]")
	case reflect.Map:
		// NOTE(review): map iteration order is not deterministic, so the
		// rendered key order can vary between calls.
		buf.WriteString("{\n")

		for i, k := range v.MapKeys() {
			buf.WriteString(strings.Repeat(" ", indent+2))
			buf.WriteString(k.String() + ": ")
			stringValue(v.MapIndex(k), indent+2, buf)

			if i < v.Len()-1 {
				buf.WriteString(",\n")
			}
		}

		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
	default:
		// Scalars: quote strings, %v everything else.
		format := "%v"
		switch v.Interface().(type) {
		case string:
			format = "%q"
		}
		fmt.Fprintf(buf, format, v.Interface())
	}
}
| @@ -0,0 +1,97 @@ | |||
| package client | |||
| import ( | |||
| "fmt" | |||
| "github.com/aws/aws-sdk-go/aws" | |||
| "github.com/aws/aws-sdk-go/aws/client/metadata" | |||
| "github.com/aws/aws-sdk-go/aws/request" | |||
| ) | |||
// A Config provides configuration to a service client instance.
type Config struct {
	// Config is the base SDK configuration the client was resolved from.
	Config *aws.Config

	// Handlers are the request lifecycle handlers for the client.
	Handlers request.Handlers

	// PartitionID identifies the AWS partition the endpoint belongs to
	// — presumably e.g. "aws" or "aws-cn"; confirm against the resolver.
	PartitionID string

	// Endpoint is the resolved service endpoint URL.
	Endpoint string

	// SigningRegion is the region used when signing requests.
	SigningRegion string

	// SigningName is the service name used when signing requests.
	SigningName string

	// States that the signing name did not come from a modeled source but
	// was derived based on other data. Used by service client constructors
	// to determine if the signin name can be overridden based on metadata the
	// service has.
	SigningNameDerived bool
}
// ConfigProvider provides a generic way for a service client to receive
// the ClientConfig without circular dependencies.
type ConfigProvider interface {
	// ClientConfig returns the client configuration for the named service,
	// optionally merged with additional aws.Config overrides.
	ClientConfig(serviceName string, cfgs ...*aws.Config) Config
}
// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
// resolve the endpoint automatically. The service client's endpoint must be
// provided via the aws.Config.Endpoint field.
type ConfigNoResolveEndpointProvider interface {
	// ClientConfigNoResolveEndpoint returns the client configuration
	// without performing endpoint resolution.
	ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
}
// A Client implements the base client request and response handling
// used by all service clients.
type Client struct {
	// Retryer decides whether and when failed requests are retried.
	request.Retryer
	// ClientInfo carries the embedded service client metadata.
	metadata.ClientInfo

	// Config is the aws.Config the client was constructed with.
	Config aws.Config
	// Handlers are the request lifecycle handlers run for each request.
	Handlers request.Handlers
}
| // New will return a pointer to a new initialized service client. | |||
| func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { | |||
| svc := &Client{ | |||
| Config: cfg, | |||
| ClientInfo: info, | |||
| Handlers: handlers.Copy(), | |||
| } | |||
| switch retryer, ok := cfg.Retryer.(request.Retryer); { | |||
| case ok: | |||
| svc.Retryer = retryer | |||
| case cfg.Retryer != nil && cfg.Logger != nil: | |||
| s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) | |||
| cfg.Logger.Log(s) | |||
| fallthrough | |||
| default: | |||
| maxRetries := aws.IntValue(cfg.MaxRetries) | |||
| if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { | |||
| maxRetries = DefaultRetryerMaxNumRetries | |||
| } | |||
| svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} | |||
| } | |||
| svc.AddDebugHandlers() | |||
| for _, option := range options { | |||
| option(svc) | |||
| } | |||
| return svc | |||
| } | |||
| // NewRequest returns a new Request pointer for the service API | |||
| // operation and parameters. | |||
| func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { | |||
| return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) | |||
| } | |||
| // AddDebugHandlers injects debug logging handlers into the service to log request | |||
| // debug information. | |||
| func (c *Client) AddDebugHandlers() { | |||
| if !c.Config.LogLevel.AtLeast(aws.LogDebug) { | |||
| return | |||
| } | |||
| c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) | |||
| c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) | |||
| } | |||
| @@ -0,0 +1,177 @@ | |||
| package client | |||
| import ( | |||
| "math" | |||
| "strconv" | |||
| "time" | |||
| "github.com/aws/aws-sdk-go/aws/request" | |||
| "github.com/aws/aws-sdk-go/internal/sdkrand" | |||
| ) | |||
// DefaultRetryer implements basic retry logic using exponential backoff for
// most services. If you want to implement custom retry logic, you can implement the
// request.Retryer interface.
//
type DefaultRetryer struct {
	// NumMaxRetries is the maximum number of retries that will be performed.
	// By default, this is zero.
	NumMaxRetries int

	// MinRetryDelay is the minimum retry delay after which retry will be performed.
	// If not set, the value is 0ns.
	MinRetryDelay time.Duration

	// MinThrottleDelay is the minimum retry delay when throttled.
	// If not set, the value is 0ns.
	MinThrottleDelay time.Duration

	// MaxRetryDelay is the maximum retry delay that will be used.
	// If not set, the value is 0ns.
	MaxRetryDelay time.Duration

	// MaxThrottleDelay is the maximum retry delay when throttled.
	// If not set, the value is 0ns.
	MaxThrottleDelay time.Duration
}
// Default bounds applied by setRetryerDefaults when the corresponding
// DefaultRetryer field is left at its zero value.
const (
	// DefaultRetryerMaxNumRetries sets maximum number of retries
	DefaultRetryerMaxNumRetries = 3

	// DefaultRetryerMinRetryDelay sets minimum retry delay
	DefaultRetryerMinRetryDelay = 30 * time.Millisecond

	// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
	DefaultRetryerMinThrottleDelay = 500 * time.Millisecond

	// DefaultRetryerMaxRetryDelay sets maximum retry delay
	DefaultRetryerMaxRetryDelay = 300 * time.Second

	// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
	DefaultRetryerMaxThrottleDelay = 300 * time.Second
)
// MaxRetries returns the maximum number of retries the service will
// perform for an individual API request.
func (d DefaultRetryer) MaxRetries() int {
	return d.NumMaxRetries
}
| // setRetryerDefaults sets the default values of the retryer if not set | |||
| func (d *DefaultRetryer) setRetryerDefaults() { | |||
| if d.MinRetryDelay == 0 { | |||
| d.MinRetryDelay = DefaultRetryerMinRetryDelay | |||
| } | |||
| if d.MaxRetryDelay == 0 { | |||
| d.MaxRetryDelay = DefaultRetryerMaxRetryDelay | |||
| } | |||
| if d.MinThrottleDelay == 0 { | |||
| d.MinThrottleDelay = DefaultRetryerMinThrottleDelay | |||
| } | |||
| if d.MaxThrottleDelay == 0 { | |||
| d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay | |||
| } | |||
| } | |||
// RetryRules returns the delay duration before retrying this request again
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {

	// if number of max retries is zero, no retries will be performed.
	if d.NumMaxRetries == 0 {
		return 0
	}

	// Sets default value for retryer members
	d.setRetryerDefaults()

	// minDelay is the minimum retryer delay
	minDelay := d.MinRetryDelay

	var initialDelay time.Duration

	isThrottle := r.IsErrorThrottle()
	if isThrottle {
		// Honor a server-supplied Retry-After value in addition to the
		// computed backoff, and switch to the throttle-specific bounds.
		if delay, ok := getRetryAfterDelay(r); ok {
			initialDelay = delay
		}
		minDelay = d.MinThrottleDelay
	}

	retryCount := r.RetryCount

	// maxDelay the maximum retryer delay
	maxDelay := d.MaxRetryDelay

	if isThrottle {
		maxDelay = d.MaxThrottleDelay
	}

	var delay time.Duration

	// Logic to cap the retry count based on the minDelay provided —
	// guards the 1<<retryCount shift below against 64-bit overflow.
	actualRetryCount := int(math.Log2(float64(minDelay))) + 1
	if actualRetryCount < 63-retryCount {
		// Exponential backoff: 2^retryCount times a jittered minDelay,
		// clamped to a jittered value around maxDelay/2..maxDelay.
		delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
		if delay > maxDelay {
			delay = getJitterDelay(maxDelay / 2)
		}
	} else {
		// Shift would overflow; use the clamped jittered delay directly.
		delay = getJitterDelay(maxDelay / 2)
	}
	return delay + initialDelay
}
| // getJitterDelay returns a jittered delay for retry | |||
| func getJitterDelay(duration time.Duration) time.Duration { | |||
| return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration)) | |||
| } | |||
| // ShouldRetry returns true if the request should be retried. | |||
| func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { | |||
| // ShouldRetry returns false if number of max retries is 0. | |||
| if d.NumMaxRetries == 0 { | |||
| return false | |||
| } | |||
| // If one of the other handlers already set the retry state | |||
| // we don't want to override it based on the service's state | |||
| if r.Retryable != nil { | |||
| return *r.Retryable | |||
| } | |||
| return r.IsErrorRetryable() || r.IsErrorThrottle() | |||
| } | |||
| // This will look in the Retry-After header, RFC 7231, for how long | |||
| // it will wait before attempting another request | |||
| func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { | |||
| if !canUseRetryAfterHeader(r) { | |||
| return 0, false | |||
| } | |||
| delayStr := r.HTTPResponse.Header.Get("Retry-After") | |||
| if len(delayStr) == 0 { | |||
| return 0, false | |||
| } | |||
| delay, err := strconv.Atoi(delayStr) | |||
| if err != nil { | |||
| return 0, false | |||
| } | |||
| return time.Duration(delay) * time.Second, true | |||
| } | |||
| // Will look at the status code to see if the retry header pertains to | |||
| // the status code. | |||
| func canUseRetryAfterHeader(r *request.Request) bool { | |||
| switch r.HTTPResponse.StatusCode { | |||
| case 429: | |||
| case 503: | |||
| default: | |||
| return false | |||
| } | |||
| return true | |||
| } | |||