| @@ -143,17 +143,6 @@ | |||
| packages = ["."] | |||
| revision = "098da33fde5f9220736531b3cb26a2dec86a8367" | |||
| [[projects]] | |||
| name = "github.com/coreos/etcd" | |||
| packages = ["error"] | |||
| revision = "01c303113d0a3d5a8075864321c3aedb72035bdd" | |||
| [[projects]] | |||
| branch = "master" | |||
| name = "github.com/coreos/go-etcd" | |||
| packages = ["etcd"] | |||
| revision = "003851be7bb0694fe3cc457a49529a19388ee7cf" | |||
| [[projects]] | |||
| branch = "master" | |||
| name = "github.com/couchbase/vellum" | |||
| @@ -306,11 +295,6 @@ | |||
| packages = ["."] | |||
| revision = "cb1d0ca71f42d3ee1bf4aba7daa16099bc31a7e9" | |||
| [[projects]] | |||
| name = "github.com/go-xorm/tidb" | |||
| packages = ["."] | |||
| revision = "21e49190ce47a766fa741cf7edc831a30c12c6ac" | |||
| [[projects]] | |||
| name = "github.com/go-xorm/xorm" | |||
| packages = ["."] | |||
| @@ -370,11 +354,6 @@ | |||
| packages = ["."] | |||
| revision = "8fb95d837f7d6db1913fecfd7bcc5333e6499596" | |||
| [[projects]] | |||
| name = "github.com/juju/errors" | |||
| packages = ["."] | |||
| revision = "b2c7a7da5b2995941048f60146e67702a292e468" | |||
| [[projects]] | |||
| name = "github.com/kballard/go-shellquote" | |||
| packages = ["."] | |||
| @@ -497,134 +476,12 @@ | |||
| packages = ["."] | |||
| revision = "891127d8d1b52734debe1b3c3d7e747502b6c366" | |||
| [[projects]] | |||
| name = "github.com/ngaut/deadline" | |||
| packages = ["."] | |||
| revision = "fae8f9dfd7048de16575b9d4c255278e38c28a4f" | |||
| [[projects]] | |||
| branch = "master" | |||
| name = "github.com/ngaut/go-zookeeper" | |||
| packages = ["zk"] | |||
| revision = "9c3719e318c7cfd072e41eb48cb71fcaa49d5e05" | |||
| [[projects]] | |||
| name = "github.com/ngaut/log" | |||
| packages = ["."] | |||
| revision = "d2af3a61f64d093457fb23b25d20f4ce3cd551ce" | |||
| [[projects]] | |||
| branch = "master" | |||
| name = "github.com/ngaut/pools" | |||
| packages = ["."] | |||
| revision = "b7bc8c42aac787667ba45adea78233f53f548443" | |||
| [[projects]] | |||
| branch = "master" | |||
| name = "github.com/ngaut/sync2" | |||
| packages = ["."] | |||
| revision = "7a24ed77b2efb460c1468b7dc917821c66e80e55" | |||
| [[projects]] | |||
| branch = "master" | |||
| name = "github.com/ngaut/tso" | |||
| packages = [ | |||
| "client", | |||
| "proto", | |||
| "util" | |||
| ] | |||
| revision = "118f6c141d58f1e72577ff61f43f649bf39355ee" | |||
| [[projects]] | |||
| branch = "master" | |||
| name = "github.com/ngaut/zkhelper" | |||
| packages = ["."] | |||
| revision = "6738bdc138d469112c6687fbfcfe049ccabd6a0a" | |||
| [[projects]] | |||
| branch = "master" | |||
| name = "github.com/petar/GoLLRB" | |||
| packages = ["llrb"] | |||
| revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" | |||
| [[projects]] | |||
| name = "github.com/philhofer/fwd" | |||
| packages = ["."] | |||
| revision = "bb6d471dc95d4fe11e432687f8b70ff496cf3136" | |||
| version = "v1.0.0" | |||
| [[projects]] | |||
| name = "github.com/pingcap/go-hbase" | |||
| packages = [ | |||
| ".", | |||
| "iohelper", | |||
| "proto" | |||
| ] | |||
| revision = "7a98d1fe4e9e115de8c77ae0e158c0d08732c550" | |||
| [[projects]] | |||
| branch = "master" | |||
| name = "github.com/pingcap/go-themis" | |||
| packages = [ | |||
| ".", | |||
| "oracle", | |||
| "oracle/oracles" | |||
| ] | |||
| revision = "dbb996606c1d1fe8571fd9ac6da2254c76d2c5c9" | |||
| [[projects]] | |||
| name = "github.com/pingcap/tidb" | |||
| packages = [ | |||
| ".", | |||
| "ast", | |||
| "column", | |||
| "context", | |||
| "ddl", | |||
| "domain", | |||
| "evaluator", | |||
| "executor", | |||
| "infoschema", | |||
| "inspectkv", | |||
| "kv", | |||
| "kv/memkv", | |||
| "meta", | |||
| "meta/autoid", | |||
| "model", | |||
| "mysql", | |||
| "optimizer", | |||
| "optimizer/plan", | |||
| "parser", | |||
| "parser/opcode", | |||
| "perfschema", | |||
| "privilege", | |||
| "privilege/privileges", | |||
| "sessionctx", | |||
| "sessionctx/autocommit", | |||
| "sessionctx/db", | |||
| "sessionctx/forupdate", | |||
| "sessionctx/variable", | |||
| "store/hbase", | |||
| "store/localstore", | |||
| "store/localstore/boltdb", | |||
| "store/localstore/engine", | |||
| "store/localstore/goleveldb", | |||
| "structure", | |||
| "table", | |||
| "table/tables", | |||
| "terror", | |||
| "util", | |||
| "util/bytes", | |||
| "util/charset", | |||
| "util/codec", | |||
| "util/distinct", | |||
| "util/hack", | |||
| "util/segmentmap", | |||
| "util/sqlexec", | |||
| "util/stringutil", | |||
| "util/types" | |||
| ] | |||
| revision = "33197485abe227dcb254644cf5081c9a3c281669" | |||
| [[projects]] | |||
| name = "github.com/pmezard/go-difflib" | |||
| packages = ["difflib"] | |||
| @@ -673,24 +530,6 @@ | |||
| revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" | |||
| version = "v1.2.1" | |||
| [[projects]] | |||
| name = "github.com/syndtr/goleveldb" | |||
| packages = [ | |||
| "leveldb", | |||
| "leveldb/cache", | |||
| "leveldb/comparer", | |||
| "leveldb/errors", | |||
| "leveldb/filter", | |||
| "leveldb/iterator", | |||
| "leveldb/journal", | |||
| "leveldb/memdb", | |||
| "leveldb/opt", | |||
| "leveldb/storage", | |||
| "leveldb/table", | |||
| "leveldb/util" | |||
| ] | |||
| revision = "917f41c560270110ceb73c5b38be2a9127387071" | |||
| [[projects]] | |||
| branch = "master" | |||
| name = "github.com/tinylib/msgp" | |||
| @@ -703,17 +542,6 @@ | |||
| packages = ["."] | |||
| revision = "d21a03e0b1d9fc1df59ff54e7a513655c1748b0c" | |||
| [[projects]] | |||
| name = "github.com/twinj/uuid" | |||
| packages = ["."] | |||
| revision = "89173bcdda19db0eb88aef1e1cb1cb2505561d31" | |||
| version = "0.10.0" | |||
| [[projects]] | |||
| name = "github.com/ugorji/go" | |||
| packages = ["codec"] | |||
| revision = "c062049c1793b01a3cc3fe786108edabbaf7756b" | |||
| [[projects]] | |||
| name = "github.com/urfave/cli" | |||
| packages = ["."] | |||
| @@ -873,6 +701,6 @@ | |||
| [solve-meta] | |||
| analyzer-name = "dep" | |||
| analyzer-version = 1 | |||
| inputs-digest = "96c83a3502bd50c5ca8e4d9b4145172267630270e587c79b7253156725eeb9b8" | |||
| inputs-digest = "59451a3ad1d449f75c5e9035daf542a377c5c4a397e219bebec0aa0007ab9c39" | |||
| solver-name = "gps-cdcl" | |||
| solver-version = 1 | |||
| @@ -30,11 +30,6 @@ ignored = ["google.golang.org/appengine*"] | |||
| revision = "f2499483f923065a842d38eb4c7f1927e6fc6e6d" | |||
| name = "golang.org/x/net" | |||
| [[constraint]] | |||
| #version = "v1.0.0" | |||
| revision = "33197485abe227dcb254644cf5081c9a3c281669" | |||
| name = "github.com/pingcap/tidb" | |||
| [[override]] | |||
| name = "github.com/go-xorm/xorm" | |||
| #version = "0.6.5" | |||
| @@ -1,18 +0,0 @@ | |||
| // +build tidb | |||
| // Copyright 2015 The Gogs Authors. All rights reserved. | |||
| // Use of this source code is governed by a MIT-style | |||
| // license that can be found in the LICENSE file. | |||
| package models | |||
| import ( | |||
| _ "github.com/go-xorm/tidb" | |||
| "github.com/ngaut/log" | |||
| _ "github.com/pingcap/tidb" | |||
| ) | |||
| func init() { | |||
| EnableTiDB = true | |||
| log.SetLevelByString("error") | |||
| } | |||
| @@ -1,202 +0,0 @@ | |||
| Apache License | |||
| Version 2.0, January 2004 | |||
| http://www.apache.org/licenses/ | |||
| TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
| 1. Definitions. | |||
| "License" shall mean the terms and conditions for use, reproduction, | |||
| and distribution as defined by Sections 1 through 9 of this document. | |||
| "Licensor" shall mean the copyright owner or entity authorized by | |||
| the copyright owner that is granting the License. | |||
| "Legal Entity" shall mean the union of the acting entity and all | |||
| other entities that control, are controlled by, or are under common | |||
| control with that entity. For the purposes of this definition, | |||
| "control" means (i) the power, direct or indirect, to cause the | |||
| direction or management of such entity, whether by contract or | |||
| otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
| outstanding shares, or (iii) beneficial ownership of such entity. | |||
| "You" (or "Your") shall mean an individual or Legal Entity | |||
| exercising permissions granted by this License. | |||
| "Source" form shall mean the preferred form for making modifications, | |||
| including but not limited to software source code, documentation | |||
| source, and configuration files. | |||
| "Object" form shall mean any form resulting from mechanical | |||
| transformation or translation of a Source form, including but | |||
| not limited to compiled object code, generated documentation, | |||
| and conversions to other media types. | |||
| "Work" shall mean the work of authorship, whether in Source or | |||
| Object form, made available under the License, as indicated by a | |||
| copyright notice that is included in or attached to the work | |||
| (an example is provided in the Appendix below). | |||
| "Derivative Works" shall mean any work, whether in Source or Object | |||
| form, that is based on (or derived from) the Work and for which the | |||
| editorial revisions, annotations, elaborations, or other modifications | |||
| represent, as a whole, an original work of authorship. For the purposes | |||
| of this License, Derivative Works shall not include works that remain | |||
| separable from, or merely link (or bind by name) to the interfaces of, | |||
| the Work and Derivative Works thereof. | |||
| "Contribution" shall mean any work of authorship, including | |||
| the original version of the Work and any modifications or additions | |||
| to that Work or Derivative Works thereof, that is intentionally | |||
| submitted to Licensor for inclusion in the Work by the copyright owner | |||
| or by an individual or Legal Entity authorized to submit on behalf of | |||
| the copyright owner. For the purposes of this definition, "submitted" | |||
| means any form of electronic, verbal, or written communication sent | |||
| to the Licensor or its representatives, including but not limited to | |||
| communication on electronic mailing lists, source code control systems, | |||
| and issue tracking systems that are managed by, or on behalf of, the | |||
| Licensor for the purpose of discussing and improving the Work, but | |||
| excluding communication that is conspicuously marked or otherwise | |||
| designated in writing by the copyright owner as "Not a Contribution." | |||
| "Contributor" shall mean Licensor and any individual or Legal Entity | |||
| on behalf of whom a Contribution has been received by Licensor and | |||
| subsequently incorporated within the Work. | |||
| 2. Grant of Copyright License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| copyright license to reproduce, prepare Derivative Works of, | |||
| publicly display, publicly perform, sublicense, and distribute the | |||
| Work and such Derivative Works in Source or Object form. | |||
| 3. Grant of Patent License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| (except as stated in this section) patent license to make, have made, | |||
| use, offer to sell, sell, import, and otherwise transfer the Work, | |||
| where such license applies only to those patent claims licensable | |||
| by such Contributor that are necessarily infringed by their | |||
| Contribution(s) alone or by combination of their Contribution(s) | |||
| with the Work to which such Contribution(s) was submitted. If You | |||
| institute patent litigation against any entity (including a | |||
| cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
| or a Contribution incorporated within the Work constitutes direct | |||
| or contributory patent infringement, then any patent licenses | |||
| granted to You under this License for that Work shall terminate | |||
| as of the date such litigation is filed. | |||
| 4. Redistribution. You may reproduce and distribute copies of the | |||
| Work or Derivative Works thereof in any medium, with or without | |||
| modifications, and in Source or Object form, provided that You | |||
| meet the following conditions: | |||
| (a) You must give any other recipients of the Work or | |||
| Derivative Works a copy of this License; and | |||
| (b) You must cause any modified files to carry prominent notices | |||
| stating that You changed the files; and | |||
| (c) You must retain, in the Source form of any Derivative Works | |||
| that You distribute, all copyright, patent, trademark, and | |||
| attribution notices from the Source form of the Work, | |||
| excluding those notices that do not pertain to any part of | |||
| the Derivative Works; and | |||
| (d) If the Work includes a "NOTICE" text file as part of its | |||
| distribution, then any Derivative Works that You distribute must | |||
| include a readable copy of the attribution notices contained | |||
| within such NOTICE file, excluding those notices that do not | |||
| pertain to any part of the Derivative Works, in at least one | |||
| of the following places: within a NOTICE text file distributed | |||
| as part of the Derivative Works; within the Source form or | |||
| documentation, if provided along with the Derivative Works; or, | |||
| within a display generated by the Derivative Works, if and | |||
| wherever such third-party notices normally appear. The contents | |||
| of the NOTICE file are for informational purposes only and | |||
| do not modify the License. You may add Your own attribution | |||
| notices within Derivative Works that You distribute, alongside | |||
| or as an addendum to the NOTICE text from the Work, provided | |||
| that such additional attribution notices cannot be construed | |||
| as modifying the License. | |||
| You may add Your own copyright statement to Your modifications and | |||
| may provide additional or different license terms and conditions | |||
| for use, reproduction, or distribution of Your modifications, or | |||
| for any such Derivative Works as a whole, provided Your use, | |||
| reproduction, and distribution of the Work otherwise complies with | |||
| the conditions stated in this License. | |||
| 5. Submission of Contributions. Unless You explicitly state otherwise, | |||
| any Contribution intentionally submitted for inclusion in the Work | |||
| by You to the Licensor shall be under the terms and conditions of | |||
| this License, without any additional terms or conditions. | |||
| Notwithstanding the above, nothing herein shall supersede or modify | |||
| the terms of any separate license agreement you may have executed | |||
| with Licensor regarding such Contributions. | |||
| 6. Trademarks. This License does not grant permission to use the trade | |||
| names, trademarks, service marks, or product names of the Licensor, | |||
| except as required for reasonable and customary use in describing the | |||
| origin of the Work and reproducing the content of the NOTICE file. | |||
| 7. Disclaimer of Warranty. Unless required by applicable law or | |||
| agreed to in writing, Licensor provides the Work (and each | |||
| Contributor provides its Contributions) on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
| implied, including, without limitation, any warranties or conditions | |||
| of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
| PARTICULAR PURPOSE. You are solely responsible for determining the | |||
| appropriateness of using or redistributing the Work and assume any | |||
| risks associated with Your exercise of permissions under this License. | |||
| 8. Limitation of Liability. In no event and under no legal theory, | |||
| whether in tort (including negligence), contract, or otherwise, | |||
| unless required by applicable law (such as deliberate and grossly | |||
| negligent acts) or agreed to in writing, shall any Contributor be | |||
| liable to You for damages, including any direct, indirect, special, | |||
| incidental, or consequential damages of any character arising as a | |||
| result of this License or out of the use or inability to use the | |||
| Work (including but not limited to damages for loss of goodwill, | |||
| work stoppage, computer failure or malfunction, or any and all | |||
| other commercial damages or losses), even if such Contributor | |||
| has been advised of the possibility of such damages. | |||
| 9. Accepting Warranty or Additional Liability. While redistributing | |||
| the Work or Derivative Works thereof, You may choose to offer, | |||
| and charge a fee for, acceptance of support, warranty, indemnity, | |||
| or other liability obligations and/or rights consistent with this | |||
| License. However, in accepting such obligations, You may act only | |||
| on Your own behalf and on Your sole responsibility, not on behalf | |||
| of any other Contributor, and only if You agree to indemnify, | |||
| defend, and hold each Contributor harmless for any liability | |||
| incurred by, or claims asserted against, such Contributor by reason | |||
| of your accepting any such warranty or additional liability. | |||
| END OF TERMS AND CONDITIONS | |||
| APPENDIX: How to apply the Apache License to your work. | |||
| To apply the Apache License to your work, attach the following | |||
| boilerplate notice, with the fields enclosed by brackets "[]" | |||
| replaced with your own identifying information. (Don't include | |||
| the brackets!) The text should be enclosed in the appropriate | |||
| comment syntax for the file format. We also recommend that a | |||
| file or class name and description of purpose be included on the | |||
| same "printed page" as the copyright notice for easier | |||
| identification within third-party archives. | |||
| Copyright [yyyy] [name of copyright owner] | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| @@ -1,5 +0,0 @@ | |||
| CoreOS Project | |||
| Copyright 2014 CoreOS, Inc | |||
| This product includes software developed at CoreOS, Inc. | |||
| (http://www.coreos.com/). | |||
| @@ -1,162 +0,0 @@ | |||
| // Copyright 2015 CoreOS, Inc. | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| // Package error describes errors in etcd project. When any change happens, | |||
| // Documentation/errorcode.md needs to be updated correspondingly. | |||
| package error | |||
| import ( | |||
| "encoding/json" | |||
| "fmt" | |||
| "net/http" | |||
| ) | |||
| var errors = map[int]string{ | |||
| // command related errors | |||
| EcodeKeyNotFound: "Key not found", | |||
| EcodeTestFailed: "Compare failed", //test and set | |||
| EcodeNotFile: "Not a file", | |||
| ecodeNoMorePeer: "Reached the max number of peers in the cluster", | |||
| EcodeNotDir: "Not a directory", | |||
| EcodeNodeExist: "Key already exists", // create | |||
| ecodeKeyIsPreserved: "The prefix of given key is a keyword in etcd", | |||
| EcodeRootROnly: "Root is read only", | |||
| EcodeDirNotEmpty: "Directory not empty", | |||
| ecodeExistingPeerAddr: "Peer address has existed", | |||
| EcodeUnauthorized: "The request requires user authentication", | |||
| // Post form related errors | |||
| ecodeValueRequired: "Value is Required in POST form", | |||
| EcodePrevValueRequired: "PrevValue is Required in POST form", | |||
| EcodeTTLNaN: "The given TTL in POST form is not a number", | |||
| EcodeIndexNaN: "The given index in POST form is not a number", | |||
| ecodeValueOrTTLRequired: "Value or TTL is required in POST form", | |||
| ecodeTimeoutNaN: "The given timeout in POST form is not a number", | |||
| ecodeNameRequired: "Name is required in POST form", | |||
| ecodeIndexOrValueRequired: "Index or value is required", | |||
| ecodeIndexValueMutex: "Index and value cannot both be specified", | |||
| EcodeInvalidField: "Invalid field", | |||
| EcodeInvalidForm: "Invalid POST form", | |||
| EcodeRefreshValue: "Value provided on refresh", | |||
| EcodeRefreshTTLRequired: "A TTL must be provided on refresh", | |||
| // raft related errors | |||
| EcodeRaftInternal: "Raft Internal Error", | |||
| EcodeLeaderElect: "During Leader Election", | |||
| // etcd related errors | |||
| EcodeWatcherCleared: "watcher is cleared due to etcd recovery", | |||
| EcodeEventIndexCleared: "The event in requested index is outdated and cleared", | |||
| ecodeStandbyInternal: "Standby Internal Error", | |||
| ecodeInvalidActiveSize: "Invalid active size", | |||
| ecodeInvalidRemoveDelay: "Standby remove delay", | |||
| // client related errors | |||
| ecodeClientInternal: "Client Internal Error", | |||
| } | |||
| var errorStatus = map[int]int{ | |||
| EcodeKeyNotFound: http.StatusNotFound, | |||
| EcodeNotFile: http.StatusForbidden, | |||
| EcodeDirNotEmpty: http.StatusForbidden, | |||
| EcodeUnauthorized: http.StatusUnauthorized, | |||
| EcodeTestFailed: http.StatusPreconditionFailed, | |||
| EcodeNodeExist: http.StatusPreconditionFailed, | |||
| EcodeRaftInternal: http.StatusInternalServerError, | |||
| EcodeLeaderElect: http.StatusInternalServerError, | |||
| } | |||
| const ( | |||
| EcodeKeyNotFound = 100 | |||
| EcodeTestFailed = 101 | |||
| EcodeNotFile = 102 | |||
| ecodeNoMorePeer = 103 | |||
| EcodeNotDir = 104 | |||
| EcodeNodeExist = 105 | |||
| ecodeKeyIsPreserved = 106 | |||
| EcodeRootROnly = 107 | |||
| EcodeDirNotEmpty = 108 | |||
| ecodeExistingPeerAddr = 109 | |||
| EcodeUnauthorized = 110 | |||
| ecodeValueRequired = 200 | |||
| EcodePrevValueRequired = 201 | |||
| EcodeTTLNaN = 202 | |||
| EcodeIndexNaN = 203 | |||
| ecodeValueOrTTLRequired = 204 | |||
| ecodeTimeoutNaN = 205 | |||
| ecodeNameRequired = 206 | |||
| ecodeIndexOrValueRequired = 207 | |||
| ecodeIndexValueMutex = 208 | |||
| EcodeInvalidField = 209 | |||
| EcodeInvalidForm = 210 | |||
| EcodeRefreshValue = 211 | |||
| EcodeRefreshTTLRequired = 212 | |||
| EcodeRaftInternal = 300 | |||
| EcodeLeaderElect = 301 | |||
| EcodeWatcherCleared = 400 | |||
| EcodeEventIndexCleared = 401 | |||
| ecodeStandbyInternal = 402 | |||
| ecodeInvalidActiveSize = 403 | |||
| ecodeInvalidRemoveDelay = 404 | |||
| ecodeClientInternal = 500 | |||
| ) | |||
| type Error struct { | |||
| ErrorCode int `json:"errorCode"` | |||
| Message string `json:"message"` | |||
| Cause string `json:"cause,omitempty"` | |||
| Index uint64 `json:"index"` | |||
| } | |||
| func NewRequestError(errorCode int, cause string) *Error { | |||
| return NewError(errorCode, cause, 0) | |||
| } | |||
| func NewError(errorCode int, cause string, index uint64) *Error { | |||
| return &Error{ | |||
| ErrorCode: errorCode, | |||
| Message: errors[errorCode], | |||
| Cause: cause, | |||
| Index: index, | |||
| } | |||
| } | |||
| // Error is for the error interface | |||
| func (e Error) Error() string { | |||
| return e.Message + " (" + e.Cause + ")" | |||
| } | |||
| func (e Error) toJsonString() string { | |||
| b, _ := json.Marshal(e) | |||
| return string(b) | |||
| } | |||
| func (e Error) StatusCode() int { | |||
| status, ok := errorStatus[e.ErrorCode] | |||
| if !ok { | |||
| status = http.StatusBadRequest | |||
| } | |||
| return status | |||
| } | |||
| func (e Error) WriteTo(w http.ResponseWriter) { | |||
| w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index)) | |||
| w.Header().Set("Content-Type", "application/json") | |||
| w.WriteHeader(e.StatusCode()) | |||
| fmt.Fprintln(w, e.toJsonString()) | |||
| } | |||
| @@ -1,202 +0,0 @@ | |||
| Apache License | |||
| Version 2.0, January 2004 | |||
| http://www.apache.org/licenses/ | |||
| TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
| 1. Definitions. | |||
| "License" shall mean the terms and conditions for use, reproduction, | |||
| and distribution as defined by Sections 1 through 9 of this document. | |||
| "Licensor" shall mean the copyright owner or entity authorized by | |||
| the copyright owner that is granting the License. | |||
| "Legal Entity" shall mean the union of the acting entity and all | |||
| other entities that control, are controlled by, or are under common | |||
| control with that entity. For the purposes of this definition, | |||
| "control" means (i) the power, direct or indirect, to cause the | |||
| direction or management of such entity, whether by contract or | |||
| otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
| outstanding shares, or (iii) beneficial ownership of such entity. | |||
| "You" (or "Your") shall mean an individual or Legal Entity | |||
| exercising permissions granted by this License. | |||
| "Source" form shall mean the preferred form for making modifications, | |||
| including but not limited to software source code, documentation | |||
| source, and configuration files. | |||
| "Object" form shall mean any form resulting from mechanical | |||
| transformation or translation of a Source form, including but | |||
| not limited to compiled object code, generated documentation, | |||
| and conversions to other media types. | |||
| "Work" shall mean the work of authorship, whether in Source or | |||
| Object form, made available under the License, as indicated by a | |||
| copyright notice that is included in or attached to the work | |||
| (an example is provided in the Appendix below). | |||
| "Derivative Works" shall mean any work, whether in Source or Object | |||
| form, that is based on (or derived from) the Work and for which the | |||
| editorial revisions, annotations, elaborations, or other modifications | |||
| represent, as a whole, an original work of authorship. For the purposes | |||
| of this License, Derivative Works shall not include works that remain | |||
| separable from, or merely link (or bind by name) to the interfaces of, | |||
| the Work and Derivative Works thereof. | |||
| "Contribution" shall mean any work of authorship, including | |||
| the original version of the Work and any modifications or additions | |||
| to that Work or Derivative Works thereof, that is intentionally | |||
| submitted to Licensor for inclusion in the Work by the copyright owner | |||
| or by an individual or Legal Entity authorized to submit on behalf of | |||
| the copyright owner. For the purposes of this definition, "submitted" | |||
| means any form of electronic, verbal, or written communication sent | |||
| to the Licensor or its representatives, including but not limited to | |||
| communication on electronic mailing lists, source code control systems, | |||
| and issue tracking systems that are managed by, or on behalf of, the | |||
| Licensor for the purpose of discussing and improving the Work, but | |||
| excluding communication that is conspicuously marked or otherwise | |||
| designated in writing by the copyright owner as "Not a Contribution." | |||
| "Contributor" shall mean Licensor and any individual or Legal Entity | |||
| on behalf of whom a Contribution has been received by Licensor and | |||
| subsequently incorporated within the Work. | |||
| 2. Grant of Copyright License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| copyright license to reproduce, prepare Derivative Works of, | |||
| publicly display, publicly perform, sublicense, and distribute the | |||
| Work and such Derivative Works in Source or Object form. | |||
| 3. Grant of Patent License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| (except as stated in this section) patent license to make, have made, | |||
| use, offer to sell, sell, import, and otherwise transfer the Work, | |||
| where such license applies only to those patent claims licensable | |||
| by such Contributor that are necessarily infringed by their | |||
| Contribution(s) alone or by combination of their Contribution(s) | |||
| with the Work to which such Contribution(s) was submitted. If You | |||
| institute patent litigation against any entity (including a | |||
| cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
| or a Contribution incorporated within the Work constitutes direct | |||
| or contributory patent infringement, then any patent licenses | |||
| granted to You under this License for that Work shall terminate | |||
| as of the date such litigation is filed. | |||
| 4. Redistribution. You may reproduce and distribute copies of the | |||
| Work or Derivative Works thereof in any medium, with or without | |||
| modifications, and in Source or Object form, provided that You | |||
| meet the following conditions: | |||
| (a) You must give any other recipients of the Work or | |||
| Derivative Works a copy of this License; and | |||
| (b) You must cause any modified files to carry prominent notices | |||
| stating that You changed the files; and | |||
| (c) You must retain, in the Source form of any Derivative Works | |||
| that You distribute, all copyright, patent, trademark, and | |||
| attribution notices from the Source form of the Work, | |||
| excluding those notices that do not pertain to any part of | |||
| the Derivative Works; and | |||
| (d) If the Work includes a "NOTICE" text file as part of its | |||
| distribution, then any Derivative Works that You distribute must | |||
| include a readable copy of the attribution notices contained | |||
| within such NOTICE file, excluding those notices that do not | |||
| pertain to any part of the Derivative Works, in at least one | |||
| of the following places: within a NOTICE text file distributed | |||
| as part of the Derivative Works; within the Source form or | |||
| documentation, if provided along with the Derivative Works; or, | |||
| within a display generated by the Derivative Works, if and | |||
| wherever such third-party notices normally appear. The contents | |||
| of the NOTICE file are for informational purposes only and | |||
| do not modify the License. You may add Your own attribution | |||
| notices within Derivative Works that You distribute, alongside | |||
| or as an addendum to the NOTICE text from the Work, provided | |||
| that such additional attribution notices cannot be construed | |||
| as modifying the License. | |||
| You may add Your own copyright statement to Your modifications and | |||
| may provide additional or different license terms and conditions | |||
| for use, reproduction, or distribution of Your modifications, or | |||
| for any such Derivative Works as a whole, provided Your use, | |||
| reproduction, and distribution of the Work otherwise complies with | |||
| the conditions stated in this License. | |||
| 5. Submission of Contributions. Unless You explicitly state otherwise, | |||
| any Contribution intentionally submitted for inclusion in the Work | |||
| by You to the Licensor shall be under the terms and conditions of | |||
| this License, without any additional terms or conditions. | |||
| Notwithstanding the above, nothing herein shall supersede or modify | |||
| the terms of any separate license agreement you may have executed | |||
| with Licensor regarding such Contributions. | |||
| 6. Trademarks. This License does not grant permission to use the trade | |||
| names, trademarks, service marks, or product names of the Licensor, | |||
| except as required for reasonable and customary use in describing the | |||
| origin of the Work and reproducing the content of the NOTICE file. | |||
| 7. Disclaimer of Warranty. Unless required by applicable law or | |||
| agreed to in writing, Licensor provides the Work (and each | |||
| Contributor provides its Contributions) on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
| implied, including, without limitation, any warranties or conditions | |||
| of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
| PARTICULAR PURPOSE. You are solely responsible for determining the | |||
| appropriateness of using or redistributing the Work and assume any | |||
| risks associated with Your exercise of permissions under this License. | |||
| 8. Limitation of Liability. In no event and under no legal theory, | |||
| whether in tort (including negligence), contract, or otherwise, | |||
| unless required by applicable law (such as deliberate and grossly | |||
| negligent acts) or agreed to in writing, shall any Contributor be | |||
| liable to You for damages, including any direct, indirect, special, | |||
| incidental, or consequential damages of any character arising as a | |||
| result of this License or out of the use or inability to use the | |||
| Work (including but not limited to damages for loss of goodwill, | |||
| work stoppage, computer failure or malfunction, or any and all | |||
| other commercial damages or losses), even if such Contributor | |||
| has been advised of the possibility of such damages. | |||
| 9. Accepting Warranty or Additional Liability. While redistributing | |||
| the Work or Derivative Works thereof, You may choose to offer, | |||
| and charge a fee for, acceptance of support, warranty, indemnity, | |||
| or other liability obligations and/or rights consistent with this | |||
| License. However, in accepting such obligations, You may act only | |||
| on Your own behalf and on Your sole responsibility, not on behalf | |||
| of any other Contributor, and only if You agree to indemnify, | |||
| defend, and hold each Contributor harmless for any liability | |||
| incurred by, or claims asserted against, such Contributor by reason | |||
| of your accepting any such warranty or additional liability. | |||
| END OF TERMS AND CONDITIONS | |||
| APPENDIX: How to apply the Apache License to your work. | |||
| To apply the Apache License to your work, attach the following | |||
| boilerplate notice, with the fields enclosed by brackets "[]" | |||
| replaced with your own identifying information. (Don't include | |||
| the brackets!) The text should be enclosed in the appropriate | |||
| comment syntax for the file format. We also recommend that a | |||
| file or class name and description of purpose be included on the | |||
| same "printed page" as the copyright notice for easier | |||
| identification within third-party archives. | |||
| Copyright [yyyy] [name of copyright owner] | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| @@ -1,23 +0,0 @@ | |||
| package etcd | |||
| // Add a new directory with a random etcd-generated key under the given path. | |||
| func (c *Client) AddChildDir(key string, ttl uint64) (*Response, error) { | |||
| raw, err := c.post(key, "", ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // Add a new file with a random etcd-generated key under the given path. | |||
| func (c *Client) AddChild(key string, value string, ttl uint64) (*Response, error) { | |||
| raw, err := c.post(key, value, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| @@ -1,476 +0,0 @@ | |||
| package etcd | |||
| import ( | |||
| "crypto/tls" | |||
| "crypto/x509" | |||
| "encoding/json" | |||
| "errors" | |||
| "io" | |||
| "io/ioutil" | |||
| "math/rand" | |||
| "net" | |||
| "net/http" | |||
| "net/url" | |||
| "os" | |||
| "path" | |||
| "strings" | |||
| "time" | |||
| ) | |||
| // See SetConsistency for how to use these constants. | |||
| const ( | |||
| // Using strings rather than iota because the consistency level | |||
| // could be persisted to disk, so it'd be better to use | |||
| // human-readable values. | |||
| STRONG_CONSISTENCY = "STRONG" | |||
| WEAK_CONSISTENCY = "WEAK" | |||
| ) | |||
| const ( | |||
| defaultBufferSize = 10 | |||
| ) | |||
| func init() { | |||
| rand.Seed(int64(time.Now().Nanosecond())) | |||
| } | |||
| type Config struct { | |||
| CertFile string `json:"certFile"` | |||
| KeyFile string `json:"keyFile"` | |||
| CaCertFile []string `json:"caCertFiles"` | |||
| DialTimeout time.Duration `json:"timeout"` | |||
| Consistency string `json:"consistency"` | |||
| } | |||
| type credentials struct { | |||
| username string | |||
| password string | |||
| } | |||
| type Client struct { | |||
| config Config `json:"config"` | |||
| cluster *Cluster `json:"cluster"` | |||
| httpClient *http.Client | |||
| credentials *credentials | |||
| transport *http.Transport | |||
| persistence io.Writer | |||
| cURLch chan string | |||
| // CheckRetry can be used to control the policy for failed requests | |||
| // and modify the cluster if needed. | |||
| // The client calls it before sending requests again, and | |||
| // stops retrying if CheckRetry returns some error. The cases that | |||
| // this function needs to handle include no response and unexpected | |||
| // http status code of response. | |||
| // If CheckRetry is nil, client will call the default one | |||
| // `DefaultCheckRetry`. | |||
| // Argument cluster is the etcd.Cluster object that these requests have been made on. | |||
| // Argument numReqs is the number of http.Requests that have been made so far. | |||
| // Argument lastResp is the http.Responses from the last request. | |||
| // Argument err is the reason of the failure. | |||
| CheckRetry func(cluster *Cluster, numReqs int, | |||
| lastResp http.Response, err error) error | |||
| } | |||
| // NewClient create a basic client that is configured to be used | |||
| // with the given machine list. | |||
| func NewClient(machines []string) *Client { | |||
| config := Config{ | |||
| // default timeout is one second | |||
| DialTimeout: time.Second, | |||
| Consistency: WEAK_CONSISTENCY, | |||
| } | |||
| client := &Client{ | |||
| cluster: NewCluster(machines), | |||
| config: config, | |||
| } | |||
| client.initHTTPClient() | |||
| client.saveConfig() | |||
| return client | |||
| } | |||
| // NewTLSClient create a basic client with TLS configuration | |||
| func NewTLSClient(machines []string, cert, key, caCert string) (*Client, error) { | |||
| // overwrite the default machine to use https | |||
| if len(machines) == 0 { | |||
| machines = []string{"https://127.0.0.1:4001"} | |||
| } | |||
| config := Config{ | |||
| // default timeout is one second | |||
| DialTimeout: time.Second, | |||
| Consistency: WEAK_CONSISTENCY, | |||
| CertFile: cert, | |||
| KeyFile: key, | |||
| CaCertFile: make([]string, 0), | |||
| } | |||
| client := &Client{ | |||
| cluster: NewCluster(machines), | |||
| config: config, | |||
| } | |||
| err := client.initHTTPSClient(cert, key) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| err = client.AddRootCA(caCert) | |||
| client.saveConfig() | |||
| return client, nil | |||
| } | |||
| // NewClientFromFile creates a client from a given file path. | |||
| // The given file is expected to use the JSON format. | |||
| func NewClientFromFile(fpath string) (*Client, error) { | |||
| fi, err := os.Open(fpath) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| defer func() { | |||
| if err := fi.Close(); err != nil { | |||
| panic(err) | |||
| } | |||
| }() | |||
| return NewClientFromReader(fi) | |||
| } | |||
| // NewClientFromReader creates a Client configured from a given reader. | |||
| // The configuration is expected to use the JSON format. | |||
| func NewClientFromReader(reader io.Reader) (*Client, error) { | |||
| c := new(Client) | |||
| b, err := ioutil.ReadAll(reader) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| err = json.Unmarshal(b, c) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if c.config.CertFile == "" { | |||
| c.initHTTPClient() | |||
| } else { | |||
| err = c.initHTTPSClient(c.config.CertFile, c.config.KeyFile) | |||
| } | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| for _, caCert := range c.config.CaCertFile { | |||
| if err := c.AddRootCA(caCert); err != nil { | |||
| return nil, err | |||
| } | |||
| } | |||
| return c, nil | |||
| } | |||
| // Override the Client's HTTP Transport object | |||
| func (c *Client) SetTransport(tr *http.Transport) { | |||
| c.httpClient.Transport = tr | |||
| c.transport = tr | |||
| } | |||
| func (c *Client) SetCredentials(username, password string) { | |||
| c.credentials = &credentials{username, password} | |||
| } | |||
| func (c *Client) Close() { | |||
| c.transport.DisableKeepAlives = true | |||
| c.transport.CloseIdleConnections() | |||
| } | |||
| // initHTTPClient initializes a HTTP client for etcd client | |||
| func (c *Client) initHTTPClient() { | |||
| c.transport = &http.Transport{ | |||
| Dial: c.DefaultDial, | |||
| TLSClientConfig: &tls.Config{ | |||
| InsecureSkipVerify: true, | |||
| }, | |||
| } | |||
| c.httpClient = &http.Client{Transport: c.transport} | |||
| } | |||
| // initHTTPClient initializes a HTTPS client for etcd client | |||
| func (c *Client) initHTTPSClient(cert, key string) error { | |||
| if cert == "" || key == "" { | |||
| return errors.New("Require both cert and key path") | |||
| } | |||
| tlsCert, err := tls.LoadX509KeyPair(cert, key) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| tlsConfig := &tls.Config{ | |||
| Certificates: []tls.Certificate{tlsCert}, | |||
| InsecureSkipVerify: true, | |||
| } | |||
| c.transport = &http.Transport{ | |||
| TLSClientConfig: tlsConfig, | |||
| Dial: c.DefaultDial, | |||
| } | |||
| c.httpClient = &http.Client{Transport: c.transport} | |||
| return nil | |||
| } | |||
| // SetPersistence sets a writer to which the config will be | |||
| // written every time it's changed. | |||
| func (c *Client) SetPersistence(writer io.Writer) { | |||
| c.persistence = writer | |||
| } | |||
| // SetConsistency changes the consistency level of the client. | |||
| // | |||
| // When consistency is set to STRONG_CONSISTENCY, all requests, | |||
| // including GET, are sent to the leader. This means that, assuming | |||
| // the absence of leader failures, GET requests are guaranteed to see | |||
| // the changes made by previous requests. | |||
| // | |||
| // When consistency is set to WEAK_CONSISTENCY, other requests | |||
| // are still sent to the leader, but GET requests are sent to a | |||
| // random server from the server pool. This reduces the read | |||
| // load on the leader, but it's not guaranteed that the GET requests | |||
| // will see changes made by previous requests (they might have not | |||
| // yet been committed on non-leader servers). | |||
| func (c *Client) SetConsistency(consistency string) error { | |||
| if !(consistency == STRONG_CONSISTENCY || consistency == WEAK_CONSISTENCY) { | |||
| return errors.New("The argument must be either STRONG_CONSISTENCY or WEAK_CONSISTENCY.") | |||
| } | |||
| c.config.Consistency = consistency | |||
| return nil | |||
| } | |||
| // Sets the DialTimeout value | |||
| func (c *Client) SetDialTimeout(d time.Duration) { | |||
| c.config.DialTimeout = d | |||
| } | |||
| // AddRootCA adds a root CA cert for the etcd client | |||
| func (c *Client) AddRootCA(caCert string) error { | |||
| if c.httpClient == nil { | |||
| return errors.New("Client has not been initialized yet!") | |||
| } | |||
| certBytes, err := ioutil.ReadFile(caCert) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| tr, ok := c.httpClient.Transport.(*http.Transport) | |||
| if !ok { | |||
| panic("AddRootCA(): Transport type assert should not fail") | |||
| } | |||
| if tr.TLSClientConfig.RootCAs == nil { | |||
| caCertPool := x509.NewCertPool() | |||
| ok = caCertPool.AppendCertsFromPEM(certBytes) | |||
| if ok { | |||
| tr.TLSClientConfig.RootCAs = caCertPool | |||
| } | |||
| tr.TLSClientConfig.InsecureSkipVerify = false | |||
| } else { | |||
| ok = tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(certBytes) | |||
| } | |||
| if !ok { | |||
| err = errors.New("Unable to load caCert") | |||
| } | |||
| c.config.CaCertFile = append(c.config.CaCertFile, caCert) | |||
| c.saveConfig() | |||
| return err | |||
| } | |||
| // SetCluster updates cluster information using the given machine list. | |||
| func (c *Client) SetCluster(machines []string) bool { | |||
| success := c.internalSyncCluster(machines) | |||
| return success | |||
| } | |||
| func (c *Client) GetCluster() []string { | |||
| return c.cluster.Machines | |||
| } | |||
| // SyncCluster updates the cluster information using the internal machine list. | |||
| // If no members are found, the intenral machine list is left untouched. | |||
| func (c *Client) SyncCluster() bool { | |||
| return c.internalSyncCluster(c.cluster.Machines) | |||
| } | |||
| // internalSyncCluster syncs cluster information using the given machine list. | |||
| func (c *Client) internalSyncCluster(machines []string) bool { | |||
| // comma-separated list of machines in the cluster. | |||
| members := "" | |||
| for _, machine := range machines { | |||
| httpPath := c.createHttpPath(machine, path.Join(version, "members")) | |||
| resp, err := c.httpClient.Get(httpPath) | |||
| if err != nil { | |||
| // try another machine in the cluster | |||
| continue | |||
| } | |||
| if resp.StatusCode != http.StatusOK { // fall-back to old endpoint | |||
| httpPath := c.createHttpPath(machine, path.Join(version, "machines")) | |||
| resp, err := c.httpClient.Get(httpPath) | |||
| if err != nil { | |||
| // try another machine in the cluster | |||
| continue | |||
| } | |||
| b, err := ioutil.ReadAll(resp.Body) | |||
| resp.Body.Close() | |||
| if err != nil { | |||
| // try another machine in the cluster | |||
| continue | |||
| } | |||
| members = string(b) | |||
| } else { | |||
| b, err := ioutil.ReadAll(resp.Body) | |||
| resp.Body.Close() | |||
| if err != nil { | |||
| // try another machine in the cluster | |||
| continue | |||
| } | |||
| var mCollection memberCollection | |||
| if err := json.Unmarshal(b, &mCollection); err != nil { | |||
| // try another machine | |||
| continue | |||
| } | |||
| urls := make([]string, 0) | |||
| for _, m := range mCollection { | |||
| urls = append(urls, m.ClientURLs...) | |||
| } | |||
| members = strings.Join(urls, ",") | |||
| } | |||
| // We should never do an empty cluster update. | |||
| if members == "" { | |||
| continue | |||
| } | |||
| // update Machines List | |||
| c.cluster.updateFromStr(members) | |||
| logger.Debug("sync.machines ", c.cluster.Machines) | |||
| c.saveConfig() | |||
| return true | |||
| } | |||
| return false | |||
| } | |||
| // createHttpPath creates a complete HTTP URL. | |||
| // serverName should contain both the host name and a port number, if any. | |||
| func (c *Client) createHttpPath(serverName string, _path string) string { | |||
| u, err := url.Parse(serverName) | |||
| if err != nil { | |||
| panic(err) | |||
| } | |||
| u.Path = path.Join(u.Path, _path) | |||
| if u.Scheme == "" { | |||
| u.Scheme = "http" | |||
| } | |||
| return u.String() | |||
| } | |||
| // DefaultDial attempts to open a TCP connection to the provided address, explicitly | |||
| // enabling keep-alives with a one-second interval. | |||
| func (c *Client) DefaultDial(network, addr string) (net.Conn, error) { | |||
| dialer := net.Dialer{ | |||
| Timeout: c.config.DialTimeout, | |||
| KeepAlive: time.Second, | |||
| } | |||
| return dialer.Dial(network, addr) | |||
| } | |||
| func (c *Client) OpenCURL() { | |||
| c.cURLch = make(chan string, defaultBufferSize) | |||
| } | |||
| func (c *Client) CloseCURL() { | |||
| c.cURLch = nil | |||
| } | |||
| func (c *Client) sendCURL(command string) { | |||
| go func() { | |||
| select { | |||
| case c.cURLch <- command: | |||
| default: | |||
| } | |||
| }() | |||
| } | |||
| func (c *Client) RecvCURL() string { | |||
| return <-c.cURLch | |||
| } | |||
| // saveConfig saves the current config using c.persistence. | |||
| func (c *Client) saveConfig() error { | |||
| if c.persistence != nil { | |||
| b, err := json.Marshal(c) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| _, err = c.persistence.Write(b) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| // MarshalJSON implements the Marshaller interface | |||
| // as defined by the standard JSON package. | |||
| func (c *Client) MarshalJSON() ([]byte, error) { | |||
| b, err := json.Marshal(struct { | |||
| Config Config `json:"config"` | |||
| Cluster *Cluster `json:"cluster"` | |||
| }{ | |||
| Config: c.config, | |||
| Cluster: c.cluster, | |||
| }) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return b, nil | |||
| } | |||
| // UnmarshalJSON implements the Unmarshaller interface | |||
| // as defined by the standard JSON package. | |||
| func (c *Client) UnmarshalJSON(b []byte) error { | |||
| temp := struct { | |||
| Config Config `json:"config"` | |||
| Cluster *Cluster `json:"cluster"` | |||
| }{} | |||
| err := json.Unmarshal(b, &temp) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| c.cluster = temp.Cluster | |||
| c.config = temp.Config | |||
| return nil | |||
| } | |||
| @@ -1,54 +0,0 @@ | |||
| package etcd | |||
| import ( | |||
| "math/rand" | |||
| "strings" | |||
| "sync" | |||
| ) | |||
| type Cluster struct { | |||
| Leader string `json:"leader"` | |||
| Machines []string `json:"machines"` | |||
| picked int | |||
| mu sync.RWMutex | |||
| } | |||
| func NewCluster(machines []string) *Cluster { | |||
| // if an empty slice was sent in then just assume HTTP 4001 on localhost | |||
| if len(machines) == 0 { | |||
| machines = []string{"http://127.0.0.1:4001"} | |||
| } | |||
| machines = shuffleStringSlice(machines) | |||
| logger.Debug("Shuffle cluster machines", machines) | |||
| // default leader and machines | |||
| return &Cluster{ | |||
| Leader: "", | |||
| Machines: machines, | |||
| picked: rand.Intn(len(machines)), | |||
| } | |||
| } | |||
| func (cl *Cluster) failure() { | |||
| cl.mu.Lock() | |||
| defer cl.mu.Unlock() | |||
| cl.picked = (cl.picked + 1) % len(cl.Machines) | |||
| } | |||
| func (cl *Cluster) pick() string { | |||
| cl.mu.Lock() | |||
| defer cl.mu.Unlock() | |||
| return cl.Machines[cl.picked] | |||
| } | |||
| func (cl *Cluster) updateFromStr(machines string) { | |||
| cl.mu.Lock() | |||
| defer cl.mu.Unlock() | |||
| cl.Machines = strings.Split(machines, ",") | |||
| for i := range cl.Machines { | |||
| cl.Machines[i] = strings.TrimSpace(cl.Machines[i]) | |||
| } | |||
| cl.Machines = shuffleStringSlice(cl.Machines) | |||
| cl.picked = rand.Intn(len(cl.Machines)) | |||
| } | |||
| @@ -1,34 +0,0 @@ | |||
| package etcd | |||
| import "fmt" | |||
| func (c *Client) CompareAndDelete(key string, prevValue string, prevIndex uint64) (*Response, error) { | |||
| raw, err := c.RawCompareAndDelete(key, prevValue, prevIndex) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| func (c *Client) RawCompareAndDelete(key string, prevValue string, prevIndex uint64) (*RawResponse, error) { | |||
| if prevValue == "" && prevIndex == 0 { | |||
| return nil, fmt.Errorf("You must give either prevValue or prevIndex.") | |||
| } | |||
| options := Options{} | |||
| if prevValue != "" { | |||
| options["prevValue"] = prevValue | |||
| } | |||
| if prevIndex != 0 { | |||
| options["prevIndex"] = prevIndex | |||
| } | |||
| raw, err := c.delete(key, options) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw, err | |||
| } | |||
| @@ -1,36 +0,0 @@ | |||
| package etcd | |||
| import "fmt" | |||
| func (c *Client) CompareAndSwap(key string, value string, ttl uint64, | |||
| prevValue string, prevIndex uint64) (*Response, error) { | |||
| raw, err := c.RawCompareAndSwap(key, value, ttl, prevValue, prevIndex) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| func (c *Client) RawCompareAndSwap(key string, value string, ttl uint64, | |||
| prevValue string, prevIndex uint64) (*RawResponse, error) { | |||
| if prevValue == "" && prevIndex == 0 { | |||
| return nil, fmt.Errorf("You must give either prevValue or prevIndex.") | |||
| } | |||
| options := Options{} | |||
| if prevValue != "" { | |||
| options["prevValue"] = prevValue | |||
| } | |||
| if prevIndex != 0 { | |||
| options["prevIndex"] = prevIndex | |||
| } | |||
| raw, err := c.put(key, value, ttl, options) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw, err | |||
| } | |||
| @@ -1,55 +0,0 @@ | |||
| package etcd | |||
| import ( | |||
| "fmt" | |||
| "io/ioutil" | |||
| "log" | |||
| "strings" | |||
| ) | |||
| var logger *etcdLogger | |||
| func SetLogger(l *log.Logger) { | |||
| logger = &etcdLogger{l} | |||
| } | |||
| func GetLogger() *log.Logger { | |||
| return logger.log | |||
| } | |||
| type etcdLogger struct { | |||
| log *log.Logger | |||
| } | |||
| func (p *etcdLogger) Debug(args ...interface{}) { | |||
| msg := "DEBUG: " + fmt.Sprint(args...) | |||
| p.log.Println(msg) | |||
| } | |||
| func (p *etcdLogger) Debugf(f string, args ...interface{}) { | |||
| msg := "DEBUG: " + fmt.Sprintf(f, args...) | |||
| // Append newline if necessary | |||
| if !strings.HasSuffix(msg, "\n") { | |||
| msg = msg + "\n" | |||
| } | |||
| p.log.Print(msg) | |||
| } | |||
| func (p *etcdLogger) Warning(args ...interface{}) { | |||
| msg := "WARNING: " + fmt.Sprint(args...) | |||
| p.log.Println(msg) | |||
| } | |||
| func (p *etcdLogger) Warningf(f string, args ...interface{}) { | |||
| msg := "WARNING: " + fmt.Sprintf(f, args...) | |||
| // Append newline if necessary | |||
| if !strings.HasSuffix(msg, "\n") { | |||
| msg = msg + "\n" | |||
| } | |||
| p.log.Print(msg) | |||
| } | |||
| func init() { | |||
| // Default logger uses the go default log. | |||
| SetLogger(log.New(ioutil.Discard, "go-etcd", log.LstdFlags)) | |||
| } | |||
| @@ -1,40 +0,0 @@ | |||
| package etcd | |||
| // Delete deletes the given key. | |||
| // | |||
| // When recursive set to false, if the key points to a | |||
| // directory the method will fail. | |||
| // | |||
| // When recursive set to true, if the key points to a file, | |||
| // the file will be deleted; if the key points to a directory, | |||
| // then everything under the directory (including all child directories) | |||
| // will be deleted. | |||
| func (c *Client) Delete(key string, recursive bool) (*Response, error) { | |||
| raw, err := c.RawDelete(key, recursive, false) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // DeleteDir deletes an empty directory or a key value pair | |||
| func (c *Client) DeleteDir(key string) (*Response, error) { | |||
| raw, err := c.RawDelete(key, false, true) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| func (c *Client) RawDelete(key string, recursive bool, dir bool) (*RawResponse, error) { | |||
| ops := Options{ | |||
| "recursive": recursive, | |||
| "dir": dir, | |||
| } | |||
| return c.delete(key, ops) | |||
| } | |||
| @@ -1,49 +0,0 @@ | |||
| package etcd | |||
| import ( | |||
| "encoding/json" | |||
| "fmt" | |||
| ) | |||
| const ( | |||
| ErrCodeEtcdNotReachable = 501 | |||
| ErrCodeUnhandledHTTPStatus = 502 | |||
| ) | |||
| var ( | |||
| errorMap = map[int]string{ | |||
| ErrCodeEtcdNotReachable: "All the given peers are not reachable", | |||
| } | |||
| ) | |||
| type EtcdError struct { | |||
| ErrorCode int `json:"errorCode"` | |||
| Message string `json:"message"` | |||
| Cause string `json:"cause,omitempty"` | |||
| Index uint64 `json:"index"` | |||
| } | |||
| func (e EtcdError) Error() string { | |||
| return fmt.Sprintf("%v: %v (%v) [%v]", e.ErrorCode, e.Message, e.Cause, e.Index) | |||
| } | |||
| func newError(errorCode int, cause string, index uint64) *EtcdError { | |||
| return &EtcdError{ | |||
| ErrorCode: errorCode, | |||
| Message: errorMap[errorCode], | |||
| Cause: cause, | |||
| Index: index, | |||
| } | |||
| } | |||
| func handleError(b []byte) error { | |||
| etcdErr := new(EtcdError) | |||
| err := json.Unmarshal(b, etcdErr) | |||
| if err != nil { | |||
| logger.Warningf("cannot unmarshal etcd error: %v", err) | |||
| return err | |||
| } | |||
| return etcdErr | |||
| } | |||
| @@ -1,32 +0,0 @@ | |||
| package etcd | |||
| // Get gets the file or directory associated with the given key. | |||
| // If the key points to a directory, files and directories under | |||
| // it will be returned in sorted or unsorted order, depending on | |||
| // the sort flag. | |||
| // If recursive is set to false, contents under child directories | |||
| // will not be returned. | |||
| // If recursive is set to true, all the contents will be returned. | |||
| func (c *Client) Get(key string, sort, recursive bool) (*Response, error) { | |||
| raw, err := c.RawGet(key, sort, recursive) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| func (c *Client) RawGet(key string, sort, recursive bool) (*RawResponse, error) { | |||
| var q bool | |||
| if c.config.Consistency == STRONG_CONSISTENCY { | |||
| q = true | |||
| } | |||
| ops := Options{ | |||
| "recursive": recursive, | |||
| "sorted": sort, | |||
| "quorum": q, | |||
| } | |||
| return c.get(key, ops) | |||
| } | |||
| @@ -1,30 +0,0 @@ | |||
| package etcd | |||
| import "encoding/json" | |||
| type Member struct { | |||
| ID string `json:"id"` | |||
| Name string `json:"name"` | |||
| PeerURLs []string `json:"peerURLs"` | |||
| ClientURLs []string `json:"clientURLs"` | |||
| } | |||
| type memberCollection []Member | |||
| func (c *memberCollection) UnmarshalJSON(data []byte) error { | |||
| d := struct { | |||
| Members []Member | |||
| }{} | |||
| if err := json.Unmarshal(data, &d); err != nil { | |||
| return err | |||
| } | |||
| if d.Members == nil { | |||
| *c = make([]Member, 0) | |||
| return nil | |||
| } | |||
| *c = d.Members | |||
| return nil | |||
| } | |||
| @@ -1,72 +0,0 @@ | |||
| package etcd | |||
| import ( | |||
| "fmt" | |||
| "net/url" | |||
| "reflect" | |||
| ) | |||
| type Options map[string]interface{} | |||
| // An internally-used data structure that represents a mapping | |||
| // between valid options and their kinds | |||
| type validOptions map[string]reflect.Kind | |||
| // Valid options for GET, PUT, POST, DELETE | |||
| // Using CAPITALIZED_UNDERSCORE to emphasize that these | |||
| // values are meant to be used as constants. | |||
| var ( | |||
| VALID_GET_OPTIONS = validOptions{ | |||
| "recursive": reflect.Bool, | |||
| "quorum": reflect.Bool, | |||
| "sorted": reflect.Bool, | |||
| "wait": reflect.Bool, | |||
| "waitIndex": reflect.Uint64, | |||
| } | |||
| VALID_PUT_OPTIONS = validOptions{ | |||
| "prevValue": reflect.String, | |||
| "prevIndex": reflect.Uint64, | |||
| "prevExist": reflect.Bool, | |||
| "dir": reflect.Bool, | |||
| } | |||
| VALID_POST_OPTIONS = validOptions{} | |||
| VALID_DELETE_OPTIONS = validOptions{ | |||
| "recursive": reflect.Bool, | |||
| "dir": reflect.Bool, | |||
| "prevValue": reflect.String, | |||
| "prevIndex": reflect.Uint64, | |||
| } | |||
| ) | |||
| // Convert options to a string of HTML parameters | |||
| func (ops Options) toParameters(validOps validOptions) (string, error) { | |||
| p := "?" | |||
| values := url.Values{} | |||
| if ops == nil { | |||
| return "", nil | |||
| } | |||
| for k, v := range ops { | |||
| // Check if the given option is valid (that it exists) | |||
| kind := validOps[k] | |||
| if kind == reflect.Invalid { | |||
| return "", fmt.Errorf("Invalid option: %v", k) | |||
| } | |||
| // Check if the given option is of the valid type | |||
| t := reflect.TypeOf(v) | |||
| if kind != t.Kind() { | |||
| return "", fmt.Errorf("Option %s should be of %v kind, not of %v kind.", | |||
| k, kind, t.Kind()) | |||
| } | |||
| values.Set(k, fmt.Sprintf("%v", v)) | |||
| } | |||
| p += values.Encode() | |||
| return p, nil | |||
| } | |||
| @@ -1,403 +0,0 @@ | |||
| package etcd | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "io" | |||
| "io/ioutil" | |||
| "net/http" | |||
| "net/url" | |||
| "path" | |||
| "strings" | |||
| "sync" | |||
| "time" | |||
| ) | |||
| // Errors introduced by handling requests | |||
| var ( | |||
| ErrRequestCancelled = errors.New("sending request is cancelled") | |||
| ) | |||
| type RawRequest struct { | |||
| Method string | |||
| RelativePath string | |||
| Values url.Values | |||
| Cancel <-chan bool | |||
| } | |||
| // NewRawRequest returns a new RawRequest | |||
| func NewRawRequest(method, relativePath string, values url.Values, cancel <-chan bool) *RawRequest { | |||
| return &RawRequest{ | |||
| Method: method, | |||
| RelativePath: relativePath, | |||
| Values: values, | |||
| Cancel: cancel, | |||
| } | |||
| } | |||
| // getCancelable issues a cancelable GET request | |||
| func (c *Client) getCancelable(key string, options Options, | |||
| cancel <-chan bool) (*RawResponse, error) { | |||
| logger.Debugf("get %s [%s]", key, c.cluster.pick()) | |||
| p := keyToPath(key) | |||
| str, err := options.toParameters(VALID_GET_OPTIONS) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| p += str | |||
| req := NewRawRequest("GET", p, nil, cancel) | |||
| resp, err := c.SendRequest(req) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // get issues a GET request | |||
| func (c *Client) get(key string, options Options) (*RawResponse, error) { | |||
| return c.getCancelable(key, options, nil) | |||
| } | |||
| // put issues a PUT request | |||
| func (c *Client) put(key string, value string, ttl uint64, | |||
| options Options) (*RawResponse, error) { | |||
| logger.Debugf("put %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.pick()) | |||
| p := keyToPath(key) | |||
| str, err := options.toParameters(VALID_PUT_OPTIONS) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| p += str | |||
| req := NewRawRequest("PUT", p, buildValues(value, ttl), nil) | |||
| resp, err := c.SendRequest(req) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // post issues a POST request | |||
| func (c *Client) post(key string, value string, ttl uint64) (*RawResponse, error) { | |||
| logger.Debugf("post %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.pick()) | |||
| p := keyToPath(key) | |||
| req := NewRawRequest("POST", p, buildValues(value, ttl), nil) | |||
| resp, err := c.SendRequest(req) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // delete issues a DELETE request | |||
| func (c *Client) delete(key string, options Options) (*RawResponse, error) { | |||
| logger.Debugf("delete %s [%s]", key, c.cluster.pick()) | |||
| p := keyToPath(key) | |||
| str, err := options.toParameters(VALID_DELETE_OPTIONS) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| p += str | |||
| req := NewRawRequest("DELETE", p, nil, nil) | |||
| resp, err := c.SendRequest(req) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // SendRequest sends a HTTP request and returns a Response as defined by etcd | |||
| func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) { | |||
| var req *http.Request | |||
| var resp *http.Response | |||
| var httpPath string | |||
| var err error | |||
| var respBody []byte | |||
| var numReqs = 1 | |||
| checkRetry := c.CheckRetry | |||
| if checkRetry == nil { | |||
| checkRetry = DefaultCheckRetry | |||
| } | |||
| cancelled := make(chan bool, 1) | |||
| reqLock := new(sync.Mutex) | |||
| if rr.Cancel != nil { | |||
| cancelRoutine := make(chan bool) | |||
| defer close(cancelRoutine) | |||
| go func() { | |||
| select { | |||
| case <-rr.Cancel: | |||
| cancelled <- true | |||
| logger.Debug("send.request is cancelled") | |||
| case <-cancelRoutine: | |||
| return | |||
| } | |||
| // Repeat canceling request until this thread is stopped | |||
| // because we have no idea about whether it succeeds. | |||
| for { | |||
| reqLock.Lock() | |||
| c.httpClient.Transport.(*http.Transport).CancelRequest(req) | |||
| reqLock.Unlock() | |||
| select { | |||
| case <-time.After(100 * time.Millisecond): | |||
| case <-cancelRoutine: | |||
| return | |||
| } | |||
| } | |||
| }() | |||
| } | |||
| // If we connect to a follower and consistency is required, retry until | |||
| // we connect to a leader | |||
| sleep := 25 * time.Millisecond | |||
| maxSleep := time.Second | |||
| for attempt := 0; ; attempt++ { | |||
| if attempt > 0 { | |||
| select { | |||
| case <-cancelled: | |||
| return nil, ErrRequestCancelled | |||
| case <-time.After(sleep): | |||
| sleep = sleep * 2 | |||
| if sleep > maxSleep { | |||
| sleep = maxSleep | |||
| } | |||
| } | |||
| } | |||
| logger.Debug("Connecting to etcd: attempt ", attempt+1, " for ", rr.RelativePath) | |||
| // get httpPath if not set | |||
| if httpPath == "" { | |||
| httpPath = c.getHttpPath(rr.RelativePath) | |||
| } | |||
| // Return a cURL command if curlChan is set | |||
| if c.cURLch != nil { | |||
| command := fmt.Sprintf("curl -X %s %s", rr.Method, httpPath) | |||
| for key, value := range rr.Values { | |||
| command += fmt.Sprintf(" -d %s=%s", key, value[0]) | |||
| } | |||
| if c.credentials != nil { | |||
| command += fmt.Sprintf(" -u %s", c.credentials.username) | |||
| } | |||
| c.sendCURL(command) | |||
| } | |||
| logger.Debug("send.request.to ", httpPath, " | method ", rr.Method) | |||
| req, err := func() (*http.Request, error) { | |||
| reqLock.Lock() | |||
| defer reqLock.Unlock() | |||
| if rr.Values == nil { | |||
| if req, err = http.NewRequest(rr.Method, httpPath, nil); err != nil { | |||
| return nil, err | |||
| } | |||
| } else { | |||
| body := strings.NewReader(rr.Values.Encode()) | |||
| if req, err = http.NewRequest(rr.Method, httpPath, body); err != nil { | |||
| return nil, err | |||
| } | |||
| req.Header.Set("Content-Type", | |||
| "application/x-www-form-urlencoded; param=value") | |||
| } | |||
| return req, nil | |||
| }() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if c.credentials != nil { | |||
| req.SetBasicAuth(c.credentials.username, c.credentials.password) | |||
| } | |||
| resp, err = c.httpClient.Do(req) | |||
| // clear previous httpPath | |||
| httpPath = "" | |||
| defer func() { | |||
| if resp != nil { | |||
| resp.Body.Close() | |||
| } | |||
| }() | |||
| // If the request was cancelled, return ErrRequestCancelled directly | |||
| select { | |||
| case <-cancelled: | |||
| return nil, ErrRequestCancelled | |||
| default: | |||
| } | |||
| numReqs++ | |||
| // network error, change a machine! | |||
| if err != nil { | |||
| logger.Debug("network error: ", err.Error()) | |||
| lastResp := http.Response{} | |||
| if checkErr := checkRetry(c.cluster, numReqs, lastResp, err); checkErr != nil { | |||
| return nil, checkErr | |||
| } | |||
| c.cluster.failure() | |||
| continue | |||
| } | |||
| // if there is no error, it should receive response | |||
| logger.Debug("recv.response.from ", httpPath) | |||
| if validHttpStatusCode[resp.StatusCode] { | |||
| // try to read byte code and break the loop | |||
| respBody, err = ioutil.ReadAll(resp.Body) | |||
| if err == nil { | |||
| logger.Debug("recv.success ", httpPath) | |||
| break | |||
| } | |||
| // ReadAll error may be caused due to cancel request | |||
| select { | |||
| case <-cancelled: | |||
| return nil, ErrRequestCancelled | |||
| default: | |||
| } | |||
| if err == io.ErrUnexpectedEOF { | |||
| // underlying connection was closed prematurely, probably by timeout | |||
| // TODO: empty body or unexpectedEOF can cause http.Transport to get hosed; | |||
| // this allows the client to detect that and take evasive action. Need | |||
| // to revisit once code.google.com/p/go/issues/detail?id=8648 gets fixed. | |||
| respBody = []byte{} | |||
| break | |||
| } | |||
| } | |||
| if resp.StatusCode == http.StatusTemporaryRedirect { | |||
| u, err := resp.Location() | |||
| if err != nil { | |||
| logger.Warning(err) | |||
| } else { | |||
| // set httpPath for following redirection | |||
| httpPath = u.String() | |||
| } | |||
| resp.Body.Close() | |||
| continue | |||
| } | |||
| if checkErr := checkRetry(c.cluster, numReqs, *resp, | |||
| errors.New("Unexpected HTTP status code")); checkErr != nil { | |||
| return nil, checkErr | |||
| } | |||
| resp.Body.Close() | |||
| } | |||
| r := &RawResponse{ | |||
| StatusCode: resp.StatusCode, | |||
| Body: respBody, | |||
| Header: resp.Header, | |||
| } | |||
| return r, nil | |||
| } | |||
| // DefaultCheckRetry defines the retrying behaviour for bad HTTP requests | |||
| // If we have retried 2 * machine number, stop retrying. | |||
| // If status code is InternalServerError, sleep for 200ms. | |||
| func DefaultCheckRetry(cluster *Cluster, numReqs int, lastResp http.Response, | |||
| err error) error { | |||
| if numReqs > 2*len(cluster.Machines) { | |||
| errStr := fmt.Sprintf("failed to propose on members %v twice [last error: %v]", cluster.Machines, err) | |||
| return newError(ErrCodeEtcdNotReachable, errStr, 0) | |||
| } | |||
| if isEmptyResponse(lastResp) { | |||
| // always retry if it failed to get response from one machine | |||
| return nil | |||
| } | |||
| if !shouldRetry(lastResp) { | |||
| body := []byte("nil") | |||
| if lastResp.Body != nil { | |||
| if b, err := ioutil.ReadAll(lastResp.Body); err == nil { | |||
| body = b | |||
| } | |||
| } | |||
| errStr := fmt.Sprintf("unhandled http status [%s] with body [%s]", http.StatusText(lastResp.StatusCode), body) | |||
| return newError(ErrCodeUnhandledHTTPStatus, errStr, 0) | |||
| } | |||
| // sleep some time and expect leader election finish | |||
| time.Sleep(time.Millisecond * 200) | |||
| logger.Warning("bad response status code ", lastResp.StatusCode) | |||
| return nil | |||
| } | |||
| func isEmptyResponse(r http.Response) bool { return r.StatusCode == 0 } | |||
| // shouldRetry returns whether the reponse deserves retry. | |||
| func shouldRetry(r http.Response) bool { | |||
| // TODO: only retry when the cluster is in leader election | |||
| // We cannot do it exactly because etcd doesn't support it well. | |||
| return r.StatusCode == http.StatusInternalServerError | |||
| } | |||
| func (c *Client) getHttpPath(s ...string) string { | |||
| fullPath := c.cluster.pick() + "/" + version | |||
| for _, seg := range s { | |||
| fullPath = fullPath + "/" + seg | |||
| } | |||
| return fullPath | |||
| } | |||
| // buildValues builds a url.Values map according to the given value and ttl | |||
| func buildValues(value string, ttl uint64) url.Values { | |||
| v := url.Values{} | |||
| if value != "" { | |||
| v.Set("value", value) | |||
| } | |||
| if ttl > 0 { | |||
| v.Set("ttl", fmt.Sprintf("%v", ttl)) | |||
| } | |||
| return v | |||
| } | |||
| // convert key string to http path exclude version, including URL escaping | |||
| // for example: key[foo] -> path[keys/foo] | |||
| // key[/%z] -> path[keys/%25z] | |||
| // key[/] -> path[keys/] | |||
| func keyToPath(key string) string { | |||
| // URL-escape our key, except for slashes | |||
| p := strings.Replace(url.QueryEscape(path.Join("keys", key)), "%2F", "/", -1) | |||
| // corner case: if key is "/" or "//" ect | |||
| // path join will clear the tailing "/" | |||
| // we need to add it back | |||
| if p == "keys" { | |||
| p = "keys/" | |||
| } | |||
| return p | |||
| } | |||
| @@ -1,93 +0,0 @@ | |||
| package etcd | |||
| //go:generate codecgen -d 1978 -o response.generated.go response.go | |||
| import ( | |||
| "net/http" | |||
| "strconv" | |||
| "time" | |||
| "github.com/ugorji/go/codec" | |||
| ) | |||
| const ( | |||
| rawResponse = iota | |||
| normalResponse | |||
| ) | |||
| type responseType int | |||
| type RawResponse struct { | |||
| StatusCode int | |||
| Body []byte | |||
| Header http.Header | |||
| } | |||
| var ( | |||
| validHttpStatusCode = map[int]bool{ | |||
| http.StatusCreated: true, | |||
| http.StatusOK: true, | |||
| http.StatusBadRequest: true, | |||
| http.StatusNotFound: true, | |||
| http.StatusPreconditionFailed: true, | |||
| http.StatusForbidden: true, | |||
| http.StatusUnauthorized: true, | |||
| } | |||
| ) | |||
| // Unmarshal parses RawResponse and stores the result in Response | |||
| func (rr *RawResponse) Unmarshal() (*Response, error) { | |||
| if rr.StatusCode != http.StatusOK && rr.StatusCode != http.StatusCreated { | |||
| return nil, handleError(rr.Body) | |||
| } | |||
| resp := new(Response) | |||
| err := codec.NewDecoderBytes(rr.Body, new(codec.JsonHandle)).Decode(resp) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| // attach index and term to response | |||
| resp.EtcdIndex, _ = strconv.ParseUint(rr.Header.Get("X-Etcd-Index"), 10, 64) | |||
| resp.RaftIndex, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Index"), 10, 64) | |||
| resp.RaftTerm, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Term"), 10, 64) | |||
| return resp, nil | |||
| } | |||
| type Response struct { | |||
| Action string `json:"action"` | |||
| Node *Node `json:"node"` | |||
| PrevNode *Node `json:"prevNode,omitempty"` | |||
| EtcdIndex uint64 `json:"etcdIndex"` | |||
| RaftIndex uint64 `json:"raftIndex"` | |||
| RaftTerm uint64 `json:"raftTerm"` | |||
| } | |||
| type Node struct { | |||
| Key string `json:"key, omitempty"` | |||
| Value string `json:"value,omitempty"` | |||
| Dir bool `json:"dir,omitempty"` | |||
| Expiration *time.Time `json:"expiration,omitempty"` | |||
| TTL int64 `json:"ttl,omitempty"` | |||
| Nodes Nodes `json:"nodes,omitempty"` | |||
| ModifiedIndex uint64 `json:"modifiedIndex,omitempty"` | |||
| CreatedIndex uint64 `json:"createdIndex,omitempty"` | |||
| } | |||
| type Nodes []*Node | |||
| // interfaces for sorting | |||
| func (ns Nodes) Len() int { | |||
| return len(ns) | |||
| } | |||
| func (ns Nodes) Less(i, j int) bool { | |||
| return ns[i].Key < ns[j].Key | |||
| } | |||
| func (ns Nodes) Swap(i, j int) { | |||
| ns[i], ns[j] = ns[j], ns[i] | |||
| } | |||
| @@ -1,137 +0,0 @@ | |||
| package etcd | |||
| // Set sets the given key to the given value. | |||
| // It will create a new key value pair or replace the old one. | |||
| // It will not replace a existing directory. | |||
| func (c *Client) Set(key string, value string, ttl uint64) (*Response, error) { | |||
| raw, err := c.RawSet(key, value, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // SetDir sets the given key to a directory. | |||
| // It will create a new directory or replace the old key value pair by a directory. | |||
| // It will not replace a existing directory. | |||
| func (c *Client) SetDir(key string, ttl uint64) (*Response, error) { | |||
| raw, err := c.RawSetDir(key, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // CreateDir creates a directory. It succeeds only if | |||
| // the given key does not yet exist. | |||
| func (c *Client) CreateDir(key string, ttl uint64) (*Response, error) { | |||
| raw, err := c.RawCreateDir(key, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // UpdateDir updates the given directory. It succeeds only if the | |||
| // given key already exists. | |||
| func (c *Client) UpdateDir(key string, ttl uint64) (*Response, error) { | |||
| raw, err := c.RawUpdateDir(key, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // Create creates a file with the given value under the given key. It succeeds | |||
| // only if the given key does not yet exist. | |||
| func (c *Client) Create(key string, value string, ttl uint64) (*Response, error) { | |||
| raw, err := c.RawCreate(key, value, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // CreateInOrder creates a file with a key that's guaranteed to be higher than other | |||
| // keys in the given directory. It is useful for creating queues. | |||
| func (c *Client) CreateInOrder(dir string, value string, ttl uint64) (*Response, error) { | |||
| raw, err := c.RawCreateInOrder(dir, value, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // Update updates the given key to the given value. It succeeds only if the | |||
| // given key already exists. | |||
| func (c *Client) Update(key string, value string, ttl uint64) (*Response, error) { | |||
| raw, err := c.RawUpdate(key, value, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| func (c *Client) RawUpdateDir(key string, ttl uint64) (*RawResponse, error) { | |||
| ops := Options{ | |||
| "prevExist": true, | |||
| "dir": true, | |||
| } | |||
| return c.put(key, "", ttl, ops) | |||
| } | |||
| func (c *Client) RawCreateDir(key string, ttl uint64) (*RawResponse, error) { | |||
| ops := Options{ | |||
| "prevExist": false, | |||
| "dir": true, | |||
| } | |||
| return c.put(key, "", ttl, ops) | |||
| } | |||
| func (c *Client) RawSet(key string, value string, ttl uint64) (*RawResponse, error) { | |||
| return c.put(key, value, ttl, nil) | |||
| } | |||
| func (c *Client) RawSetDir(key string, ttl uint64) (*RawResponse, error) { | |||
| ops := Options{ | |||
| "dir": true, | |||
| } | |||
| return c.put(key, "", ttl, ops) | |||
| } | |||
| func (c *Client) RawUpdate(key string, value string, ttl uint64) (*RawResponse, error) { | |||
| ops := Options{ | |||
| "prevExist": true, | |||
| } | |||
| return c.put(key, value, ttl, ops) | |||
| } | |||
| func (c *Client) RawCreate(key string, value string, ttl uint64) (*RawResponse, error) { | |||
| ops := Options{ | |||
| "prevExist": false, | |||
| } | |||
| return c.put(key, value, ttl, ops) | |||
| } | |||
| func (c *Client) RawCreateInOrder(dir string, value string, ttl uint64) (*RawResponse, error) { | |||
| return c.post(dir, value, ttl) | |||
| } | |||
| @@ -1,19 +0,0 @@ | |||
| package etcd | |||
| import ( | |||
| "math/rand" | |||
| ) | |||
| func shuffleStringSlice(cards []string) []string { | |||
| size := len(cards) | |||
| //Do not need to copy if nothing changed | |||
| if size <= 1 { | |||
| return cards | |||
| } | |||
| shuffled := make([]string, size) | |||
| index := rand.Perm(size) | |||
| for i := range cards { | |||
| shuffled[index[i]] = cards[i] | |||
| } | |||
| return shuffled | |||
| } | |||
| @@ -1,6 +0,0 @@ | |||
| package etcd | |||
| const ( | |||
| version = "v2" | |||
| packageVersion = "v2.0.0+git" | |||
| ) | |||
| @@ -1,103 +0,0 @@ | |||
| package etcd | |||
| import ( | |||
| "errors" | |||
| ) | |||
| // Errors introduced by the Watch command. | |||
| var ( | |||
| ErrWatchStoppedByUser = errors.New("Watch stopped by the user via stop channel") | |||
| ) | |||
| // If recursive is set to true the watch returns the first change under the given | |||
| // prefix since the given index. | |||
| // | |||
| // If recursive is set to false the watch returns the first change to the given key | |||
| // since the given index. | |||
| // | |||
| // To watch for the latest change, set waitIndex = 0. | |||
| // | |||
| // If a receiver channel is given, it will be a long-term watch. Watch will block at the | |||
| //channel. After someone receives the channel, it will go on to watch that | |||
| // prefix. If a stop channel is given, the client can close long-term watch using | |||
| // the stop channel. | |||
| func (c *Client) Watch(prefix string, waitIndex uint64, recursive bool, | |||
| receiver chan *Response, stop chan bool) (*Response, error) { | |||
| logger.Debugf("watch %s [%s]", prefix, c.cluster.Leader) | |||
| if receiver == nil { | |||
| raw, err := c.watchOnce(prefix, waitIndex, recursive, stop) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| defer close(receiver) | |||
| for { | |||
| raw, err := c.watchOnce(prefix, waitIndex, recursive, stop) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| resp, err := raw.Unmarshal() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| waitIndex = resp.Node.ModifiedIndex + 1 | |||
| receiver <- resp | |||
| } | |||
| } | |||
| func (c *Client) RawWatch(prefix string, waitIndex uint64, recursive bool, | |||
| receiver chan *RawResponse, stop chan bool) (*RawResponse, error) { | |||
| logger.Debugf("rawWatch %s [%s]", prefix, c.cluster.Leader) | |||
| if receiver == nil { | |||
| return c.watchOnce(prefix, waitIndex, recursive, stop) | |||
| } | |||
| for { | |||
| raw, err := c.watchOnce(prefix, waitIndex, recursive, stop) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| resp, err := raw.Unmarshal() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| waitIndex = resp.Node.ModifiedIndex + 1 | |||
| receiver <- raw | |||
| } | |||
| } | |||
| // helper func | |||
| // return when there is change under the given prefix | |||
| func (c *Client) watchOnce(key string, waitIndex uint64, recursive bool, stop chan bool) (*RawResponse, error) { | |||
| options := Options{ | |||
| "wait": true, | |||
| } | |||
| if waitIndex > 0 { | |||
| options["waitIndex"] = waitIndex | |||
| } | |||
| if recursive { | |||
| options["recursive"] = true | |||
| } | |||
| resp, err := c.getCancelable(key, options, stop) | |||
| if err == ErrRequestCancelled { | |||
| return nil, ErrWatchStoppedByUser | |||
| } | |||
| return resp, err | |||
| } | |||
| @@ -1,326 +0,0 @@ | |||
| // Copyright 2015 The Xorm Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package tidb | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "strconv" | |||
| "strings" | |||
| "github.com/go-xorm/core" | |||
| ) | |||
| type tidb struct { | |||
| core.Base | |||
| } | |||
| func (db *tidb) Init(d *core.DB, uri *core.Uri, drivername, dataSourceName string) error { | |||
| return db.Base.Init(d, db, uri, drivername, dataSourceName) | |||
| } | |||
| func (db *tidb) SqlType(c *core.Column) string { | |||
| var res string | |||
| switch t := c.SQLType.Name; t { | |||
| case core.Bool: | |||
| res = core.Bool | |||
| case core.Serial: | |||
| c.IsAutoIncrement = true | |||
| c.IsPrimaryKey = true | |||
| c.Nullable = false | |||
| res = core.Int | |||
| case core.BigSerial: | |||
| c.IsAutoIncrement = true | |||
| c.IsPrimaryKey = true | |||
| c.Nullable = false | |||
| res = core.BigInt | |||
| case core.Bytea: | |||
| res = core.Blob | |||
| case core.TimeStampz: | |||
| res = core.Char | |||
| c.Length = 64 | |||
| case core.Enum: //mysql enum | |||
| res = core.Enum | |||
| res += "(" | |||
| opts := "" | |||
| for v, _ := range c.EnumOptions { | |||
| opts += fmt.Sprintf(",'%v'", v) | |||
| } | |||
| res += strings.TrimLeft(opts, ",") | |||
| res += ")" | |||
| case core.Set: //mysql set | |||
| res = core.Set | |||
| res += "(" | |||
| opts := "" | |||
| for v, _ := range c.SetOptions { | |||
| opts += fmt.Sprintf(",'%v'", v) | |||
| } | |||
| res += strings.TrimLeft(opts, ",") | |||
| res += ")" | |||
| case core.NVarchar: | |||
| res = core.Varchar | |||
| case core.Uuid: | |||
| res = core.Varchar | |||
| c.Length = 40 | |||
| case core.Json: | |||
| res = core.Text | |||
| default: | |||
| res = t | |||
| } | |||
| var hasLen1 bool = (c.Length > 0) | |||
| var hasLen2 bool = (c.Length2 > 0) | |||
| if res == core.BigInt && !hasLen1 && !hasLen2 { | |||
| c.Length = 20 | |||
| hasLen1 = true | |||
| } | |||
| if hasLen2 { | |||
| res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")" | |||
| } else if hasLen1 { | |||
| res += "(" + strconv.Itoa(c.Length) + ")" | |||
| } | |||
| return res | |||
| } | |||
| func (db *tidb) SupportInsertMany() bool { | |||
| return true | |||
| } | |||
| func (db *tidb) IsReserved(name string) bool { | |||
| return false | |||
| } | |||
| func (db *tidb) Quote(name string) string { | |||
| return "`" + name + "`" | |||
| } | |||
| func (db *tidb) QuoteStr() string { | |||
| return "`" | |||
| } | |||
| func (db *tidb) SupportEngine() bool { | |||
| return false | |||
| } | |||
| func (db *tidb) AutoIncrStr() string { | |||
| return "AUTO_INCREMENT" | |||
| } | |||
| func (db *tidb) SupportCharset() bool { | |||
| return false | |||
| } | |||
| func (db *tidb) IndexOnTable() bool { | |||
| return true | |||
| } | |||
| func (db *tidb) IndexCheckSql(tableName, idxName string) (string, []interface{}) { | |||
| args := []interface{}{db.DbName, tableName, idxName} | |||
| sql := "SELECT `INDEX_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS`" | |||
| sql += " WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ? AND `INDEX_NAME`=?" | |||
| return sql, args | |||
| } | |||
| func (db *tidb) TableCheckSql(tableName string) (string, []interface{}) { | |||
| args := []interface{}{db.DbName, tableName} | |||
| sql := "SELECT `TABLE_NAME` from `INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? and `TABLE_NAME`=?" | |||
| return sql, args | |||
| } | |||
| func (db *tidb) GetColumns(tableName string) ([]string, map[string]*core.Column, error) { | |||
| args := []interface{}{db.DbName, tableName} | |||
| s := "SELECT `COLUMN_NAME`, `IS_NULLABLE`, `COLUMN_DEFAULT`, `COLUMN_TYPE`," + | |||
| " `COLUMN_KEY`, `EXTRA` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ?" | |||
| rows, err := db.DB().Query(s, args...) | |||
| db.LogSQL(s, args) | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| defer rows.Close() | |||
| cols := make(map[string]*core.Column) | |||
| colSeq := make([]string, 0) | |||
| for rows.Next() { | |||
| col := new(core.Column) | |||
| col.Indexes = make(map[string]int) | |||
| var columnName, isNullable, colType, colKey, extra string | |||
| var colDefault *string | |||
| err = rows.Scan(&columnName, &isNullable, &colDefault, &colType, &colKey, &extra) | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| col.Name = strings.Trim(columnName, "` ") | |||
| if "YES" == isNullable { | |||
| col.Nullable = true | |||
| } | |||
| if colDefault != nil { | |||
| col.Default = *colDefault | |||
| if col.Default == "" { | |||
| col.DefaultIsEmpty = true | |||
| } | |||
| } | |||
| cts := strings.Split(colType, "(") | |||
| colName := cts[0] | |||
| colType = strings.ToUpper(colName) | |||
| var len1, len2 int | |||
| if len(cts) == 2 { | |||
| idx := strings.Index(cts[1], ")") | |||
| if colType == core.Enum && cts[1][0] == '\'' { //enum | |||
| options := strings.Split(cts[1][0:idx], ",") | |||
| col.EnumOptions = make(map[string]int) | |||
| for k, v := range options { | |||
| v = strings.TrimSpace(v) | |||
| v = strings.Trim(v, "'") | |||
| col.EnumOptions[v] = k | |||
| } | |||
| } else if colType == core.Set && cts[1][0] == '\'' { | |||
| options := strings.Split(cts[1][0:idx], ",") | |||
| col.SetOptions = make(map[string]int) | |||
| for k, v := range options { | |||
| v = strings.TrimSpace(v) | |||
| v = strings.Trim(v, "'") | |||
| col.SetOptions[v] = k | |||
| } | |||
| } else { | |||
| lens := strings.Split(cts[1][0:idx], ",") | |||
| len1, err = strconv.Atoi(strings.TrimSpace(lens[0])) | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| if len(lens) == 2 { | |||
| len2, err = strconv.Atoi(lens[1]) | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| } | |||
| } | |||
| } | |||
| if colType == "FLOAT UNSIGNED" { | |||
| colType = "FLOAT" | |||
| } | |||
| col.Length = len1 | |||
| col.Length2 = len2 | |||
| if _, ok := core.SqlTypes[colType]; ok { | |||
| col.SQLType = core.SQLType{colType, len1, len2} | |||
| } else { | |||
| return nil, nil, errors.New(fmt.Sprintf("unkonw colType %v", colType)) | |||
| } | |||
| if colKey == "PRI" { | |||
| col.IsPrimaryKey = true | |||
| } | |||
| if colKey == "UNI" { | |||
| //col.is | |||
| } | |||
| if extra == "auto_increment" { | |||
| col.IsAutoIncrement = true | |||
| } | |||
| if col.SQLType.IsText() || col.SQLType.IsTime() { | |||
| if col.Default != "" { | |||
| col.Default = "'" + col.Default + "'" | |||
| } else { | |||
| if col.DefaultIsEmpty { | |||
| col.Default = "''" | |||
| } | |||
| } | |||
| } | |||
| cols[col.Name] = col | |||
| colSeq = append(colSeq, col.Name) | |||
| } | |||
| return colSeq, cols, nil | |||
| } | |||
| func (db *tidb) GetTables() ([]*core.Table, error) { | |||
| args := []interface{}{db.DbName} | |||
| s := "SELECT `TABLE_NAME`, `ENGINE`, `TABLE_ROWS`, `AUTO_INCREMENT` from " + | |||
| "`INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? AND (`ENGINE`='MyISAM' OR `ENGINE` = 'InnoDB')" | |||
| rows, err := db.DB().Query(s, args...) | |||
| db.LogSQL(s, args) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| defer rows.Close() | |||
| tables := make([]*core.Table, 0) | |||
| for rows.Next() { | |||
| table := core.NewEmptyTable() | |||
| var name, engine, tableRows string | |||
| var autoIncr *string | |||
| err = rows.Scan(&name, &engine, &tableRows, &autoIncr) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| table.Name = name | |||
| table.StoreEngine = engine | |||
| tables = append(tables, table) | |||
| } | |||
| return tables, nil | |||
| } | |||
| func (db *tidb) GetIndexes(tableName string) (map[string]*core.Index, error) { | |||
| args := []interface{}{db.DbName, tableName} | |||
| s := "SELECT `INDEX_NAME`, `NON_UNIQUE`, `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ?" | |||
| rows, err := db.DB().Query(s, args...) | |||
| db.LogSQL(s, args) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| defer rows.Close() | |||
| indexes := make(map[string]*core.Index, 0) | |||
| for rows.Next() { | |||
| var indexType int | |||
| var indexName, colName, nonUnique string | |||
| err = rows.Scan(&indexName, &nonUnique, &colName) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if indexName == "PRIMARY" { | |||
| continue | |||
| } | |||
| if "YES" == nonUnique || nonUnique == "1" { | |||
| indexType = core.IndexType | |||
| } else { | |||
| indexType = core.UniqueType | |||
| } | |||
| colName = strings.Trim(colName, "` ") | |||
| var isRegular bool | |||
| if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) { | |||
| indexName = indexName[5+len(tableName) : len(indexName)] | |||
| isRegular = true | |||
| } | |||
| var index *core.Index | |||
| var ok bool | |||
| if index, ok = indexes[indexName]; !ok { | |||
| index = new(core.Index) | |||
| index.IsRegular = isRegular | |||
| index.Type = indexType | |||
| index.Name = indexName | |||
| indexes[indexName] = index | |||
| } | |||
| index.AddColumn(colName) | |||
| } | |||
| return indexes, nil | |||
| } | |||
| func (db *tidb) Filters() []core.Filter { | |||
| return []core.Filter{&core.IdFilter{}} | |||
| } | |||
| @@ -1,48 +0,0 @@ | |||
| // Copyright 2015 The Xorm Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package tidb | |||
| import ( | |||
| "errors" | |||
| "net/url" | |||
| "path/filepath" | |||
| "github.com/go-xorm/core" | |||
| ) | |||
| var ( | |||
| _ core.Dialect = (*tidb)(nil) | |||
| DBType core.DbType = "tidb" | |||
| ) | |||
| func init() { | |||
| core.RegisterDriver(string(DBType), &tidbDriver{}) | |||
| core.RegisterDialect(DBType, func() core.Dialect { | |||
| return &tidb{} | |||
| }) | |||
| } | |||
| type tidbDriver struct { | |||
| } | |||
| func (p *tidbDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) { | |||
| u, err := url.Parse(dataSourceName) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if u.Scheme != "goleveldb" && u.Scheme != "memory" && u.Scheme != "boltdb" { | |||
| return nil, errors.New(u.Scheme + " is not supported yet.") | |||
| } | |||
| path := filepath.Join(u.Host, u.Path) | |||
| dbName := filepath.Clean(filepath.Base(path)) | |||
| uri := &core.Uri{ | |||
| DbType: DBType, | |||
| DbName: dbName, | |||
| } | |||
| return uri, nil | |||
| } | |||
| @@ -1,191 +0,0 @@ | |||
| All files in this repository are licensed as follows. If you contribute | |||
| to this repository, it is assumed that you license your contribution | |||
| under the same license unless you state otherwise. | |||
| All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. | |||
| This software is licensed under the LGPLv3, included below. | |||
| As a special exception to the GNU Lesser General Public License version 3 | |||
| ("LGPL3"), the copyright holders of this Library give you permission to | |||
| convey to a third party a Combined Work that links statically or dynamically | |||
| to this Library without providing any Minimal Corresponding Source or | |||
| Minimal Application Code as set out in 4d or providing the installation | |||
| information set out in section 4e, provided that you comply with the other | |||
| provisions of LGPL3 and provided that you meet, for the Application the | |||
| terms and conditions of the license(s) which apply to the Application. | |||
| Except as stated in this special exception, the provisions of LGPL3 will | |||
| continue to comply in full to this Library. If you modify this Library, you | |||
| may apply this exception to your version of this Library, but you are not | |||
| obliged to do so. If you do not wish to do so, delete this exception | |||
| statement from your version. This exception does not (and cannot) modify any | |||
| license terms which apply to the Application, with which you must still | |||
| comply. | |||
| GNU LESSER GENERAL PUBLIC LICENSE | |||
| Version 3, 29 June 2007 | |||
| Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> | |||
| Everyone is permitted to copy and distribute verbatim copies | |||
| of this license document, but changing it is not allowed. | |||
| This version of the GNU Lesser General Public License incorporates | |||
| the terms and conditions of version 3 of the GNU General Public | |||
| License, supplemented by the additional permissions listed below. | |||
| 0. Additional Definitions. | |||
| As used herein, "this License" refers to version 3 of the GNU Lesser | |||
| General Public License, and the "GNU GPL" refers to version 3 of the GNU | |||
| General Public License. | |||
| "The Library" refers to a covered work governed by this License, | |||
| other than an Application or a Combined Work as defined below. | |||
| An "Application" is any work that makes use of an interface provided | |||
| by the Library, but which is not otherwise based on the Library. | |||
| Defining a subclass of a class defined by the Library is deemed a mode | |||
| of using an interface provided by the Library. | |||
| A "Combined Work" is a work produced by combining or linking an | |||
| Application with the Library. The particular version of the Library | |||
| with which the Combined Work was made is also called the "Linked | |||
| Version". | |||
| The "Minimal Corresponding Source" for a Combined Work means the | |||
| Corresponding Source for the Combined Work, excluding any source code | |||
| for portions of the Combined Work that, considered in isolation, are | |||
| based on the Application, and not on the Linked Version. | |||
| The "Corresponding Application Code" for a Combined Work means the | |||
| object code and/or source code for the Application, including any data | |||
| and utility programs needed for reproducing the Combined Work from the | |||
| Application, but excluding the System Libraries of the Combined Work. | |||
| 1. Exception to Section 3 of the GNU GPL. | |||
| You may convey a covered work under sections 3 and 4 of this License | |||
| without being bound by section 3 of the GNU GPL. | |||
| 2. Conveying Modified Versions. | |||
| If you modify a copy of the Library, and, in your modifications, a | |||
| facility refers to a function or data to be supplied by an Application | |||
| that uses the facility (other than as an argument passed when the | |||
| facility is invoked), then you may convey a copy of the modified | |||
| version: | |||
| a) under this License, provided that you make a good faith effort to | |||
| ensure that, in the event an Application does not supply the | |||
| function or data, the facility still operates, and performs | |||
| whatever part of its purpose remains meaningful, or | |||
| b) under the GNU GPL, with none of the additional permissions of | |||
| this License applicable to that copy. | |||
| 3. Object Code Incorporating Material from Library Header Files. | |||
| The object code form of an Application may incorporate material from | |||
| a header file that is part of the Library. You may convey such object | |||
| code under terms of your choice, provided that, if the incorporated | |||
| material is not limited to numerical parameters, data structure | |||
| layouts and accessors, or small macros, inline functions and templates | |||
| (ten or fewer lines in length), you do both of the following: | |||
| a) Give prominent notice with each copy of the object code that the | |||
| Library is used in it and that the Library and its use are | |||
| covered by this License. | |||
| b) Accompany the object code with a copy of the GNU GPL and this license | |||
| document. | |||
| 4. Combined Works. | |||
| You may convey a Combined Work under terms of your choice that, | |||
| taken together, effectively do not restrict modification of the | |||
| portions of the Library contained in the Combined Work and reverse | |||
| engineering for debugging such modifications, if you also do each of | |||
| the following: | |||
| a) Give prominent notice with each copy of the Combined Work that | |||
| the Library is used in it and that the Library and its use are | |||
| covered by this License. | |||
| b) Accompany the Combined Work with a copy of the GNU GPL and this license | |||
| document. | |||
| c) For a Combined Work that displays copyright notices during | |||
| execution, include the copyright notice for the Library among | |||
| these notices, as well as a reference directing the user to the | |||
| copies of the GNU GPL and this license document. | |||
| d) Do one of the following: | |||
| 0) Convey the Minimal Corresponding Source under the terms of this | |||
| License, and the Corresponding Application Code in a form | |||
| suitable for, and under terms that permit, the user to | |||
| recombine or relink the Application with a modified version of | |||
| the Linked Version to produce a modified Combined Work, in the | |||
| manner specified by section 6 of the GNU GPL for conveying | |||
| Corresponding Source. | |||
| 1) Use a suitable shared library mechanism for linking with the | |||
| Library. A suitable mechanism is one that (a) uses at run time | |||
| a copy of the Library already present on the user's computer | |||
| system, and (b) will operate properly with a modified version | |||
| of the Library that is interface-compatible with the Linked | |||
| Version. | |||
| e) Provide Installation Information, but only if you would otherwise | |||
| be required to provide such information under section 6 of the | |||
| GNU GPL, and only to the extent that such information is | |||
| necessary to install and execute a modified version of the | |||
| Combined Work produced by recombining or relinking the | |||
| Application with a modified version of the Linked Version. (If | |||
| you use option 4d0, the Installation Information must accompany | |||
| the Minimal Corresponding Source and Corresponding Application | |||
| Code. If you use option 4d1, you must provide the Installation | |||
| Information in the manner specified by section 6 of the GNU GPL | |||
| for conveying Corresponding Source.) | |||
| 5. Combined Libraries. | |||
| You may place library facilities that are a work based on the | |||
| Library side by side in a single library together with other library | |||
| facilities that are not Applications and are not covered by this | |||
| License, and convey such a combined library under terms of your | |||
| choice, if you do both of the following: | |||
| a) Accompany the combined library with a copy of the same work based | |||
| on the Library, uncombined with any other library facilities, | |||
| conveyed under the terms of this License. | |||
| b) Give prominent notice with the combined library that part of it | |||
| is a work based on the Library, and explaining where to find the | |||
| accompanying uncombined form of the same work. | |||
| 6. Revised Versions of the GNU Lesser General Public License. | |||
| The Free Software Foundation may publish revised and/or new versions | |||
| of the GNU Lesser General Public License from time to time. Such new | |||
| versions will be similar in spirit to the present version, but may | |||
| differ in detail to address new problems or concerns. | |||
| Each version is given a distinguishing version number. If the | |||
| Library as you received it specifies that a certain numbered version | |||
| of the GNU Lesser General Public License "or any later version" | |||
| applies to it, you have the option of following the terms and | |||
| conditions either of that published version or of any later version | |||
| published by the Free Software Foundation. If the Library as you | |||
| received it does not specify a version number of the GNU Lesser | |||
| General Public License, you may choose any version of the GNU Lesser | |||
| General Public License ever published by the Free Software Foundation. | |||
| If the Library as you received it specifies that a proxy can decide | |||
| whether future versions of the GNU Lesser General Public License shall | |||
| apply, that proxy's public statement of acceptance of any version is | |||
| permanent authorization for you to choose that version for the | |||
| Library. | |||
| @@ -1,81 +0,0 @@ | |||
| // Copyright 2013, 2014 Canonical Ltd. | |||
| // Licensed under the LGPLv3, see LICENCE file for details. | |||
| /* | |||
| [godoc-link-here] | |||
The juju/errors package provides an easy way to annotate errors without losing the
original error context.
| The exported `New` and `Errorf` functions are designed to replace the | |||
| `errors.New` and `fmt.Errorf` functions respectively. The same underlying | |||
| error is there, but the package also records the location at which the error | |||
| was created. | |||
| A primary use case for this library is to add extra context any time an | |||
| error is returned from a function. | |||
| if err := SomeFunc(); err != nil { | |||
| return err | |||
| } | |||
| This instead becomes: | |||
| if err := SomeFunc(); err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| which just records the file and line number of the Trace call, or | |||
| if err := SomeFunc(); err != nil { | |||
| return errors.Annotate(err, "more context") | |||
| } | |||
| which also adds an annotation to the error. | |||
| When you want to check to see if an error is of a particular type, a helper | |||
| function is normally exported by the package that returned the error, like the | |||
| `os` package does. The underlying cause of the error is available using the | |||
| `Cause` function. | |||
| os.IsNotExist(errors.Cause(err)) | |||
| The result of the `Error()` call on an annotated error is the annotations joined | |||
| with colons, then the result of the `Error()` method for the underlying error | |||
| that was the cause. | |||
| err := errors.Errorf("original") | |||
| err = errors.Annotatef(err, "context") | |||
| err = errors.Annotatef(err, "more context") | |||
| err.Error() -> "more context: context: original" | |||
| Obviously recording the file, line and functions is not very useful if you | |||
| cannot get them back out again. | |||
| errors.ErrorStack(err) | |||
| will return something like: | |||
| first error | |||
| github.com/juju/errors/annotation_test.go:193: | |||
| github.com/juju/errors/annotation_test.go:194: annotation | |||
| github.com/juju/errors/annotation_test.go:195: | |||
| github.com/juju/errors/annotation_test.go:196: more context | |||
| github.com/juju/errors/annotation_test.go:197: | |||
| The first error was generated by an external system, so there was no location | |||
| associated. The second, fourth, and last lines were generated with Trace calls, | |||
| and the other two through Annotate. | |||
| Sometimes when responding to an error you want to return a more specific error | |||
| for the situation. | |||
| if err := FindField(field); err != nil { | |||
| return errors.Wrap(err, errors.NotFoundf(field)) | |||
| } | |||
| This returns an error where the complete error stack is still available, and | |||
| `errors.Cause()` will return the `NotFound` error. | |||
| */ | |||
| package errors | |||
| @@ -1,145 +0,0 @@ | |||
| // Copyright 2014 Canonical Ltd. | |||
| // Licensed under the LGPLv3, see LICENCE file for details. | |||
| package errors | |||
| import ( | |||
| "fmt" | |||
| "reflect" | |||
| "runtime" | |||
| ) | |||
// Err holds a description of an error along with information about
// where the error was created.
//
// It may be embedded in custom error types to add extra information that
// this errors package can understand.
type Err struct {
	// message holds an annotation of the error.
	message string
	// cause holds the cause of the error as returned
	// by the Cause method.
	cause error
	// previous holds the previous error in the error stack, if any.
	previous error
	// file and line hold the source code location where the error was
	// created; both are recorded by SetLocation.
	file string
	line int
}
| // NewErr is used to return an Err for the purpose of embedding in other | |||
| // structures. The location is not specified, and needs to be set with a call | |||
| // to SetLocation. | |||
| // | |||
| // For example: | |||
| // type FooError struct { | |||
| // errors.Err | |||
| // code int | |||
| // } | |||
| // | |||
| // func NewFooError(code int) error { | |||
| // err := &FooError{errors.NewErr("foo"), code} | |||
| // err.SetLocation(1) | |||
| // return err | |||
| // } | |||
| func NewErr(format string, args ...interface{}) Err { | |||
| return Err{ | |||
| message: fmt.Sprintf(format, args...), | |||
| } | |||
| } | |||
// NewErrWithCause is used to return an Err, caused by another error, for the
// purpose of embedding in other structures. The location is not specified,
// and needs to be set with a call to SetLocation.
//
// For example:
//    type FooError struct {
//        errors.Err
//        code int
//    }
//
//    func (e *FooError) Annotate(format string, args ...interface{}) error {
//        err := &FooError{errors.NewErrWithCause(e.Err, format, args...), e.code}
//        err.SetLocation(1)
//        return err
//    }
func NewErrWithCause(other error, format string, args ...interface{}) Err {
	return Err{
		message:  fmt.Sprintf(format, args...),
		cause:    Cause(other),
		previous: other,
	}
}
| // Location is the file and line of where the error was most recently | |||
| // created or annotated. | |||
| func (e *Err) Location() (filename string, line int) { | |||
| return e.file, e.line | |||
| } | |||
| // Underlying returns the previous error in the error stack, if any. A client | |||
| // should not ever really call this method. It is used to build the error | |||
| // stack and should not be introspected by client calls. Or more | |||
| // specifically, clients should not depend on anything but the `Cause` of an | |||
| // error. | |||
| func (e *Err) Underlying() error { | |||
| return e.previous | |||
| } | |||
// The Cause of an error is the most recent error in the error stack that
// meets one of these criteria: the original error that was raised; the new
// error that was passed into the Wrap function; the most recently masked
// error; or nil if the error itself is considered the Cause. Normally this
// method is not invoked directly, but instead through the Cause stand alone
// function.
func (e *Err) Cause() error {
	return e.cause
}
| // Message returns the message stored with the most recent location. This is | |||
| // the empty string if the most recent call was Trace, or the message stored | |||
| // with Annotate or Mask. | |||
| func (e *Err) Message() string { | |||
| return e.message | |||
| } | |||
| // Error implements error.Error. | |||
| func (e *Err) Error() string { | |||
| // We want to walk up the stack of errors showing the annotations | |||
| // as long as the cause is the same. | |||
| err := e.previous | |||
| if !sameError(Cause(err), e.cause) && e.cause != nil { | |||
| err = e.cause | |||
| } | |||
| switch { | |||
| case err == nil: | |||
| return e.message | |||
| case e.message == "": | |||
| return err.Error() | |||
| } | |||
| return fmt.Sprintf("%s: %v", e.message, err) | |||
| } | |||
// SetLocation records the source location of the error at callDepth stack
// frames above the call.
func (e *Err) SetLocation(callDepth int) {
	// +1 skips SetLocation's own frame so the recorded location is the
	// caller's (or further up the stack, per callDepth).
	_, file, line, _ := runtime.Caller(callDepth + 1)
	e.file = trimGoPath(file)
	e.line = line
}
// StackTrace returns one string for each location recorded in the stack of
// errors. The first value is the originating error, with a line for each
// other annotation or tracing of the error.
func (e *Err) StackTrace() []string {
	// Delegates to the package-level errorStack helper.
	return errorStack(e)
}
// sameError reports whether e1 and e2 are the same error. Ideally we'd
// have a way to check identity, but deep equality will do.
func sameError(e1, e2 error) bool {
	equal := reflect.DeepEqual(e1, e2)
	return equal
}
| @@ -1,284 +0,0 @@ | |||
| // Copyright 2014 Canonical Ltd. | |||
| // Licensed under the LGPLv3, see LICENCE file for details. | |||
| package errors | |||
| import ( | |||
| "fmt" | |||
| ) | |||
| // wrap is a helper to construct an *wrapper. | |||
| func wrap(err error, format, suffix string, args ...interface{}) Err { | |||
| newErr := Err{ | |||
| message: fmt.Sprintf(format+suffix, args...), | |||
| previous: err, | |||
| } | |||
| newErr.SetLocation(2) | |||
| return newErr | |||
| } | |||
| // notFound represents an error when something has not been found. | |||
| type notFound struct { | |||
| Err | |||
| } | |||
| // NotFoundf returns an error which satisfies IsNotFound(). | |||
| func NotFoundf(format string, args ...interface{}) error { | |||
| return ¬Found{wrap(nil, format, " not found", args...)} | |||
| } | |||
| // NewNotFound returns an error which wraps err that satisfies | |||
| // IsNotFound(). | |||
| func NewNotFound(err error, msg string) error { | |||
| return ¬Found{wrap(err, msg, "")} | |||
| } | |||
| // IsNotFound reports whether err was created with NotFoundf() or | |||
| // NewNotFound(). | |||
| func IsNotFound(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*notFound) | |||
| return ok | |||
| } | |||
| // userNotFound represents an error when an inexistent user is looked up. | |||
| type userNotFound struct { | |||
| Err | |||
| } | |||
| // UserNotFoundf returns an error which satisfies IsUserNotFound(). | |||
| func UserNotFoundf(format string, args ...interface{}) error { | |||
| return &userNotFound{wrap(nil, format, " user not found", args...)} | |||
| } | |||
| // NewUserNotFound returns an error which wraps err and satisfies | |||
| // IsUserNotFound(). | |||
| func NewUserNotFound(err error, msg string) error { | |||
| return &userNotFound{wrap(err, msg, "")} | |||
| } | |||
| // IsUserNotFound reports whether err was created with UserNotFoundf() or | |||
| // NewUserNotFound(). | |||
| func IsUserNotFound(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*userNotFound) | |||
| return ok | |||
| } | |||
| // unauthorized represents an error when an operation is unauthorized. | |||
| type unauthorized struct { | |||
| Err | |||
| } | |||
| // Unauthorizedf returns an error which satisfies IsUnauthorized(). | |||
| func Unauthorizedf(format string, args ...interface{}) error { | |||
| return &unauthorized{wrap(nil, format, "", args...)} | |||
| } | |||
| // NewUnauthorized returns an error which wraps err and satisfies | |||
| // IsUnauthorized(). | |||
| func NewUnauthorized(err error, msg string) error { | |||
| return &unauthorized{wrap(err, msg, "")} | |||
| } | |||
| // IsUnauthorized reports whether err was created with Unauthorizedf() or | |||
| // NewUnauthorized(). | |||
| func IsUnauthorized(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*unauthorized) | |||
| return ok | |||
| } | |||
| // notImplemented represents an error when something is not | |||
| // implemented. | |||
| type notImplemented struct { | |||
| Err | |||
| } | |||
| // NotImplementedf returns an error which satisfies IsNotImplemented(). | |||
| func NotImplementedf(format string, args ...interface{}) error { | |||
| return ¬Implemented{wrap(nil, format, " not implemented", args...)} | |||
| } | |||
| // NewNotImplemented returns an error which wraps err and satisfies | |||
| // IsNotImplemented(). | |||
| func NewNotImplemented(err error, msg string) error { | |||
| return ¬Implemented{wrap(err, msg, "")} | |||
| } | |||
| // IsNotImplemented reports whether err was created with | |||
| // NotImplementedf() or NewNotImplemented(). | |||
| func IsNotImplemented(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*notImplemented) | |||
| return ok | |||
| } | |||
| // alreadyExists represents and error when something already exists. | |||
| type alreadyExists struct { | |||
| Err | |||
| } | |||
| // AlreadyExistsf returns an error which satisfies IsAlreadyExists(). | |||
| func AlreadyExistsf(format string, args ...interface{}) error { | |||
| return &alreadyExists{wrap(nil, format, " already exists", args...)} | |||
| } | |||
| // NewAlreadyExists returns an error which wraps err and satisfies | |||
| // IsAlreadyExists(). | |||
| func NewAlreadyExists(err error, msg string) error { | |||
| return &alreadyExists{wrap(err, msg, "")} | |||
| } | |||
| // IsAlreadyExists reports whether the error was created with | |||
| // AlreadyExistsf() or NewAlreadyExists(). | |||
| func IsAlreadyExists(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*alreadyExists) | |||
| return ok | |||
| } | |||
| // notSupported represents an error when something is not supported. | |||
| type notSupported struct { | |||
| Err | |||
| } | |||
| // NotSupportedf returns an error which satisfies IsNotSupported(). | |||
| func NotSupportedf(format string, args ...interface{}) error { | |||
| return ¬Supported{wrap(nil, format, " not supported", args...)} | |||
| } | |||
| // NewNotSupported returns an error which wraps err and satisfies | |||
| // IsNotSupported(). | |||
| func NewNotSupported(err error, msg string) error { | |||
| return ¬Supported{wrap(err, msg, "")} | |||
| } | |||
| // IsNotSupported reports whether the error was created with | |||
| // NotSupportedf() or NewNotSupported(). | |||
| func IsNotSupported(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*notSupported) | |||
| return ok | |||
| } | |||
| // notValid represents an error when something is not valid. | |||
| type notValid struct { | |||
| Err | |||
| } | |||
| // NotValidf returns an error which satisfies IsNotValid(). | |||
| func NotValidf(format string, args ...interface{}) error { | |||
| return ¬Valid{wrap(nil, format, " not valid", args...)} | |||
| } | |||
| // NewNotValid returns an error which wraps err and satisfies IsNotValid(). | |||
| func NewNotValid(err error, msg string) error { | |||
| return ¬Valid{wrap(err, msg, "")} | |||
| } | |||
| // IsNotValid reports whether the error was created with NotValidf() or | |||
| // NewNotValid(). | |||
| func IsNotValid(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*notValid) | |||
| return ok | |||
| } | |||
| // notProvisioned represents an error when something is not yet provisioned. | |||
| type notProvisioned struct { | |||
| Err | |||
| } | |||
| // NotProvisionedf returns an error which satisfies IsNotProvisioned(). | |||
| func NotProvisionedf(format string, args ...interface{}) error { | |||
| return ¬Provisioned{wrap(nil, format, " not provisioned", args...)} | |||
| } | |||
| // NewNotProvisioned returns an error which wraps err that satisfies | |||
| // IsNotProvisioned(). | |||
| func NewNotProvisioned(err error, msg string) error { | |||
| return ¬Provisioned{wrap(err, msg, "")} | |||
| } | |||
| // IsNotProvisioned reports whether err was created with NotProvisionedf() or | |||
| // NewNotProvisioned(). | |||
| func IsNotProvisioned(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*notProvisioned) | |||
| return ok | |||
| } | |||
| // notAssigned represents an error when something is not yet assigned to | |||
| // something else. | |||
| type notAssigned struct { | |||
| Err | |||
| } | |||
| // NotAssignedf returns an error which satisfies IsNotAssigned(). | |||
| func NotAssignedf(format string, args ...interface{}) error { | |||
| return ¬Assigned{wrap(nil, format, " not assigned", args...)} | |||
| } | |||
| // NewNotAssigned returns an error which wraps err that satisfies | |||
| // IsNotAssigned(). | |||
| func NewNotAssigned(err error, msg string) error { | |||
| return ¬Assigned{wrap(err, msg, "")} | |||
| } | |||
| // IsNotAssigned reports whether err was created with NotAssignedf() or | |||
| // NewNotAssigned(). | |||
| func IsNotAssigned(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*notAssigned) | |||
| return ok | |||
| } | |||
| // badRequest represents an error when a request has bad parameters. | |||
| type badRequest struct { | |||
| Err | |||
| } | |||
| // BadRequestf returns an error which satisfies IsBadRequest(). | |||
| func BadRequestf(format string, args ...interface{}) error { | |||
| return &badRequest{wrap(nil, format, "", args...)} | |||
| } | |||
| // NewBadRequest returns an error which wraps err that satisfies | |||
| // IsBadRequest(). | |||
| func NewBadRequest(err error, msg string) error { | |||
| return &badRequest{wrap(err, msg, "")} | |||
| } | |||
| // IsBadRequest reports whether err was created with BadRequestf() or | |||
| // NewBadRequest(). | |||
| func IsBadRequest(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*badRequest) | |||
| return ok | |||
| } | |||
| // methodNotAllowed represents an error when an HTTP request | |||
| // is made with an inappropriate method. | |||
| type methodNotAllowed struct { | |||
| Err | |||
| } | |||
| // MethodNotAllowedf returns an error which satisfies IsMethodNotAllowed(). | |||
| func MethodNotAllowedf(format string, args ...interface{}) error { | |||
| return &methodNotAllowed{wrap(nil, format, "", args...)} | |||
| } | |||
| // NewMethodNotAllowed returns an error which wraps err that satisfies | |||
| // IsMethodNotAllowed(). | |||
| func NewMethodNotAllowed(err error, msg string) error { | |||
| return &methodNotAllowed{wrap(err, msg, "")} | |||
| } | |||
| // IsMethodNotAllowed reports whether err was created with MethodNotAllowedf() or | |||
| // NewMethodNotAllowed(). | |||
| func IsMethodNotAllowed(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*methodNotAllowed) | |||
| return ok | |||
| } | |||
| @@ -1,330 +0,0 @@ | |||
| // Copyright 2014 Canonical Ltd. | |||
| // Licensed under the LGPLv3, see LICENCE file for details. | |||
| package errors | |||
| import ( | |||
| "fmt" | |||
| "strings" | |||
| ) | |||
| // New is a drop in replacement for the standard libary errors module that records | |||
| // the location that the error is created. | |||
| // | |||
| // For example: | |||
| // return errors.New("validation failed") | |||
| // | |||
| func New(message string) error { | |||
| err := &Err{message: message} | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Errorf creates a new annotated error and records the location that the | |||
| // error is created. This should be a drop in replacement for fmt.Errorf. | |||
| // | |||
| // For example: | |||
| // return errors.Errorf("validation failed: %s", message) | |||
| // | |||
| func Errorf(format string, args ...interface{}) error { | |||
| err := &Err{message: fmt.Sprintf(format, args...)} | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Trace adds the location of the Trace call to the stack. The Cause of the | |||
| // resulting error is the same as the error parameter. If the other error is | |||
| // nil, the result will be nil. | |||
| // | |||
| // For example: | |||
| // if err := SomeFunc(); err != nil { | |||
| // return errors.Trace(err) | |||
| // } | |||
| // | |||
| func Trace(other error) error { | |||
| if other == nil { | |||
| return nil | |||
| } | |||
| err := &Err{previous: other, cause: Cause(other)} | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Annotate is used to add extra context to an existing error. The location of | |||
| // the Annotate call is recorded with the annotations. The file, line and | |||
| // function are also recorded. | |||
| // | |||
| // For example: | |||
| // if err := SomeFunc(); err != nil { | |||
| // return errors.Annotate(err, "failed to frombulate") | |||
| // } | |||
| // | |||
| func Annotate(other error, message string) error { | |||
| if other == nil { | |||
| return nil | |||
| } | |||
| err := &Err{ | |||
| previous: other, | |||
| cause: Cause(other), | |||
| message: message, | |||
| } | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Annotatef is used to add extra context to an existing error. The location of | |||
| // the Annotate call is recorded with the annotations. The file, line and | |||
| // function are also recorded. | |||
| // | |||
| // For example: | |||
| // if err := SomeFunc(); err != nil { | |||
| // return errors.Annotatef(err, "failed to frombulate the %s", arg) | |||
| // } | |||
| // | |||
| func Annotatef(other error, format string, args ...interface{}) error { | |||
| if other == nil { | |||
| return nil | |||
| } | |||
| err := &Err{ | |||
| previous: other, | |||
| cause: Cause(other), | |||
| message: fmt.Sprintf(format, args...), | |||
| } | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // DeferredAnnotatef annotates the given error (when it is not nil) with the given | |||
| // format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef | |||
| // does nothing. This method is used in a defer statement in order to annotate any | |||
| // resulting error with the same message. | |||
| // | |||
| // For example: | |||
| // | |||
| // defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg) | |||
| // | |||
| func DeferredAnnotatef(err *error, format string, args ...interface{}) { | |||
| if *err == nil { | |||
| return | |||
| } | |||
| newErr := &Err{ | |||
| message: fmt.Sprintf(format, args...), | |||
| cause: Cause(*err), | |||
| previous: *err, | |||
| } | |||
| newErr.SetLocation(1) | |||
| *err = newErr | |||
| } | |||
| // Wrap changes the Cause of the error. The location of the Wrap call is also | |||
| // stored in the error stack. | |||
| // | |||
| // For example: | |||
| // if err := SomeFunc(); err != nil { | |||
| // newErr := &packageError{"more context", private_value} | |||
| // return errors.Wrap(err, newErr) | |||
| // } | |||
| // | |||
| func Wrap(other, newDescriptive error) error { | |||
| err := &Err{ | |||
| previous: other, | |||
| cause: newDescriptive, | |||
| } | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Wrapf changes the Cause of the error, and adds an annotation. The location | |||
| // of the Wrap call is also stored in the error stack. | |||
| // | |||
| // For example: | |||
| // if err := SomeFunc(); err != nil { | |||
| // return errors.Wrapf(err, simpleErrorType, "invalid value %q", value) | |||
| // } | |||
| // | |||
| func Wrapf(other, newDescriptive error, format string, args ...interface{}) error { | |||
| err := &Err{ | |||
| message: fmt.Sprintf(format, args...), | |||
| previous: other, | |||
| cause: newDescriptive, | |||
| } | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Mask masks the given error with the given format string and arguments (like | |||
| // fmt.Sprintf), returning a new error that maintains the error stack, but | |||
| // hides the underlying error type. The error string still contains the full | |||
| // annotations. If you want to hide the annotations, call Wrap. | |||
| func Maskf(other error, format string, args ...interface{}) error { | |||
| if other == nil { | |||
| return nil | |||
| } | |||
| err := &Err{ | |||
| message: fmt.Sprintf(format, args...), | |||
| previous: other, | |||
| } | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Mask hides the underlying error type, and records the location of the masking. | |||
| func Mask(other error) error { | |||
| if other == nil { | |||
| return nil | |||
| } | |||
| err := &Err{ | |||
| previous: other, | |||
| } | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Cause returns the cause of the given error. This will be either the | |||
| // original error, or the result of a Wrap or Mask call. | |||
| // | |||
| // Cause is the usual way to diagnose errors that may have been wrapped by | |||
| // the other errors functions. | |||
| func Cause(err error) error { | |||
| var diag error | |||
| if err, ok := err.(causer); ok { | |||
| diag = err.Cause() | |||
| } | |||
| if diag != nil { | |||
| return diag | |||
| } | |||
| return err | |||
| } | |||
| type causer interface { | |||
| Cause() error | |||
| } | |||
| type wrapper interface { | |||
| // Message returns the top level error message, | |||
| // not including the message from the Previous | |||
| // error. | |||
| Message() string | |||
| // Underlying returns the Previous error, or nil | |||
| // if there is none. | |||
| Underlying() error | |||
| } | |||
| type locationer interface { | |||
| Location() (string, int) | |||
| } | |||
| var ( | |||
| _ wrapper = (*Err)(nil) | |||
| _ locationer = (*Err)(nil) | |||
| _ causer = (*Err)(nil) | |||
| ) | |||
| // Details returns information about the stack of errors wrapped by err, in | |||
| // the format: | |||
| // | |||
| // [{filename:99: error one} {otherfile:55: cause of error one}] | |||
| // | |||
| // This is a terse alternative to ErrorStack as it returns a single line. | |||
| func Details(err error) string { | |||
| if err == nil { | |||
| return "[]" | |||
| } | |||
| var s []byte | |||
| s = append(s, '[') | |||
| for { | |||
| s = append(s, '{') | |||
| if err, ok := err.(locationer); ok { | |||
| file, line := err.Location() | |||
| if file != "" { | |||
| s = append(s, fmt.Sprintf("%s:%d", file, line)...) | |||
| s = append(s, ": "...) | |||
| } | |||
| } | |||
| if cerr, ok := err.(wrapper); ok { | |||
| s = append(s, cerr.Message()...) | |||
| err = cerr.Underlying() | |||
| } else { | |||
| s = append(s, err.Error()...) | |||
| err = nil | |||
| } | |||
| s = append(s, '}') | |||
| if err == nil { | |||
| break | |||
| } | |||
| s = append(s, ' ') | |||
| } | |||
| s = append(s, ']') | |||
| return string(s) | |||
| } | |||
| // ErrorStack returns a string representation of the annotated error. If the | |||
| // error passed as the parameter is not an annotated error, the result is | |||
| // simply the result of the Error() method on that error. | |||
| // | |||
| // If the error is an annotated error, a multi-line string is returned where | |||
| // each line represents one entry in the annotation stack. The full filename | |||
| // from the call stack is used in the output. | |||
| // | |||
| // first error | |||
| // github.com/juju/errors/annotation_test.go:193: | |||
| // github.com/juju/errors/annotation_test.go:194: annotation | |||
| // github.com/juju/errors/annotation_test.go:195: | |||
| // github.com/juju/errors/annotation_test.go:196: more context | |||
| // github.com/juju/errors/annotation_test.go:197: | |||
| func ErrorStack(err error) string { | |||
| return strings.Join(errorStack(err), "\n") | |||
| } | |||
| func errorStack(err error) []string { | |||
| if err == nil { | |||
| return nil | |||
| } | |||
| // We want the first error first | |||
| var lines []string | |||
| for { | |||
| var buff []byte | |||
| if err, ok := err.(locationer); ok { | |||
| file, line := err.Location() | |||
| // Strip off the leading GOPATH/src path elements. | |||
| file = trimGoPath(file) | |||
| if file != "" { | |||
| buff = append(buff, fmt.Sprintf("%s:%d", file, line)...) | |||
| buff = append(buff, ": "...) | |||
| } | |||
| } | |||
| if cerr, ok := err.(wrapper); ok { | |||
| message := cerr.Message() | |||
| buff = append(buff, message...) | |||
| // If there is a cause for this error, and it is different to the cause | |||
| // of the underlying error, then output the error string in the stack trace. | |||
| var cause error | |||
| if err1, ok := err.(causer); ok { | |||
| cause = err1.Cause() | |||
| } | |||
| err = cerr.Underlying() | |||
| if cause != nil && !sameError(Cause(err), cause) { | |||
| if message != "" { | |||
| buff = append(buff, ": "...) | |||
| } | |||
| buff = append(buff, cause.Error()...) | |||
| } | |||
| } else { | |||
| buff = append(buff, err.Error()...) | |||
| err = nil | |||
| } | |||
| lines = append(lines, string(buff)) | |||
| if err == nil { | |||
| break | |||
| } | |||
| } | |||
| // reverse the lines to get the original error, which was at the end of | |||
| // the list, back to the start. | |||
| var result []string | |||
| for i := len(lines); i > 0; i-- { | |||
| result = append(result, lines[i-1]) | |||
| } | |||
| return result | |||
| } | |||
| @@ -1,38 +0,0 @@ | |||
| // Copyright 2013, 2014 Canonical Ltd. | |||
| // Licensed under the LGPLv3, see LICENCE file for details. | |||
| package errors | |||
| import ( | |||
| "runtime" | |||
| "strings" | |||
| ) | |||
| // prefixSize is used internally to trim the user specific path from the | |||
| // front of the returned filenames from the runtime call stack. | |||
| var prefixSize int | |||
| // goPath is the deduced path based on the location of this file as compiled. | |||
| var goPath string | |||
| func init() { | |||
| _, file, _, ok := runtime.Caller(0) | |||
| if file == "?" { | |||
| return | |||
| } | |||
| if ok { | |||
| // We know that the end of the file should be: | |||
| // github.com/juju/errors/path.go | |||
| size := len(file) | |||
| suffix := len("github.com/juju/errors/path.go") | |||
| goPath = file[:size-suffix] | |||
| prefixSize = len(goPath) | |||
| } | |||
| } | |||
| func trimGoPath(filename string) string { | |||
| if strings.HasPrefix(filename, goPath) { | |||
| return filename[prefixSize:] | |||
| } | |||
| return filename | |||
| } | |||
| @@ -1,50 +0,0 @@ | |||
| package deadline | |||
| import ( | |||
| "io" | |||
| "time" | |||
| ) | |||
| type DeadlineReader interface { | |||
| io.Reader | |||
| SetReadDeadline(t time.Time) error | |||
| } | |||
| type DeadlineWriter interface { | |||
| io.Writer | |||
| SetWriteDeadline(t time.Time) error | |||
| } | |||
| type DeadlineReadWriter interface { | |||
| io.ReadWriter | |||
| SetReadDeadline(t time.Time) error | |||
| SetWriteDeadline(t time.Time) error | |||
| } | |||
| type deadlineReader struct { | |||
| DeadlineReader | |||
| timeout time.Duration | |||
| } | |||
| func (r *deadlineReader) Read(p []byte) (int, error) { | |||
| r.DeadlineReader.SetReadDeadline(time.Now().Add(r.timeout)) | |||
| return r.DeadlineReader.Read(p) | |||
| } | |||
| func NewDeadlineReader(r DeadlineReader, timeout time.Duration) io.Reader { | |||
| return &deadlineReader{DeadlineReader: r, timeout: timeout} | |||
| } | |||
| type deadlineWriter struct { | |||
| DeadlineWriter | |||
| timeout time.Duration | |||
| } | |||
| func (r *deadlineWriter) Write(p []byte) (int, error) { | |||
| r.DeadlineWriter.SetWriteDeadline(time.Now().Add(r.timeout)) | |||
| return r.DeadlineWriter.Write(p) | |||
| } | |||
| func NewDeadlineWriter(r DeadlineWriter, timeout time.Duration) io.Writer { | |||
| return &deadlineWriter{DeadlineWriter: r, timeout: timeout} | |||
| } | |||
| @@ -1,25 +0,0 @@ | |||
| Copyright (c) 2013, Samuel Stauffer <samuel@descolada.com> | |||
| All rights reserved. | |||
| Redistribution and use in source and binary forms, with or without | |||
| modification, are permitted provided that the following conditions are met: | |||
| * Redistributions of source code must retain the above copyright | |||
| notice, this list of conditions and the following disclaimer. | |||
| * Redistributions in binary form must reproduce the above copyright | |||
| notice, this list of conditions and the following disclaimer in the | |||
| documentation and/or other materials provided with the distribution. | |||
| * Neither the name of the author nor the | |||
| names of its contributors may be used to endorse or promote products | |||
| derived from this software without specific prior written permission. | |||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND | |||
| ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | |||
| WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | |||
| DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY | |||
| DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | |||
| (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |||
| LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | |||
| ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | |||
| SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| @@ -1,788 +0,0 @@ | |||
| package zk | |||
| /* | |||
| TODO: | |||
| * make sure a ping response comes back in a reasonable time | |||
| Possible watcher events: | |||
| * Event{Type: EventNotWatching, State: StateDisconnected, Path: path, Err: err} | |||
| */ | |||
| import ( | |||
| "crypto/rand" | |||
| "encoding/binary" | |||
| "errors" | |||
| "fmt" | |||
| "io" | |||
| "log" | |||
| "net" | |||
| "strconv" | |||
| "strings" | |||
| "sync" | |||
| "sync/atomic" | |||
| "time" | |||
| ) | |||
| var ErrNoServer = errors.New("zk: could not connect to a server") | |||
| const ( | |||
| bufferSize = 10 * 1024 * 1024 | |||
| eventChanSize = 6 | |||
| sendChanSize = 16 | |||
| protectedPrefix = "_c_" | |||
| ) | |||
| type watchType int | |||
| const ( | |||
| watchTypeData = iota | |||
| watchTypeExist = iota | |||
| watchTypeChild = iota | |||
| ) | |||
| type watchPathType struct { | |||
| path string | |||
| wType watchType | |||
| } | |||
| type Dialer func(network, address string, timeout time.Duration) (net.Conn, error) | |||
| type Conn struct { | |||
| lastZxid int64 | |||
| sessionID int64 | |||
| state State // must be 32-bit aligned | |||
| xid int32 | |||
| timeout int32 // session timeout in seconds | |||
| passwd []byte | |||
| dialer Dialer | |||
| servers []string | |||
| serverIndex int | |||
| conn net.Conn | |||
| eventChan chan Event | |||
| shouldQuit chan bool | |||
| pingInterval time.Duration | |||
| recvTimeout time.Duration | |||
| connectTimeout time.Duration | |||
| sendChan chan *request | |||
| requests map[int32]*request // Xid -> pending request | |||
| requestsLock sync.Mutex | |||
| watchers map[watchPathType][]chan Event | |||
| watchersLock sync.Mutex | |||
| // Debug (used by unit tests) | |||
| reconnectDelay time.Duration | |||
| } | |||
| type request struct { | |||
| xid int32 | |||
| opcode int32 | |||
| pkt interface{} | |||
| recvStruct interface{} | |||
| recvChan chan response | |||
| // Because sending and receiving happen in separate go routines, there's | |||
| // a possible race condition when creating watches from outside the read | |||
| // loop. We must ensure that a watcher gets added to the list synchronously | |||
| // with the response from the server on any request that creates a watch. | |||
| // In order to not hard code the watch logic for each opcode in the recv | |||
| // loop the caller can use recvFunc to insert some synchronously code | |||
| // after a response. | |||
| recvFunc func(*request, *responseHeader, error) | |||
| } | |||
| type response struct { | |||
| zxid int64 | |||
| err error | |||
| } | |||
| type Event struct { | |||
| Type EventType | |||
| State State | |||
| Path string // For non-session events, the path of the watched node. | |||
| Err error | |||
| } | |||
| func Connect(servers []string, recvTimeout time.Duration) (*Conn, <-chan Event, error) { | |||
| return ConnectWithDialer(servers, recvTimeout, nil) | |||
| } | |||
| func ConnectWithDialer(servers []string, recvTimeout time.Duration, dialer Dialer) (*Conn, <-chan Event, error) { | |||
| // Randomize the order of the servers to avoid creating hotspots | |||
| stringShuffle(servers) | |||
| for i, addr := range servers { | |||
| if !strings.Contains(addr, ":") { | |||
| servers[i] = addr + ":" + strconv.Itoa(DefaultPort) | |||
| } | |||
| } | |||
| ec := make(chan Event, eventChanSize) | |||
| if dialer == nil { | |||
| dialer = net.DialTimeout | |||
| } | |||
| conn := Conn{ | |||
| dialer: dialer, | |||
| servers: servers, | |||
| serverIndex: 0, | |||
| conn: nil, | |||
| state: StateDisconnected, | |||
| eventChan: ec, | |||
| shouldQuit: make(chan bool), | |||
| recvTimeout: recvTimeout, | |||
| pingInterval: time.Duration((int64(recvTimeout) / 2)), | |||
| connectTimeout: 1 * time.Second, | |||
| sendChan: make(chan *request, sendChanSize), | |||
| requests: make(map[int32]*request), | |||
| watchers: make(map[watchPathType][]chan Event), | |||
| passwd: emptyPassword, | |||
| timeout: 30000, | |||
| // Debug | |||
| reconnectDelay: time.Second, | |||
| } | |||
| go func() { | |||
| conn.loop() | |||
| conn.flushRequests(ErrClosing) | |||
| conn.invalidateWatches(ErrClosing) | |||
| close(conn.eventChan) | |||
| }() | |||
| return &conn, ec, nil | |||
| } | |||
| func (c *Conn) Close() { | |||
| close(c.shouldQuit) | |||
| select { | |||
| case <-c.queueRequest(opClose, &closeRequest{}, &closeResponse{}, nil): | |||
| case <-time.After(time.Second): | |||
| } | |||
| } | |||
| func (c *Conn) State() State { | |||
| return State(atomic.LoadInt32((*int32)(&c.state))) | |||
| } | |||
| func (c *Conn) setState(state State) { | |||
| atomic.StoreInt32((*int32)(&c.state), int32(state)) | |||
| select { | |||
| case c.eventChan <- Event{Type: EventSession, State: state}: | |||
| default: | |||
| // panic("zk: event channel full - it must be monitored and never allowed to be full") | |||
| } | |||
| } | |||
| func (c *Conn) connect() { | |||
| c.serverIndex = (c.serverIndex + 1) % len(c.servers) | |||
| startIndex := c.serverIndex | |||
| c.setState(StateConnecting) | |||
| for { | |||
| zkConn, err := c.dialer("tcp", c.servers[c.serverIndex], c.connectTimeout) | |||
| if err == nil { | |||
| c.conn = zkConn | |||
| c.setState(StateConnected) | |||
| return | |||
| } | |||
| log.Printf("Failed to connect to %s: %+v", c.servers[c.serverIndex], err) | |||
| c.serverIndex = (c.serverIndex + 1) % len(c.servers) | |||
| if c.serverIndex == startIndex { | |||
| c.flushUnsentRequests(ErrNoServer) | |||
| time.Sleep(time.Second) | |||
| } | |||
| } | |||
| } | |||
| func (c *Conn) loop() { | |||
| for { | |||
| c.connect() | |||
| err := c.authenticate() | |||
| switch { | |||
| case err == ErrSessionExpired: | |||
| c.invalidateWatches(err) | |||
| case err != nil && c.conn != nil: | |||
| c.conn.Close() | |||
| case err == nil: | |||
| closeChan := make(chan bool) // channel to tell send loop stop | |||
| var wg sync.WaitGroup | |||
| wg.Add(1) | |||
| go func() { | |||
| c.sendLoop(c.conn, closeChan) | |||
| c.conn.Close() // causes recv loop to EOF/exit | |||
| wg.Done() | |||
| }() | |||
| wg.Add(1) | |||
| go func() { | |||
| err = c.recvLoop(c.conn) | |||
| if err == nil { | |||
| panic("zk: recvLoop should never return nil error") | |||
| } | |||
| close(closeChan) // tell send loop to exit | |||
| wg.Done() | |||
| }() | |||
| wg.Wait() | |||
| } | |||
| c.setState(StateDisconnected) | |||
| // Yeesh | |||
| if err != io.EOF && err != ErrSessionExpired && !strings.Contains(err.Error(), "use of closed network connection") { | |||
| log.Println(err) | |||
| } | |||
| select { | |||
| case <-c.shouldQuit: | |||
| c.flushRequests(ErrClosing) | |||
| return | |||
| default: | |||
| } | |||
| if err != ErrSessionExpired { | |||
| err = ErrConnectionClosed | |||
| } | |||
| c.flushRequests(err) | |||
| if c.reconnectDelay > 0 { | |||
| select { | |||
| case <-c.shouldQuit: | |||
| return | |||
| case <-time.After(c.reconnectDelay): | |||
| } | |||
| } | |||
| } | |||
| } | |||
| func (c *Conn) flushUnsentRequests(err error) { | |||
| for { | |||
| select { | |||
| default: | |||
| return | |||
| case req := <-c.sendChan: | |||
| req.recvChan <- response{-1, err} | |||
| } | |||
| } | |||
| } | |||
| // Send error to all pending requests and clear request map | |||
| func (c *Conn) flushRequests(err error) { | |||
| c.requestsLock.Lock() | |||
| for _, req := range c.requests { | |||
| req.recvChan <- response{-1, err} | |||
| } | |||
| c.requests = make(map[int32]*request) | |||
| c.requestsLock.Unlock() | |||
| } | |||
| // Send error to all watchers and clear watchers map | |||
| func (c *Conn) invalidateWatches(err error) { | |||
| c.watchersLock.Lock() | |||
| defer c.watchersLock.Unlock() | |||
| if len(c.watchers) >= 0 { | |||
| for pathType, watchers := range c.watchers { | |||
| ev := Event{Type: EventNotWatching, State: StateDisconnected, Path: pathType.path, Err: err} | |||
| for _, ch := range watchers { | |||
| ch <- ev | |||
| close(ch) | |||
| } | |||
| } | |||
| c.watchers = make(map[watchPathType][]chan Event) | |||
| } | |||
| } | |||
| func (c *Conn) sendSetWatches() { | |||
| c.watchersLock.Lock() | |||
| defer c.watchersLock.Unlock() | |||
| if len(c.watchers) == 0 { | |||
| return | |||
| } | |||
| req := &setWatchesRequest{ | |||
| RelativeZxid: c.lastZxid, | |||
| DataWatches: make([]string, 0), | |||
| ExistWatches: make([]string, 0), | |||
| ChildWatches: make([]string, 0), | |||
| } | |||
| n := 0 | |||
| for pathType, watchers := range c.watchers { | |||
| if len(watchers) == 0 { | |||
| continue | |||
| } | |||
| switch pathType.wType { | |||
| case watchTypeData: | |||
| req.DataWatches = append(req.DataWatches, pathType.path) | |||
| case watchTypeExist: | |||
| req.ExistWatches = append(req.ExistWatches, pathType.path) | |||
| case watchTypeChild: | |||
| req.ChildWatches = append(req.ChildWatches, pathType.path) | |||
| } | |||
| n++ | |||
| } | |||
| if n == 0 { | |||
| return | |||
| } | |||
| go func() { | |||
| res := &setWatchesResponse{} | |||
| _, err := c.request(opSetWatches, req, res, nil) | |||
| if err != nil { | |||
| log.Printf("Failed to set previous watches: %s", err.Error()) | |||
| } | |||
| }() | |||
| } | |||
| func (c *Conn) authenticate() error { | |||
| buf := make([]byte, 256) | |||
| // connect request | |||
| n, err := encodePacket(buf[4:], &connectRequest{ | |||
| ProtocolVersion: protocolVersion, | |||
| LastZxidSeen: c.lastZxid, | |||
| TimeOut: c.timeout, | |||
| SessionID: c.sessionID, | |||
| Passwd: c.passwd, | |||
| }) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| binary.BigEndian.PutUint32(buf[:4], uint32(n)) | |||
| _, err = c.conn.Write(buf[:n+4]) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| c.sendSetWatches() | |||
| // connect response | |||
| // package length | |||
| _, err = io.ReadFull(c.conn, buf[:4]) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| blen := int(binary.BigEndian.Uint32(buf[:4])) | |||
| if cap(buf) < blen { | |||
| buf = make([]byte, blen) | |||
| } | |||
| _, err = io.ReadFull(c.conn, buf[:blen]) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| r := connectResponse{} | |||
| _, err = decodePacket(buf[:blen], &r) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if r.SessionID == 0 { | |||
| c.sessionID = 0 | |||
| c.passwd = emptyPassword | |||
| c.lastZxid = 0 | |||
| c.setState(StateExpired) | |||
| return ErrSessionExpired | |||
| } | |||
| if c.sessionID != r.SessionID { | |||
| atomic.StoreInt32(&c.xid, 0) | |||
| } | |||
| c.timeout = r.TimeOut | |||
| c.sessionID = r.SessionID | |||
| c.passwd = r.Passwd | |||
| c.setState(StateHasSession) | |||
| return nil | |||
| } | |||
| func (c *Conn) sendLoop(conn net.Conn, closeChan <-chan bool) error { | |||
| pingTicker := time.NewTicker(c.pingInterval) | |||
| defer pingTicker.Stop() | |||
| buf := make([]byte, bufferSize) | |||
| for { | |||
| select { | |||
| case req := <-c.sendChan: | |||
| header := &requestHeader{req.xid, req.opcode} | |||
| n, err := encodePacket(buf[4:], header) | |||
| if err != nil { | |||
| req.recvChan <- response{-1, err} | |||
| continue | |||
| } | |||
| n2, err := encodePacket(buf[4+n:], req.pkt) | |||
| if err != nil { | |||
| req.recvChan <- response{-1, err} | |||
| continue | |||
| } | |||
| n += n2 | |||
| binary.BigEndian.PutUint32(buf[:4], uint32(n)) | |||
| c.requestsLock.Lock() | |||
| select { | |||
| case <-closeChan: | |||
| req.recvChan <- response{-1, ErrConnectionClosed} | |||
| c.requestsLock.Unlock() | |||
| return ErrConnectionClosed | |||
| default: | |||
| } | |||
| c.requests[req.xid] = req | |||
| c.requestsLock.Unlock() | |||
| conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)) | |||
| _, err = conn.Write(buf[:n+4]) | |||
| conn.SetWriteDeadline(time.Time{}) | |||
| if err != nil { | |||
| req.recvChan <- response{-1, err} | |||
| conn.Close() | |||
| return err | |||
| } | |||
| case <-pingTicker.C: | |||
| n, err := encodePacket(buf[4:], &requestHeader{Xid: -2, Opcode: opPing}) | |||
| if err != nil { | |||
| panic("zk: opPing should never fail to serialize") | |||
| } | |||
| binary.BigEndian.PutUint32(buf[:4], uint32(n)) | |||
| conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)) | |||
| _, err = conn.Write(buf[:n+4]) | |||
| conn.SetWriteDeadline(time.Time{}) | |||
| if err != nil { | |||
| conn.Close() | |||
| return err | |||
| } | |||
| case <-closeChan: | |||
| return nil | |||
| } | |||
| } | |||
| } | |||
| func (c *Conn) recvLoop(conn net.Conn) error { | |||
| buf := make([]byte, bufferSize) | |||
| for { | |||
| // package length | |||
| conn.SetReadDeadline(time.Now().Add(c.recvTimeout)) | |||
| _, err := io.ReadFull(conn, buf[:4]) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| blen := int(binary.BigEndian.Uint32(buf[:4])) | |||
| if cap(buf) < blen { | |||
| buf = make([]byte, blen) | |||
| } | |||
| _, err = io.ReadFull(conn, buf[:blen]) | |||
| conn.SetReadDeadline(time.Time{}) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| res := responseHeader{} | |||
| _, err = decodePacket(buf[:16], &res) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if res.Xid == -1 { | |||
| res := &watcherEvent{} | |||
| _, err := decodePacket(buf[16:16+blen], res) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| ev := Event{ | |||
| Type: res.Type, | |||
| State: res.State, | |||
| Path: res.Path, | |||
| Err: nil, | |||
| } | |||
| select { | |||
| case c.eventChan <- ev: | |||
| default: | |||
| } | |||
| wTypes := make([]watchType, 0, 2) | |||
| switch res.Type { | |||
| case EventNodeCreated: | |||
| wTypes = append(wTypes, watchTypeExist) | |||
| case EventNodeDeleted, EventNodeDataChanged: | |||
| wTypes = append(wTypes, watchTypeExist, watchTypeData, watchTypeChild) | |||
| case EventNodeChildrenChanged: | |||
| wTypes = append(wTypes, watchTypeChild) | |||
| } | |||
| c.watchersLock.Lock() | |||
| for _, t := range wTypes { | |||
| wpt := watchPathType{res.Path, t} | |||
| if watchers := c.watchers[wpt]; watchers != nil && len(watchers) > 0 { | |||
| for _, ch := range watchers { | |||
| ch <- ev | |||
| close(ch) | |||
| } | |||
| delete(c.watchers, wpt) | |||
| } | |||
| } | |||
| c.watchersLock.Unlock() | |||
| } else if res.Xid == -2 { | |||
| // Ping response. Ignore. | |||
| } else if res.Xid < 0 { | |||
| log.Printf("Xid < 0 (%d) but not ping or watcher event", res.Xid) | |||
| } else { | |||
| if res.Zxid > 0 { | |||
| c.lastZxid = res.Zxid | |||
| } | |||
| c.requestsLock.Lock() | |||
| req, ok := c.requests[res.Xid] | |||
| if ok { | |||
| delete(c.requests, res.Xid) | |||
| } | |||
| c.requestsLock.Unlock() | |||
| if !ok { | |||
| log.Printf("Response for unknown request with xid %d", res.Xid) | |||
| } else { | |||
| if res.Err != 0 { | |||
| err = res.Err.toError() | |||
| } else { | |||
| _, err = decodePacket(buf[16:16+blen], req.recvStruct) | |||
| } | |||
| if req.recvFunc != nil { | |||
| req.recvFunc(req, &res, err) | |||
| } | |||
| req.recvChan <- response{res.Zxid, err} | |||
| if req.opcode == opClose { | |||
| return io.EOF | |||
| } | |||
| } | |||
| } | |||
| } | |||
| } | |||
| func (c *Conn) nextXid() int32 { | |||
| return atomic.AddInt32(&c.xid, 1) | |||
| } | |||
| func (c *Conn) addWatcher(path string, watchType watchType) <-chan Event { | |||
| c.watchersLock.Lock() | |||
| defer c.watchersLock.Unlock() | |||
| ch := make(chan Event, 1) | |||
| wpt := watchPathType{path, watchType} | |||
| c.watchers[wpt] = append(c.watchers[wpt], ch) | |||
| return ch | |||
| } | |||
| func (c *Conn) queueRequest(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) <-chan response { | |||
| rq := &request{ | |||
| xid: c.nextXid(), | |||
| opcode: opcode, | |||
| pkt: req, | |||
| recvStruct: res, | |||
| recvChan: make(chan response, 1), | |||
| recvFunc: recvFunc, | |||
| } | |||
| c.sendChan <- rq | |||
| return rq.recvChan | |||
| } | |||
| func (c *Conn) request(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) (int64, error) { | |||
| r := <-c.queueRequest(opcode, req, res, recvFunc) | |||
| return r.zxid, r.err | |||
| } | |||
| func (c *Conn) AddAuth(scheme string, auth []byte) error { | |||
| _, err := c.request(opSetAuth, &setAuthRequest{Type: 0, Scheme: scheme, Auth: auth}, &setAuthResponse{}, nil) | |||
| return err | |||
| } | |||
| func (c *Conn) Children(path string) ([]string, Stat, error) { | |||
| res := &getChildren2Response{} | |||
| _, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: false}, res, nil) | |||
| return res.Children, &res.Stat, err | |||
| } | |||
| func (c *Conn) ChildrenW(path string) ([]string, Stat, <-chan Event, error) { | |||
| var ech <-chan Event | |||
| res := &getChildren2Response{} | |||
| _, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) { | |||
| if err == nil { | |||
| ech = c.addWatcher(path, watchTypeChild) | |||
| } | |||
| }) | |||
| if err != nil { | |||
| return nil, nil, nil, err | |||
| } | |||
| return res.Children, &res.Stat, ech, err | |||
| } | |||
| func (c *Conn) Get(path string) ([]byte, Stat, error) { | |||
| res := &getDataResponse{} | |||
| _, err := c.request(opGetData, &getDataRequest{Path: path, Watch: false}, res, nil) | |||
| return res.Data, &res.Stat, err | |||
| } | |||
| // GetW returns the contents of a znode and sets a watch | |||
| func (c *Conn) GetW(path string) ([]byte, Stat, <-chan Event, error) { | |||
| var ech <-chan Event | |||
| res := &getDataResponse{} | |||
| _, err := c.request(opGetData, &getDataRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) { | |||
| if err == nil { | |||
| ech = c.addWatcher(path, watchTypeData) | |||
| } | |||
| }) | |||
| if err != nil { | |||
| return nil, nil, nil, err | |||
| } | |||
| return res.Data, &res.Stat, ech, err | |||
| } | |||
| func (c *Conn) Set(path string, data []byte, version int32) (Stat, error) { | |||
| res := &setDataResponse{} | |||
| _, err := c.request(opSetData, &SetDataRequest{path, data, version}, res, nil) | |||
| return &res.Stat, err | |||
| } | |||
| func (c *Conn) Create(path string, data []byte, flags int32, acl []ACL) (string, error) { | |||
| res := &createResponse{} | |||
| _, err := c.request(opCreate, &CreateRequest{path, data, acl, flags}, res, nil) | |||
| return res.Path, err | |||
| } | |||
| // CreateProtectedEphemeralSequential fixes a race condition if the server crashes | |||
| // after it creates the node. On reconnect the session may still be valid so the | |||
| // ephemeral node still exists. Therefore, on reconnect we need to check if a node | |||
| // with a GUID generated on create exists. | |||
| func (c *Conn) CreateProtectedEphemeralSequential(path string, data []byte, acl []ACL) (string, error) { | |||
| var guid [16]byte | |||
| _, err := io.ReadFull(rand.Reader, guid[:16]) | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| guidStr := fmt.Sprintf("%x", guid) | |||
| parts := strings.Split(path, "/") | |||
| parts[len(parts)-1] = fmt.Sprintf("%s%s-%s", protectedPrefix, guidStr, parts[len(parts)-1]) | |||
| rootPath := strings.Join(parts[:len(parts)-1], "/") | |||
| protectedPath := strings.Join(parts, "/") | |||
| var newPath string | |||
| for i := 0; i < 3; i++ { | |||
| newPath, err = c.Create(protectedPath, data, FlagEphemeral|FlagSequence, acl) | |||
| switch err { | |||
| case ErrSessionExpired: | |||
| // No need to search for the node since it can't exist. Just try again. | |||
| case ErrConnectionClosed: | |||
| children, _, err := c.Children(rootPath) | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| for _, p := range children { | |||
| parts := strings.Split(p, "/") | |||
| if pth := parts[len(parts)-1]; strings.HasPrefix(pth, protectedPrefix) { | |||
| if g := pth[len(protectedPrefix) : len(protectedPrefix)+32]; g == guidStr { | |||
| return rootPath + "/" + p, nil | |||
| } | |||
| } | |||
| } | |||
| case nil: | |||
| return newPath, nil | |||
| default: | |||
| return "", err | |||
| } | |||
| } | |||
| return "", err | |||
| } | |||
| func (c *Conn) Delete(path string, version int32) error { | |||
| _, err := c.request(opDelete, &DeleteRequest{path, version}, &deleteResponse{}, nil) | |||
| return err | |||
| } | |||
| func (c *Conn) Exists(path string) (bool, Stat, error) { | |||
| res := &existsResponse{} | |||
| _, err := c.request(opExists, &existsRequest{Path: path, Watch: false}, res, nil) | |||
| exists := true | |||
| if err == ErrNoNode { | |||
| exists = false | |||
| err = nil | |||
| } | |||
| return exists, &res.Stat, err | |||
| } | |||
| func (c *Conn) ExistsW(path string) (bool, Stat, <-chan Event, error) { | |||
| var ech <-chan Event | |||
| res := &existsResponse{} | |||
| _, err := c.request(opExists, &existsRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) { | |||
| if err == nil { | |||
| ech = c.addWatcher(path, watchTypeData) | |||
| } else if err == ErrNoNode { | |||
| ech = c.addWatcher(path, watchTypeExist) | |||
| } | |||
| }) | |||
| exists := true | |||
| if err == ErrNoNode { | |||
| exists = false | |||
| err = nil | |||
| } | |||
| if err != nil { | |||
| return false, nil, nil, err | |||
| } | |||
| return exists, &res.Stat, ech, err | |||
| } | |||
| func (c *Conn) GetACL(path string) ([]ACL, Stat, error) { | |||
| res := &getAclResponse{} | |||
| _, err := c.request(opGetAcl, &getAclRequest{Path: path}, res, nil) | |||
| return res.Acl, &res.Stat, err | |||
| } | |||
| func (c *Conn) SetACL(path string, acl []ACL, version int32) (Stat, error) { | |||
| res := &setAclResponse{} | |||
| _, err := c.request(opSetAcl, &setAclRequest{Path: path, Acl: acl, Version: version}, res, nil) | |||
| return &res.Stat, err | |||
| } | |||
| func (c *Conn) Sync(path string) (string, error) { | |||
| res := &syncResponse{} | |||
| _, err := c.request(opSync, &syncRequest{Path: path}, res, nil) | |||
| return res.Path, err | |||
| } | |||
| type MultiOps struct { | |||
| Create []CreateRequest | |||
| Delete []DeleteRequest | |||
| SetData []SetDataRequest | |||
| Check []CheckVersionRequest | |||
| } | |||
| func (c *Conn) Multi(ops MultiOps) error { | |||
| req := &multiRequest{ | |||
| Ops: make([]multiRequestOp, 0, len(ops.Create)+len(ops.Delete)+len(ops.SetData)+len(ops.Check)), | |||
| DoneHeader: multiHeader{Type: -1, Done: true, Err: -1}, | |||
| } | |||
| for _, r := range ops.Create { | |||
| req.Ops = append(req.Ops, multiRequestOp{multiHeader{opCreate, false, -1}, r}) | |||
| } | |||
| for _, r := range ops.SetData { | |||
| req.Ops = append(req.Ops, multiRequestOp{multiHeader{opSetData, false, -1}, r}) | |||
| } | |||
| for _, r := range ops.Delete { | |||
| req.Ops = append(req.Ops, multiRequestOp{multiHeader{opDelete, false, -1}, r}) | |||
| } | |||
| for _, r := range ops.Check { | |||
| req.Ops = append(req.Ops, multiRequestOp{multiHeader{opCheck, false, -1}, r}) | |||
| } | |||
| res := &multiResponse{} | |||
| _, err := c.request(opMulti, req, res, nil) | |||
| return err | |||
| } | |||
| @@ -1,217 +0,0 @@ | |||
| package zk | |||
| import ( | |||
| "errors" | |||
| ) | |||
| const ( | |||
| protocolVersion = 0 | |||
| DefaultPort = 2181 | |||
| ) | |||
| const ( | |||
| opNotify = 0 | |||
| opCreate = 1 | |||
| opDelete = 2 | |||
| opExists = 3 | |||
| opGetData = 4 | |||
| opSetData = 5 | |||
| opGetAcl = 6 | |||
| opSetAcl = 7 | |||
| opGetChildren = 8 | |||
| opSync = 9 | |||
| opPing = 11 | |||
| opGetChildren2 = 12 | |||
| opCheck = 13 | |||
| opMulti = 14 | |||
| opClose = -11 | |||
| opSetAuth = 100 | |||
| opSetWatches = 101 | |||
| // Not in protocol, used internally | |||
| opWatcherEvent = -2 | |||
| ) | |||
| const ( | |||
| EventNodeCreated = EventType(1) | |||
| EventNodeDeleted = EventType(2) | |||
| EventNodeDataChanged = EventType(3) | |||
| EventNodeChildrenChanged = EventType(4) | |||
| EventSession = EventType(-1) | |||
| EventNotWatching = EventType(-2) | |||
| ) | |||
| var ( | |||
| eventNames = map[EventType]string{ | |||
| EventNodeCreated: "EventNodeCreated", | |||
| EventNodeDeleted: "EventNodeDeleted", | |||
| EventNodeDataChanged: "EventNodeDataChanged", | |||
| EventNodeChildrenChanged: "EventNodeChildrenChanged", | |||
| EventSession: "EventSession", | |||
| EventNotWatching: "EventNotWatching", | |||
| } | |||
| ) | |||
| const ( | |||
| StateUnknown = State(-1) | |||
| StateDisconnected = State(0) | |||
| StateConnecting = State(1) | |||
| StateSyncConnected = State(3) | |||
| StateAuthFailed = State(4) | |||
| StateConnectedReadOnly = State(5) | |||
| StateSaslAuthenticated = State(6) | |||
| StateExpired = State(-112) | |||
| // StateAuthFailed = State(-113) | |||
| StateConnected = State(100) | |||
| StateHasSession = State(101) | |||
| ) | |||
| const ( | |||
| FlagEphemeral = 1 | |||
| FlagSequence = 2 | |||
| ) | |||
| var ( | |||
| stateNames = map[State]string{ | |||
| StateUnknown: "StateUnknown", | |||
| StateDisconnected: "StateDisconnected", | |||
| StateSyncConnected: "StateSyncConnected", | |||
| StateConnectedReadOnly: "StateConnectedReadOnly", | |||
| StateSaslAuthenticated: "StateSaslAuthenticated", | |||
| StateExpired: "StateExpired", | |||
| StateAuthFailed: "StateAuthFailed", | |||
| StateConnecting: "StateConnecting", | |||
| StateConnected: "StateConnected", | |||
| StateHasSession: "StateHasSession", | |||
| } | |||
| ) | |||
| type State int32 | |||
| func (s State) String() string { | |||
| if name := stateNames[s]; name != "" { | |||
| return name | |||
| } | |||
| return "Unknown" | |||
| } | |||
| type ErrCode int32 | |||
| var ( | |||
| ErrConnectionClosed = errors.New("zk: connection closed") | |||
| ErrUnknown = errors.New("zk: unknown error") | |||
| ErrAPIError = errors.New("zk: api error") | |||
| ErrNoNode = errors.New("zk: node does not exist") | |||
| ErrNoAuth = errors.New("zk: not authenticated") | |||
| ErrBadVersion = errors.New("zk: version conflict") | |||
| ErrNoChildrenForEphemerals = errors.New("zk: ephemeral nodes may not have children") | |||
| ErrNodeExists = errors.New("zk: node already exists") | |||
| ErrNotEmpty = errors.New("zk: node has children") | |||
| ErrSessionExpired = errors.New("zk: session has been expired by the server") | |||
| ErrInvalidACL = errors.New("zk: invalid ACL specified") | |||
| ErrAuthFailed = errors.New("zk: client authentication failed") | |||
| ErrClosing = errors.New("zk: zookeeper is closing") | |||
| ErrNothing = errors.New("zk: no server responsees to process") | |||
| ErrSessionMoved = errors.New("zk: session moved to another server, so operation is ignored") | |||
| // ErrInvalidCallback = errors.New("zk: invalid callback specified") | |||
| errCodeToError = map[ErrCode]error{ | |||
| 0: nil, | |||
| errAPIError: ErrAPIError, | |||
| errNoNode: ErrNoNode, | |||
| errNoAuth: ErrNoAuth, | |||
| errBadVersion: ErrBadVersion, | |||
| errNoChildrenForEphemerals: ErrNoChildrenForEphemerals, | |||
| errNodeExists: ErrNodeExists, | |||
| errNotEmpty: ErrNotEmpty, | |||
| errSessionExpired: ErrSessionExpired, | |||
| // errInvalidCallback: ErrInvalidCallback, | |||
| errInvalidAcl: ErrInvalidACL, | |||
| errAuthFailed: ErrAuthFailed, | |||
| errClosing: ErrClosing, | |||
| errNothing: ErrNothing, | |||
| errSessionMoved: ErrSessionMoved, | |||
| } | |||
| ) | |||
| func (e ErrCode) toError() error { | |||
| if err, ok := errCodeToError[e]; ok { | |||
| return err | |||
| } | |||
| return ErrUnknown | |||
| } | |||
| const ( | |||
| errOk = 0 | |||
| // System and server-side errors | |||
| errSystemError = -1 | |||
| errRuntimeInconsistency = -2 | |||
| errDataInconsistency = -3 | |||
| errConnectionLoss = -4 | |||
| errMarshallingError = -5 | |||
| errUnimplemented = -6 | |||
| errOperationTimeout = -7 | |||
| errBadArguments = -8 | |||
| errInvalidState = -9 | |||
| // API errors | |||
| errAPIError = ErrCode(-100) | |||
| errNoNode = ErrCode(-101) // * | |||
| errNoAuth = ErrCode(-102) | |||
| errBadVersion = ErrCode(-103) // * | |||
| errNoChildrenForEphemerals = ErrCode(-108) | |||
| errNodeExists = ErrCode(-110) // * | |||
| errNotEmpty = ErrCode(-111) | |||
| errSessionExpired = ErrCode(-112) | |||
| errInvalidCallback = ErrCode(-113) | |||
| errInvalidAcl = ErrCode(-114) | |||
| errAuthFailed = ErrCode(-115) | |||
| errClosing = ErrCode(-116) | |||
| errNothing = ErrCode(-117) | |||
| errSessionMoved = ErrCode(-118) | |||
| ) | |||
| // Constants for ACL permissions | |||
| const ( | |||
| PermRead = 1 << iota | |||
| PermWrite | |||
| PermCreate | |||
| PermDelete | |||
| PermAdmin | |||
| PermAll = 0x1f | |||
| ) | |||
| var ( | |||
| emptyPassword = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} | |||
| opNames = map[int32]string{ | |||
| opNotify: "notify", | |||
| opCreate: "create", | |||
| opDelete: "delete", | |||
| opExists: "exists", | |||
| opGetData: "getData", | |||
| opSetData: "setData", | |||
| opGetAcl: "getACL", | |||
| opSetAcl: "setACL", | |||
| opGetChildren: "getChildren", | |||
| opSync: "sync", | |||
| opPing: "ping", | |||
| opGetChildren2: "getChildren2", | |||
| opCheck: "check", | |||
| opMulti: "multi", | |||
| opClose: "close", | |||
| opSetAuth: "setAuth", | |||
| opSetWatches: "setWatches", | |||
| opWatcherEvent: "watcherEvent", | |||
| } | |||
| ) | |||
| type EventType int32 | |||
| func (t EventType) String() string { | |||
| if name := eventNames[t]; name != "" { | |||
| return name | |||
| } | |||
| return "Unknown" | |||
| } | |||
| @@ -1,131 +0,0 @@ | |||
| package zk | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "strconv" | |||
| "strings" | |||
| ) | |||
| var ( | |||
| ErrDeadlock = errors.New("zk: trying to acquire a lock twice") | |||
| ErrNotLocked = errors.New("zk: not locked") | |||
| ) | |||
| type Lock struct { | |||
| c *Conn | |||
| path string | |||
| acl []ACL | |||
| lockPath string | |||
| seq int | |||
| } | |||
| func NewLock(c *Conn, path string, acl []ACL) *Lock { | |||
| return &Lock{ | |||
| c: c, | |||
| path: path, | |||
| acl: acl, | |||
| } | |||
| } | |||
| func parseSeq(path string) (int, error) { | |||
| parts := strings.Split(path, "-") | |||
| return strconv.Atoi(parts[len(parts)-1]) | |||
| } | |||
| func (l *Lock) Lock() error { | |||
| if l.lockPath != "" { | |||
| return ErrDeadlock | |||
| } | |||
| prefix := fmt.Sprintf("%s/lock-", l.path) | |||
| path := "" | |||
| var err error | |||
| for i := 0; i < 3; i++ { | |||
| path, err = l.c.CreateProtectedEphemeralSequential(prefix, []byte{}, l.acl) | |||
| if err == ErrNoNode { | |||
| // Create parent node. | |||
| parts := strings.Split(l.path, "/") | |||
| pth := "" | |||
| for _, p := range parts[1:] { | |||
| pth += "/" + p | |||
| _, err := l.c.Create(pth, []byte{}, 0, l.acl) | |||
| if err != nil && err != ErrNodeExists { | |||
| return err | |||
| } | |||
| } | |||
| } else if err == nil { | |||
| break | |||
| } else { | |||
| return err | |||
| } | |||
| } | |||
| if err != nil { | |||
| return err | |||
| } | |||
| seq, err := parseSeq(path) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| for { | |||
| children, _, err := l.c.Children(l.path) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| lowestSeq := seq | |||
| prevSeq := 0 | |||
| prevSeqPath := "" | |||
| for _, p := range children { | |||
| s, err := parseSeq(p) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if s < lowestSeq { | |||
| lowestSeq = s | |||
| } | |||
| if s < seq && s > prevSeq { | |||
| prevSeq = s | |||
| prevSeqPath = p | |||
| } | |||
| } | |||
| if seq == lowestSeq { | |||
| // Acquired the lock | |||
| break | |||
| } | |||
| // Wait on the node next in line for the lock | |||
| _, _, ch, err := l.c.GetW(l.path + "/" + prevSeqPath) | |||
| if err != nil && err != ErrNoNode { | |||
| return err | |||
| } else if err != nil && err == ErrNoNode { | |||
| // try again | |||
| continue | |||
| } | |||
| ev := <-ch | |||
| if ev.Err != nil { | |||
| return ev.Err | |||
| } | |||
| } | |||
| l.seq = seq | |||
| l.lockPath = path | |||
| return nil | |||
| } | |||
| func (l *Lock) Unlock() error { | |||
| if l.lockPath == "" { | |||
| return ErrNotLocked | |||
| } | |||
| if err := l.c.Delete(l.lockPath, -1); err != nil { | |||
| return err | |||
| } | |||
| l.lockPath = "" | |||
| l.seq = 0 | |||
| return nil | |||
| } | |||
| @@ -1,113 +0,0 @@ | |||
| package zk | |||
| import ( | |||
| "fmt" | |||
| "io/ioutil" | |||
| "math/rand" | |||
| "os" | |||
| "path/filepath" | |||
| "time" | |||
| ) | |||
| type TestServer struct { | |||
| Port int | |||
| Path string | |||
| Srv *Server | |||
| } | |||
| type TestCluster struct { | |||
| Path string | |||
| Servers []TestServer | |||
| } | |||
| func StartTestCluster(size int) (*TestCluster, error) { | |||
| tmpPath, err := ioutil.TempDir("", "gozk") | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| success := false | |||
| startPort := int(rand.Int31n(6000) + 10000) | |||
| cluster := &TestCluster{Path: tmpPath} | |||
| defer func() { | |||
| if !success { | |||
| cluster.Stop() | |||
| } | |||
| }() | |||
| for serverN := 0; serverN < size; serverN++ { | |||
| srvPath := filepath.Join(tmpPath, fmt.Sprintf("srv%d", serverN)) | |||
| if err := os.Mkdir(srvPath, 0700); err != nil { | |||
| return nil, err | |||
| } | |||
| port := startPort + serverN*3 | |||
| cfg := ServerConfig{ | |||
| ClientPort: port, | |||
| DataDir: srvPath, | |||
| } | |||
| for i := 0; i < size; i++ { | |||
| cfg.Servers = append(cfg.Servers, ServerConfigServer{ | |||
| ID: i + 1, | |||
| Host: "127.0.0.1", | |||
| PeerPort: startPort + i*3 + 1, | |||
| LeaderElectionPort: startPort + i*3 + 2, | |||
| }) | |||
| } | |||
| cfgPath := filepath.Join(srvPath, "zoo.cfg") | |||
| fi, err := os.Create(cfgPath) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| err = cfg.Marshall(fi) | |||
| fi.Close() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| fi, err = os.Create(filepath.Join(srvPath, "myid")) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| _, err = fmt.Fprintf(fi, "%d\n", serverN+1) | |||
| fi.Close() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| srv := &Server{ | |||
| ConfigPath: cfgPath, | |||
| } | |||
| if err := srv.Start(); err != nil { | |||
| fmt.Println(err) | |||
| return nil, err | |||
| } | |||
| cluster.Servers = append(cluster.Servers, TestServer{ | |||
| Path: srvPath, | |||
| Port: cfg.ClientPort, | |||
| Srv: srv, | |||
| }) | |||
| } | |||
| success = true | |||
| time.Sleep(time.Second) // Give the server time to become active. Should probably actually attempt to connect to verify. | |||
| return cluster, nil | |||
| } | |||
| func (ts *TestCluster) Connect(idx int) (*Conn, error) { | |||
| zk, _, err := Connect([]string{fmt.Sprintf("127.0.0.1:%d", ts.Servers[idx].Port)}, time.Second*15) | |||
| return zk, err | |||
| } | |||
| func (ts *TestCluster) ConnectAll() (*Conn, error) { | |||
| hosts := make([]string, len(ts.Servers)) | |||
| for i, srv := range ts.Servers { | |||
| hosts[i] = fmt.Sprintf("127.0.0.1:%d", srv.Port) | |||
| } | |||
| zk, _, err := Connect(hosts, time.Second*15) | |||
| return zk, err | |||
| } | |||
| func (ts *TestCluster) Stop() error { | |||
| for _, srv := range ts.Servers { | |||
| srv.Srv.Stop() | |||
| } | |||
| defer os.RemoveAll(ts.Path) | |||
| return nil | |||
| } | |||
| @@ -1,142 +0,0 @@ | |||
| package zk | |||
| import ( | |||
| "fmt" | |||
| "io" | |||
| "os" | |||
| "os/exec" | |||
| "path/filepath" | |||
| ) | |||
| type ErrMissingServerConfigField string | |||
| func (e ErrMissingServerConfigField) Error() string { | |||
| return fmt.Sprintf("zk: missing server config field '%s'", string(e)) | |||
| } | |||
| const ( | |||
| DefaultServerTickTime = 2000 | |||
| DefaultServerInitLimit = 10 | |||
| DefaultServerSyncLimit = 5 | |||
| DefaultServerAutoPurgeSnapRetainCount = 3 | |||
| DefaultPeerPort = 2888 | |||
| DefaultLeaderElectionPort = 3888 | |||
| ) | |||
| type ServerConfigServer struct { | |||
| ID int | |||
| Host string | |||
| PeerPort int | |||
| LeaderElectionPort int | |||
| } | |||
| type ServerConfig struct { | |||
| TickTime int // Number of milliseconds of each tick | |||
| InitLimit int // Number of ticks that the initial synchronization phase can take | |||
| SyncLimit int // Number of ticks that can pass between sending a request and getting an acknowledgement | |||
| DataDir string // Direcrory where the snapshot is stored | |||
| ClientPort int // Port at which clients will connect | |||
| AutoPurgeSnapRetainCount int // Number of snapshots to retain in dataDir | |||
| AutoPurgePurgeInterval int // Purge task internal in hours (0 to disable auto purge) | |||
| Servers []ServerConfigServer | |||
| } | |||
| func (sc ServerConfig) Marshall(w io.Writer) error { | |||
| if sc.DataDir == "" { | |||
| return ErrMissingServerConfigField("dataDir") | |||
| } | |||
| fmt.Fprintf(w, "dataDir=%s\n", sc.DataDir) | |||
| if sc.TickTime <= 0 { | |||
| sc.TickTime = DefaultServerTickTime | |||
| } | |||
| fmt.Fprintf(w, "tickTime=%d\n", sc.TickTime) | |||
| if sc.InitLimit <= 0 { | |||
| sc.InitLimit = DefaultServerInitLimit | |||
| } | |||
| fmt.Fprintf(w, "initLimit=%d\n", sc.InitLimit) | |||
| if sc.SyncLimit <= 0 { | |||
| sc.SyncLimit = DefaultServerSyncLimit | |||
| } | |||
| fmt.Fprintf(w, "syncLimit=%d\n", sc.SyncLimit) | |||
| if sc.ClientPort <= 0 { | |||
| sc.ClientPort = DefaultPort | |||
| } | |||
| fmt.Fprintf(w, "clientPort=%d\n", sc.ClientPort) | |||
| if sc.AutoPurgePurgeInterval > 0 { | |||
| if sc.AutoPurgeSnapRetainCount <= 0 { | |||
| sc.AutoPurgeSnapRetainCount = DefaultServerAutoPurgeSnapRetainCount | |||
| } | |||
| fmt.Fprintf(w, "autopurge.snapRetainCount=%d\n", sc.AutoPurgeSnapRetainCount) | |||
| fmt.Fprintf(w, "autopurge.purgeInterval=%d\n", sc.AutoPurgePurgeInterval) | |||
| } | |||
| if len(sc.Servers) > 0 { | |||
| for _, srv := range sc.Servers { | |||
| if srv.PeerPort <= 0 { | |||
| srv.PeerPort = DefaultPeerPort | |||
| } | |||
| if srv.LeaderElectionPort <= 0 { | |||
| srv.LeaderElectionPort = DefaultLeaderElectionPort | |||
| } | |||
| fmt.Fprintf(w, "server.%d=%s:%d:%d\n", srv.ID, srv.Host, srv.PeerPort, srv.LeaderElectionPort) | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| var jarSearchPaths = []string{ | |||
| "zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar", | |||
| "../zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar", | |||
| "/usr/share/java/zookeeper-*.jar", | |||
| "/usr/local/zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar", | |||
| "/usr/local/Cellar/zookeeper/*/libexec/contrib/fatjar/zookeeper-*-fatjar.jar", | |||
| } | |||
| func findZookeeperFatJar() string { | |||
| var paths []string | |||
| zkPath := os.Getenv("ZOOKEEPER_PATH") | |||
| if zkPath == "" { | |||
| paths = jarSearchPaths | |||
| } else { | |||
| paths = []string{filepath.Join(zkPath, "contrib/fatjar/zookeeper-*-fatjar.jar")} | |||
| } | |||
| for _, path := range paths { | |||
| matches, _ := filepath.Glob(path) | |||
| // TODO: could sort by version and pick latest | |||
| if len(matches) > 0 { | |||
| return matches[0] | |||
| } | |||
| } | |||
| return "" | |||
| } | |||
| type Server struct { | |||
| JarPath string | |||
| ConfigPath string | |||
| cmd *exec.Cmd | |||
| } | |||
| func (srv *Server) Start() error { | |||
| if srv.JarPath == "" { | |||
| srv.JarPath = findZookeeperFatJar() | |||
| if srv.JarPath == "" { | |||
| return fmt.Errorf("zk: unable to find server jar") | |||
| } | |||
| } | |||
| srv.cmd = exec.Command("java", "-jar", srv.JarPath, "server", srv.ConfigPath) | |||
| // srv.cmd.Stdout = os.Stdout | |||
| // srv.cmd.Stderr = os.Stderr | |||
| err := srv.cmd.Start() | |||
| if err != nil { | |||
| fmt.Println("start failed", err) | |||
| } | |||
| fmt.Println("start zookeeper ok") | |||
| return err | |||
| } | |||
| func (srv *Server) Stop() error { | |||
| srv.cmd.Process.Signal(os.Kill) | |||
| return srv.cmd.Wait() | |||
| } | |||
| @@ -1,662 +0,0 @@ | |||
| package zk | |||
| import ( | |||
| "encoding/binary" | |||
| "errors" | |||
| "reflect" | |||
| "runtime" | |||
| "time" | |||
| ) | |||
| var ( | |||
| ErrUnhandledFieldType = errors.New("zk: unhandled field type") | |||
| ErrPtrExpected = errors.New("zk: encode/decode expect a non-nil pointer to struct") | |||
| ErrShortBuffer = errors.New("zk: buffer too small") | |||
| ) | |||
| type ACL struct { | |||
| Perms int32 | |||
| Scheme string | |||
| ID string | |||
| } | |||
| type zkstat struct { | |||
| ZCzxid int64 // The zxid of the change that caused this znode to be created. | |||
| ZMzxid int64 // The zxid of the change that last modified this znode. | |||
| ZCtime int64 // The time in milliseconds from epoch when this znode was created. | |||
| ZMtime int64 // The time in milliseconds from epoch when this znode was last modified. | |||
| ZVersion int32 // The number of changes to the data of this znode. | |||
| ZCversion int32 // The number of changes to the children of this znode. | |||
| ZAversion int32 // The number of changes to the ACL of this znode. | |||
| ZEphemeralOwner int64 // The session id of the owner of this znode if the znode is an ephemeral node. If it is not an ephemeral node, it will be zero. | |||
| ZDataLength int32 // The length of the data field of this znode. | |||
| ZNumChildren int32 // The number of children of this znode. | |||
| ZPzxid int64 // last modified children | |||
| } | |||
| type Stat interface { | |||
| Czxid() int64 | |||
| Mzxid() int64 | |||
| CTime() time.Time | |||
| MTime() time.Time | |||
| Version() int | |||
| CVersion() int | |||
| AVersion() int | |||
| EphemeralOwner() int64 | |||
| DataLength() int | |||
| NumChildren() int | |||
| Pzxid() int64 | |||
| } | |||
| // Czxid returns the zxid of the change that caused the node to be created. | |||
| func (s *zkstat) Czxid() int64 { | |||
| return s.ZCzxid | |||
| } | |||
| // Mzxid returns the zxid of the change that last modified the node. | |||
| func (s *zkstat) Mzxid() int64 { | |||
| return s.ZMzxid | |||
| } | |||
| func millisec2time(ms int64) time.Time { | |||
| return time.Unix(ms/1e3, ms%1e3*1e6) | |||
| } | |||
| // CTime returns the time (at millisecond resolution) when the node was | |||
| // created. | |||
| func (s *zkstat) CTime() time.Time { | |||
| return millisec2time(s.ZCtime) | |||
| } | |||
| // MTime returns the time (at millisecond resolution) when the node was | |||
| // last modified. | |||
| func (s *zkstat) MTime() time.Time { | |||
| return millisec2time(int64(s.ZMtime)) | |||
| } | |||
| // Version returns the number of changes to the data of the node. | |||
| func (s *zkstat) Version() int { | |||
| return int(s.ZVersion) | |||
| } | |||
| // CVersion returns the number of changes to the children of the node. | |||
| // This only changes when children are created or removed. | |||
| func (s *zkstat) CVersion() int { | |||
| return int(s.ZCversion) | |||
| } | |||
| // AVersion returns the number of changes to the ACL of the node. | |||
| func (s *zkstat) AVersion() int { | |||
| return int(s.ZAversion) | |||
| } | |||
| // If the node is an ephemeral node, EphemeralOwner returns the session id | |||
| // of the owner of the node; otherwise it will return zero. | |||
| func (s *zkstat) EphemeralOwner() int64 { | |||
| return int64(s.ZEphemeralOwner) | |||
| } | |||
| // DataLength returns the length of the data in the node in bytes. | |||
| func (s *zkstat) DataLength() int { | |||
| return int(s.ZDataLength) | |||
| } | |||
| // NumChildren returns the number of children of the node. | |||
| func (s *zkstat) NumChildren() int { | |||
| return int(s.ZNumChildren) | |||
| } | |||
| // Pzxid returns the Pzxid of the node, whatever that is. | |||
| func (s *zkstat) Pzxid() int64 { | |||
| return int64(s.ZPzxid) | |||
| } | |||
| type requestHeader struct { | |||
| Xid int32 | |||
| Opcode int32 | |||
| } | |||
| type responseHeader struct { | |||
| Xid int32 | |||
| Zxid int64 | |||
| Err ErrCode | |||
| } | |||
| type multiHeader struct { | |||
| Type int32 | |||
| Done bool | |||
| Err ErrCode | |||
| } | |||
| type auth struct { | |||
| Type int32 | |||
| Scheme string | |||
| Auth []byte | |||
| } | |||
| // Generic request structs | |||
| type pathRequest struct { | |||
| Path string | |||
| } | |||
| type PathVersionRequest struct { | |||
| Path string | |||
| Version int32 | |||
| } | |||
| type pathWatchRequest struct { | |||
| Path string | |||
| Watch bool | |||
| } | |||
| type pathResponse struct { | |||
| Path string | |||
| } | |||
| type statResponse struct { | |||
| Stat zkstat | |||
| } | |||
| // | |||
| type CheckVersionRequest PathVersionRequest | |||
| type closeRequest struct{} | |||
| type closeResponse struct{} | |||
| type connectRequest struct { | |||
| ProtocolVersion int32 | |||
| LastZxidSeen int64 | |||
| TimeOut int32 | |||
| SessionID int64 | |||
| Passwd []byte | |||
| } | |||
| type connectResponse struct { | |||
| ProtocolVersion int32 | |||
| TimeOut int32 | |||
| SessionID int64 | |||
| Passwd []byte | |||
| } | |||
| type CreateRequest struct { | |||
| Path string | |||
| Data []byte | |||
| Acl []ACL | |||
| Flags int32 | |||
| } | |||
| type createResponse pathResponse | |||
| type DeleteRequest PathVersionRequest | |||
| type deleteResponse struct{} | |||
| type errorResponse struct { | |||
| Err int32 | |||
| } | |||
| type existsRequest pathWatchRequest | |||
| type existsResponse statResponse | |||
| type getAclRequest pathRequest | |||
| type getAclResponse struct { | |||
| Acl []ACL | |||
| Stat zkstat | |||
| } | |||
| type getChildrenRequest pathRequest | |||
| type getChildrenResponse struct { | |||
| Children []string | |||
| } | |||
| type getChildren2Request pathWatchRequest | |||
| type getChildren2Response struct { | |||
| Children []string | |||
| Stat zkstat | |||
| } | |||
| type getDataRequest pathWatchRequest | |||
| type getDataResponse struct { | |||
| Data []byte | |||
| Stat zkstat | |||
| } | |||
| type getMaxChildrenRequest pathRequest | |||
| type getMaxChildrenResponse struct { | |||
| Max int32 | |||
| } | |||
| type getSaslRequest struct { | |||
| Token []byte | |||
| } | |||
| type pingRequest struct{} | |||
| type pingResponse struct{} | |||
| type setAclRequest struct { | |||
| Path string | |||
| Acl []ACL | |||
| Version int32 | |||
| } | |||
| type setAclResponse statResponse | |||
| type SetDataRequest struct { | |||
| Path string | |||
| Data []byte | |||
| Version int32 | |||
| } | |||
| type setDataResponse statResponse | |||
| type setMaxChildren struct { | |||
| Path string | |||
| Max int32 | |||
| } | |||
| type setSaslRequest struct { | |||
| Token string | |||
| } | |||
| type setSaslResponse struct { | |||
| Token string | |||
| } | |||
| type setWatchesRequest struct { | |||
| RelativeZxid int64 | |||
| DataWatches []string | |||
| ExistWatches []string | |||
| ChildWatches []string | |||
| } | |||
| type setWatchesResponse struct{} | |||
| type syncRequest pathRequest | |||
| type syncResponse pathResponse | |||
| type setAuthRequest auth | |||
| type setAuthResponse struct{} | |||
| type multiRequestOp struct { | |||
| Header multiHeader | |||
| Op interface{} | |||
| } | |||
| type multiRequest struct { | |||
| Ops []multiRequestOp | |||
| DoneHeader multiHeader | |||
| } | |||
| type multiResponseOp struct { | |||
| Header multiHeader | |||
| String string | |||
| Stat *zkstat | |||
| } | |||
| type multiResponse struct { | |||
| Ops []multiResponseOp | |||
| DoneHeader multiHeader | |||
| } | |||
| func (r *multiRequest) Encode(buf []byte) (int, error) { | |||
| total := 0 | |||
| for _, op := range r.Ops { | |||
| op.Header.Done = false | |||
| n, err := encodePacketValue(buf[total:], reflect.ValueOf(op)) | |||
| if err != nil { | |||
| return total, err | |||
| } | |||
| total += n | |||
| } | |||
| r.DoneHeader.Done = true | |||
| n, err := encodePacketValue(buf[total:], reflect.ValueOf(r.DoneHeader)) | |||
| if err != nil { | |||
| return total, err | |||
| } | |||
| total += n | |||
| return total, nil | |||
| } | |||
| func (r *multiRequest) Decode(buf []byte) (int, error) { | |||
| r.Ops = make([]multiRequestOp, 0) | |||
| r.DoneHeader = multiHeader{-1, true, -1} | |||
| total := 0 | |||
| for { | |||
| header := &multiHeader{} | |||
| n, err := decodePacketValue(buf[total:], reflect.ValueOf(header)) | |||
| if err != nil { | |||
| return total, err | |||
| } | |||
| total += n | |||
| if header.Done { | |||
| r.DoneHeader = *header | |||
| break | |||
| } | |||
| req := requestStructForOp(header.Type) | |||
| if req == nil { | |||
| return total, ErrAPIError | |||
| } | |||
| n, err = decodePacketValue(buf[total:], reflect.ValueOf(req)) | |||
| if err != nil { | |||
| return total, err | |||
| } | |||
| total += n | |||
| r.Ops = append(r.Ops, multiRequestOp{*header, req}) | |||
| } | |||
| return total, nil | |||
| } | |||
| func (r *multiResponse) Decode(buf []byte) (int, error) { | |||
| r.Ops = make([]multiResponseOp, 0) | |||
| r.DoneHeader = multiHeader{-1, true, -1} | |||
| total := 0 | |||
| for { | |||
| header := &multiHeader{} | |||
| n, err := decodePacketValue(buf[total:], reflect.ValueOf(header)) | |||
| if err != nil { | |||
| return total, err | |||
| } | |||
| total += n | |||
| if header.Done { | |||
| r.DoneHeader = *header | |||
| break | |||
| } | |||
| res := multiResponseOp{Header: *header} | |||
| var w reflect.Value | |||
| switch header.Type { | |||
| default: | |||
| return total, ErrAPIError | |||
| case opCreate: | |||
| w = reflect.ValueOf(&res.String) | |||
| case opSetData: | |||
| res.Stat = new(zkstat) | |||
| w = reflect.ValueOf(res.Stat) | |||
| case opCheck, opDelete: | |||
| } | |||
| if w.IsValid() { | |||
| n, err := decodePacketValue(buf[total:], w) | |||
| if err != nil { | |||
| return total, err | |||
| } | |||
| total += n | |||
| } | |||
| r.Ops = append(r.Ops, res) | |||
| } | |||
| return total, nil | |||
| } | |||
| type watcherEvent struct { | |||
| Type EventType | |||
| State State | |||
| Path string | |||
| } | |||
| type decoder interface { | |||
| Decode(buf []byte) (int, error) | |||
| } | |||
| type encoder interface { | |||
| Encode(buf []byte) (int, error) | |||
| } | |||
| func decodePacket(buf []byte, st interface{}) (n int, err error) { | |||
| defer func() { | |||
| if r := recover(); r != nil { | |||
| if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" { | |||
| err = ErrShortBuffer | |||
| } else { | |||
| panic(r) | |||
| } | |||
| } | |||
| }() | |||
| v := reflect.ValueOf(st) | |||
| if v.Kind() != reflect.Ptr || v.IsNil() { | |||
| return 0, ErrPtrExpected | |||
| } | |||
| return decodePacketValue(buf, v) | |||
| } | |||
| func decodePacketValue(buf []byte, v reflect.Value) (int, error) { | |||
| rv := v | |||
| kind := v.Kind() | |||
| if kind == reflect.Ptr { | |||
| if v.IsNil() { | |||
| v.Set(reflect.New(v.Type().Elem())) | |||
| } | |||
| v = v.Elem() | |||
| kind = v.Kind() | |||
| } | |||
| n := 0 | |||
| switch kind { | |||
| default: | |||
| return n, ErrUnhandledFieldType | |||
| case reflect.Struct: | |||
| if de, ok := rv.Interface().(decoder); ok { | |||
| return de.Decode(buf) | |||
| } else if de, ok := v.Interface().(decoder); ok { | |||
| return de.Decode(buf) | |||
| } else { | |||
| for i := 0; i < v.NumField(); i++ { | |||
| field := v.Field(i) | |||
| n2, err := decodePacketValue(buf[n:], field) | |||
| n += n2 | |||
| if err != nil { | |||
| return n, err | |||
| } | |||
| } | |||
| } | |||
| case reflect.Bool: | |||
| v.SetBool(buf[n] != 0) | |||
| n++ | |||
| case reflect.Int32: | |||
| v.SetInt(int64(binary.BigEndian.Uint32(buf[n : n+4]))) | |||
| n += 4 | |||
| case reflect.Int64: | |||
| v.SetInt(int64(binary.BigEndian.Uint64(buf[n : n+8]))) | |||
| n += 8 | |||
| case reflect.String: | |||
| ln := int(binary.BigEndian.Uint32(buf[n : n+4])) | |||
| v.SetString(string(buf[n+4 : n+4+ln])) | |||
| n += 4 + ln | |||
| case reflect.Slice: | |||
| switch v.Type().Elem().Kind() { | |||
| default: | |||
| count := int(binary.BigEndian.Uint32(buf[n : n+4])) | |||
| n += 4 | |||
| values := reflect.MakeSlice(v.Type(), count, count) | |||
| v.Set(values) | |||
| for i := 0; i < count; i++ { | |||
| n2, err := decodePacketValue(buf[n:], values.Index(i)) | |||
| n += n2 | |||
| if err != nil { | |||
| return n, err | |||
| } | |||
| } | |||
| case reflect.Uint8: | |||
| ln := int(int32(binary.BigEndian.Uint32(buf[n : n+4]))) | |||
| if ln < 0 { | |||
| n += 4 | |||
| v.SetBytes(nil) | |||
| } else { | |||
| bytes := make([]byte, ln) | |||
| copy(bytes, buf[n+4:n+4+ln]) | |||
| v.SetBytes(bytes) | |||
| n += 4 + ln | |||
| } | |||
| } | |||
| } | |||
| return n, nil | |||
| } | |||
| func encodePacket(buf []byte, st interface{}) (n int, err error) { | |||
| defer func() { | |||
| if r := recover(); r != nil { | |||
| if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" { | |||
| err = ErrShortBuffer | |||
| } else { | |||
| panic(r) | |||
| } | |||
| } | |||
| }() | |||
| v := reflect.ValueOf(st) | |||
| if v.Kind() != reflect.Ptr || v.IsNil() { | |||
| return 0, ErrPtrExpected | |||
| } | |||
| return encodePacketValue(buf, v) | |||
| } | |||
// encodePacketValue serializes v into buf using the ZooKeeper wire format
// and returns the number of bytes written.
// Supported kinds: struct (custom encoder if available, else field-by-field),
// bool (1 byte), int32/int64 (big-endian), string and []byte (4-byte
// big-endian length prefix + data), and other slices (4-byte count +
// elements). Any other kind yields ErrUnhandledFieldType.
// Writing past the end of buf panics; the caller recovers that panic and
// turns it into ErrShortBuffer.
func encodePacketValue(buf []byte, v reflect.Value) (int, error) {
	rv := v
	// Unwrap pointers/interfaces; keep rv so a pointer-receiver encoder
	// implementation can still be detected below.
	for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface {
		v = v.Elem()
	}
	n := 0
	switch v.Kind() {
	default:
		return n, ErrUnhandledFieldType
	case reflect.Struct:
		// Prefer a custom encoder on the original (possibly pointer) value,
		// then on the dereferenced value; otherwise encode each field in order.
		if en, ok := rv.Interface().(encoder); ok {
			return en.Encode(buf)
		} else if en, ok := v.Interface().(encoder); ok {
			return en.Encode(buf)
		} else {
			for i := 0; i < v.NumField(); i++ {
				field := v.Field(i)
				n2, err := encodePacketValue(buf[n:], field)
				n += n2
				if err != nil {
					return n, err
				}
			}
		}
	case reflect.Bool:
		if v.Bool() {
			buf[n] = 1
		} else {
			buf[n] = 0
		}
		n++
	case reflect.Int32:
		binary.BigEndian.PutUint32(buf[n:n+4], uint32(v.Int()))
		n += 4
	case reflect.Int64:
		binary.BigEndian.PutUint64(buf[n:n+8], uint64(v.Int()))
		n += 8
	case reflect.String:
		str := v.String()
		binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(str)))
		copy(buf[n+4:n+4+len(str)], []byte(str))
		n += 4 + len(str)
	case reflect.Slice:
		switch v.Type().Elem().Kind() {
		default:
			// Generic slice: reserve 4 bytes, encode the elements, then
			// back-fill the element count.
			count := v.Len()
			startN := n
			n += 4
			for i := 0; i < count; i++ {
				n2, err := encodePacketValue(buf[n:], v.Index(i))
				n += n2
				if err != nil {
					return n, err
				}
			}
			binary.BigEndian.PutUint32(buf[startN:startN+4], uint32(count))
		case reflect.Uint8:
			// Byte slice: a nil slice encodes as length -1 (0xffffffff).
			if v.IsNil() {
				binary.BigEndian.PutUint32(buf[n:n+4], uint32(0xffffffff))
				n += 4
			} else {
				bytes := v.Bytes()
				binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(bytes)))
				copy(buf[n+4:n+4+len(bytes)], bytes)
				n += 4 + len(bytes)
			}
		}
	}
	return n, nil
}
| func requestStructForOp(op int32) interface{} { | |||
| switch op { | |||
| case opClose: | |||
| return &closeRequest{} | |||
| case opCreate: | |||
| return &CreateRequest{} | |||
| case opDelete: | |||
| return &DeleteRequest{} | |||
| case opExists: | |||
| return &existsRequest{} | |||
| case opGetAcl: | |||
| return &getAclRequest{} | |||
| case opGetChildren: | |||
| return &getChildrenRequest{} | |||
| case opGetChildren2: | |||
| return &getChildren2Request{} | |||
| case opGetData: | |||
| return &getDataRequest{} | |||
| case opPing: | |||
| return &pingRequest{} | |||
| case opSetAcl: | |||
| return &setAclRequest{} | |||
| case opSetData: | |||
| return &SetDataRequest{} | |||
| case opSetWatches: | |||
| return &setWatchesRequest{} | |||
| case opSync: | |||
| return &syncRequest{} | |||
| case opSetAuth: | |||
| return &setAuthRequest{} | |||
| case opCheck: | |||
| return &CheckVersionRequest{} | |||
| case opMulti: | |||
| return &multiRequest{} | |||
| } | |||
| return nil | |||
| } | |||
| func responseStructForOp(op int32) interface{} { | |||
| switch op { | |||
| case opClose: | |||
| return &closeResponse{} | |||
| case opCreate: | |||
| return &createResponse{} | |||
| case opDelete: | |||
| return &deleteResponse{} | |||
| case opExists: | |||
| return &existsResponse{} | |||
| case opGetAcl: | |||
| return &getAclResponse{} | |||
| case opGetChildren: | |||
| return &getChildrenResponse{} | |||
| case opGetChildren2: | |||
| return &getChildren2Response{} | |||
| case opGetData: | |||
| return &getDataResponse{} | |||
| case opPing: | |||
| return &pingResponse{} | |||
| case opSetAcl: | |||
| return &setAclResponse{} | |||
| case opSetData: | |||
| return &setDataResponse{} | |||
| case opSetWatches: | |||
| return &setWatchesResponse{} | |||
| case opSync: | |||
| return &syncResponse{} | |||
| case opWatcherEvent: | |||
| return &watcherEvent{} | |||
| case opSetAuth: | |||
| return &setAuthResponse{} | |||
| // case opCheck: | |||
| // return &checkVersionResponse{} | |||
| case opMulti: | |||
| return &multiResponse{} | |||
| } | |||
| return nil | |||
| } | |||
| @@ -1,149 +0,0 @@ | |||
| package zk | |||
| import ( | |||
| "encoding/binary" | |||
| "fmt" | |||
| "io" | |||
| "net" | |||
| "sync" | |||
| ) | |||
var (
	// requests maps each in-flight request xid to its opcode so the
	// response side of the tracer knows which struct to decode into.
	requests = make(map[int32]int32) // Map of Xid -> Opcode
	// requestsLock guards requests; both trace directions touch it.
	requestsLock = &sync.Mutex{}
)
| func trace(conn1, conn2 net.Conn, client bool) { | |||
| defer conn1.Close() | |||
| defer conn2.Close() | |||
| buf := make([]byte, 10*1024) | |||
| init := true | |||
| for { | |||
| _, err := io.ReadFull(conn1, buf[:4]) | |||
| if err != nil { | |||
| fmt.Println("1>", client, err) | |||
| return | |||
| } | |||
| blen := int(binary.BigEndian.Uint32(buf[:4])) | |||
| _, err = io.ReadFull(conn1, buf[4:4+blen]) | |||
| if err != nil { | |||
| fmt.Println("2>", client, err) | |||
| return | |||
| } | |||
| var cr interface{} | |||
| opcode := int32(-1) | |||
| readHeader := true | |||
| if client { | |||
| if init { | |||
| cr = &connectRequest{} | |||
| readHeader = false | |||
| } else { | |||
| xid := int32(binary.BigEndian.Uint32(buf[4:8])) | |||
| opcode = int32(binary.BigEndian.Uint32(buf[8:12])) | |||
| requestsLock.Lock() | |||
| requests[xid] = opcode | |||
| requestsLock.Unlock() | |||
| cr = requestStructForOp(opcode) | |||
| if cr == nil { | |||
| fmt.Printf("Unknown opcode %d\n", opcode) | |||
| } | |||
| } | |||
| } else { | |||
| if init { | |||
| cr = &connectResponse{} | |||
| readHeader = false | |||
| } else { | |||
| xid := int32(binary.BigEndian.Uint32(buf[4:8])) | |||
| zxid := int64(binary.BigEndian.Uint64(buf[8:16])) | |||
| errnum := int32(binary.BigEndian.Uint32(buf[16:20])) | |||
| if xid != -1 || zxid != -1 { | |||
| requestsLock.Lock() | |||
| found := false | |||
| opcode, found = requests[xid] | |||
| if !found { | |||
| println("WEFWEFEW") | |||
| opcode = 0 | |||
| } | |||
| delete(requests, xid) | |||
| requestsLock.Unlock() | |||
| } else { | |||
| opcode = opWatcherEvent | |||
| } | |||
| cr = responseStructForOp(opcode) | |||
| if cr == nil { | |||
| fmt.Printf("Unknown opcode %d\n", opcode) | |||
| } | |||
| if errnum != 0 { | |||
| cr = &struct{}{} | |||
| } | |||
| } | |||
| } | |||
| opname := "." | |||
| if opcode != -1 { | |||
| opname = opNames[opcode] | |||
| } | |||
| if cr == nil { | |||
| fmt.Printf("%+v %s %+v\n", client, opname, buf[4:4+blen]) | |||
| } else { | |||
| n := 4 | |||
| hdrStr := "" | |||
| if readHeader { | |||
| var hdr interface{} | |||
| if client { | |||
| hdr = &requestHeader{} | |||
| } else { | |||
| hdr = &responseHeader{} | |||
| } | |||
| if n2, err := decodePacket(buf[n:n+blen], hdr); err != nil { | |||
| fmt.Println(err) | |||
| } else { | |||
| n += n2 | |||
| } | |||
| hdrStr = fmt.Sprintf(" %+v", hdr) | |||
| } | |||
| if _, err := decodePacket(buf[n:n+blen], cr); err != nil { | |||
| fmt.Println(err) | |||
| } | |||
| fmt.Printf("%+v %s%s %+v\n", client, opname, hdrStr, cr) | |||
| } | |||
| init = false | |||
| written, err := conn2.Write(buf[:4+blen]) | |||
| if err != nil { | |||
| fmt.Println("3>", client, err) | |||
| return | |||
| } else if written != 4+blen { | |||
| fmt.Printf("Written != read: %d != %d\n", written, blen) | |||
| return | |||
| } | |||
| } | |||
| } | |||
| func handleConnection(addr string, conn net.Conn) { | |||
| zkConn, err := net.Dial("tcp", addr) | |||
| if err != nil { | |||
| fmt.Println(err) | |||
| return | |||
| } | |||
| go trace(conn, zkConn, true) | |||
| trace(zkConn, conn, false) | |||
| } | |||
| func StartTracer(listenAddr, serverAddr string) { | |||
| ln, err := net.Listen("tcp", listenAddr) | |||
| if err != nil { | |||
| panic(err) | |||
| } | |||
| for { | |||
| conn, err := ln.Accept() | |||
| if err != nil { | |||
| fmt.Println(err) | |||
| continue | |||
| } | |||
| go handleConnection(serverAddr, conn) | |||
| } | |||
| } | |||
| @@ -1,40 +0,0 @@ | |||
| package zk | |||
| import ( | |||
| "crypto/sha1" | |||
| "encoding/base64" | |||
| "fmt" | |||
| "math/rand" | |||
| ) | |||
| // AuthACL produces an ACL list containing a single ACL which uses the | |||
| // provided permissions, with the scheme "auth", and ID "", which is used | |||
| // by ZooKeeper to represent any authenticated user. | |||
| func AuthACL(perms int32) []ACL { | |||
| return []ACL{{perms, "auth", ""}} | |||
| } | |||
| // WorldACL produces an ACL list containing a single ACL which uses the | |||
| // provided permissions, with the scheme "world", and ID "anyone", which | |||
| // is used by ZooKeeper to represent any user at all. | |||
| func WorldACL(perms int32) []ACL { | |||
| return []ACL{{perms, "world", "anyone"}} | |||
| } | |||
| func DigestACL(perms int32, user, password string) []ACL { | |||
| userPass := []byte(fmt.Sprintf("%s:%s", user, password)) | |||
| h := sha1.New() | |||
| if n, err := h.Write(userPass); err != nil || n != len(userPass) { | |||
| panic("SHA1 failed") | |||
| } | |||
| digest := base64.StdEncoding.EncodeToString(h.Sum(nil)) | |||
| return []ACL{{perms, "digest", fmt.Sprintf("%s:%s", user, digest)}} | |||
| } | |||
// stringShuffle permutes s in place using the Fisher-Yates algorithm,
// drawing randomness from the package-level math/rand source.
func stringShuffle(s []string) {
	for last := len(s) - 1; last > 0; last-- {
		// Pick a uniformly random element from the unshuffled prefix
		// (including position last itself) and swap it into place.
		pick := rand.Intn(last + 1)
		s[last], s[pick] = s[pick], s[last]
	}
}
| @@ -1,165 +0,0 @@ | |||
| GNU LESSER GENERAL PUBLIC LICENSE | |||
| Version 3, 29 June 2007 | |||
| Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> | |||
| Everyone is permitted to copy and distribute verbatim copies | |||
| of this license document, but changing it is not allowed. | |||
| This version of the GNU Lesser General Public License incorporates | |||
| the terms and conditions of version 3 of the GNU General Public | |||
| License, supplemented by the additional permissions listed below. | |||
| 0. Additional Definitions. | |||
| As used herein, "this License" refers to version 3 of the GNU Lesser | |||
| General Public License, and the "GNU GPL" refers to version 3 of the GNU | |||
| General Public License. | |||
| "The Library" refers to a covered work governed by this License, | |||
| other than an Application or a Combined Work as defined below. | |||
| An "Application" is any work that makes use of an interface provided | |||
| by the Library, but which is not otherwise based on the Library. | |||
| Defining a subclass of a class defined by the Library is deemed a mode | |||
| of using an interface provided by the Library. | |||
| A "Combined Work" is a work produced by combining or linking an | |||
| Application with the Library. The particular version of the Library | |||
| with which the Combined Work was made is also called the "Linked | |||
| Version". | |||
| The "Minimal Corresponding Source" for a Combined Work means the | |||
| Corresponding Source for the Combined Work, excluding any source code | |||
| for portions of the Combined Work that, considered in isolation, are | |||
| based on the Application, and not on the Linked Version. | |||
| The "Corresponding Application Code" for a Combined Work means the | |||
| object code and/or source code for the Application, including any data | |||
| and utility programs needed for reproducing the Combined Work from the | |||
| Application, but excluding the System Libraries of the Combined Work. | |||
| 1. Exception to Section 3 of the GNU GPL. | |||
| You may convey a covered work under sections 3 and 4 of this License | |||
| without being bound by section 3 of the GNU GPL. | |||
| 2. Conveying Modified Versions. | |||
| If you modify a copy of the Library, and, in your modifications, a | |||
| facility refers to a function or data to be supplied by an Application | |||
| that uses the facility (other than as an argument passed when the | |||
| facility is invoked), then you may convey a copy of the modified | |||
| version: | |||
| a) under this License, provided that you make a good faith effort to | |||
| ensure that, in the event an Application does not supply the | |||
| function or data, the facility still operates, and performs | |||
| whatever part of its purpose remains meaningful, or | |||
| b) under the GNU GPL, with none of the additional permissions of | |||
| this License applicable to that copy. | |||
| 3. Object Code Incorporating Material from Library Header Files. | |||
| The object code form of an Application may incorporate material from | |||
| a header file that is part of the Library. You may convey such object | |||
| code under terms of your choice, provided that, if the incorporated | |||
| material is not limited to numerical parameters, data structure | |||
| layouts and accessors, or small macros, inline functions and templates | |||
| (ten or fewer lines in length), you do both of the following: | |||
| a) Give prominent notice with each copy of the object code that the | |||
| Library is used in it and that the Library and its use are | |||
| covered by this License. | |||
| b) Accompany the object code with a copy of the GNU GPL and this license | |||
| document. | |||
| 4. Combined Works. | |||
| You may convey a Combined Work under terms of your choice that, | |||
| taken together, effectively do not restrict modification of the | |||
| portions of the Library contained in the Combined Work and reverse | |||
| engineering for debugging such modifications, if you also do each of | |||
| the following: | |||
| a) Give prominent notice with each copy of the Combined Work that | |||
| the Library is used in it and that the Library and its use are | |||
| covered by this License. | |||
| b) Accompany the Combined Work with a copy of the GNU GPL and this license | |||
| document. | |||
| c) For a Combined Work that displays copyright notices during | |||
| execution, include the copyright notice for the Library among | |||
| these notices, as well as a reference directing the user to the | |||
| copies of the GNU GPL and this license document. | |||
| d) Do one of the following: | |||
| 0) Convey the Minimal Corresponding Source under the terms of this | |||
| License, and the Corresponding Application Code in a form | |||
| suitable for, and under terms that permit, the user to | |||
| recombine or relink the Application with a modified version of | |||
| the Linked Version to produce a modified Combined Work, in the | |||
| manner specified by section 6 of the GNU GPL for conveying | |||
| Corresponding Source. | |||
| 1) Use a suitable shared library mechanism for linking with the | |||
| Library. A suitable mechanism is one that (a) uses at run time | |||
| a copy of the Library already present on the user's computer | |||
| system, and (b) will operate properly with a modified version | |||
| of the Library that is interface-compatible with the Linked | |||
| Version. | |||
| e) Provide Installation Information, but only if you would otherwise | |||
| be required to provide such information under section 6 of the | |||
| GNU GPL, and only to the extent that such information is | |||
| necessary to install and execute a modified version of the | |||
| Combined Work produced by recombining or relinking the | |||
| Application with a modified version of the Linked Version. (If | |||
| you use option 4d0, the Installation Information must accompany | |||
| the Minimal Corresponding Source and Corresponding Application | |||
| Code. If you use option 4d1, you must provide the Installation | |||
| Information in the manner specified by section 6 of the GNU GPL | |||
| for conveying Corresponding Source.) | |||
| 5. Combined Libraries. | |||
| You may place library facilities that are a work based on the | |||
| Library side by side in a single library together with other library | |||
| facilities that are not Applications and are not covered by this | |||
| License, and convey such a combined library under terms of your | |||
| choice, if you do both of the following: | |||
| a) Accompany the combined library with a copy of the same work based | |||
| on the Library, uncombined with any other library facilities, | |||
| conveyed under the terms of this License. | |||
| b) Give prominent notice with the combined library that part of it | |||
| is a work based on the Library, and explaining where to find the | |||
| accompanying uncombined form of the same work. | |||
| 6. Revised Versions of the GNU Lesser General Public License. | |||
| The Free Software Foundation may publish revised and/or new versions | |||
| of the GNU Lesser General Public License from time to time. Such new | |||
| versions will be similar in spirit to the present version, but may | |||
| differ in detail to address new problems or concerns. | |||
| Each version is given a distinguishing version number. If the | |||
| Library as you received it specifies that a certain numbered version | |||
| of the GNU Lesser General Public License "or any later version" | |||
| applies to it, you have the option of following the terms and | |||
| conditions either of that published version or of any later version | |||
| published by the Free Software Foundation. If the Library as you | |||
| received it does not specify a version number of the GNU Lesser | |||
| General Public License, you may choose any version of the GNU Lesser | |||
| General Public License ever published by the Free Software Foundation. | |||
| If the Library as you received it specifies that a proxy can decide | |||
| whether future versions of the GNU Lesser General Public License shall | |||
| apply, that proxy's public statement of acceptance of any version is | |||
| permanent authorization for you to choose that version for the | |||
| Library. | |||
| @@ -1,18 +0,0 @@ | |||
| // +build darwin | |||
| package log | |||
| import ( | |||
| "log" | |||
| "os" | |||
| "syscall" | |||
| ) | |||
// CrashLog redirects stderr (fd 2) to the named file, appending, so that
// panic output is captured there. Errors are reported via the standard
// logger; the function never fails hard.
func CrashLog(file string) {
	f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Println(err.Error())
		return
	}
	// BUG FIX: the Dup2 error was silently discarded; report it so a
	// failed redirection is visible.
	if err := syscall.Dup2(int(f.Fd()), 2); err != nil {
		log.Println(err.Error())
	}
}
| @@ -1,18 +0,0 @@ | |||
| // +build freebsd openbsd netbsd dragonfly linux | |||
| package log | |||
| import ( | |||
| "log" | |||
| "os" | |||
| "syscall" | |||
| ) | |||
// CrashLog redirects stderr (fd 2) to the named file, appending, so that
// panic output is captured there. Errors are reported via the standard
// logger; the function never fails hard.
func CrashLog(file string) {
	f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Println(err.Error())
		return
	}
	// BUG FIX: the Dup3 error was silently discarded; report it so a
	// failed redirection is visible.
	if err := syscall.Dup3(int(f.Fd()), 2, 0); err != nil {
		log.Println(err.Error())
	}
}
| @@ -1,37 +0,0 @@ | |||
| // +build windows | |||
| package log | |||
| import ( | |||
| "log" | |||
| "os" | |||
| "syscall" | |||
| ) | |||
var (
	// kernel32/procSetStdHandle resolve the Win32 SetStdHandle entry point
	// at startup; MustLoadDLL/MustFindProc panic if it is unavailable.
	kernel32         = syscall.MustLoadDLL("kernel32.dll")
	procSetStdHandle = kernel32.MustFindProc("SetStdHandle")
)
// setStdHandle redirects one of the process standard handles (identified by
// stdhandle, e.g. syscall.STD_ERROR_HANDLE) to handle via the Win32
// SetStdHandle API. It returns a non-nil error if the call fails.
func setStdHandle(stdhandle int32, handle syscall.Handle) error {
	r0, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
	if r0 == 0 {
		// A zero return means failure; e1 carries the Win32 error code
		// when the OS supplied one, otherwise fall back to EINVAL.
		if e1 != 0 {
			return error(e1)
		}
		return syscall.EINVAL
	}
	return nil
}
// CrashLog redirects the process's standard error handle to the named file,
// appending, so that crash output is captured there (Windows variant).
// Errors are reported via the standard logger; the function never fails hard.
func CrashLog(file string) {
	f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		log.Println(err.Error())
	} else {
		err = setStdHandle(syscall.STD_ERROR_HANDLE, syscall.Handle(f.Fd()))
		if err != nil {
			log.Println(err.Error())
		}
	}
}
| @@ -1,380 +0,0 @@ | |||
| //high level log wrapper, so it can output different log based on level | |||
| package log | |||
| import ( | |||
| "fmt" | |||
| "io" | |||
| "log" | |||
| "os" | |||
| "runtime" | |||
| "sync" | |||
| "time" | |||
| ) | |||
// Flag constants re-exported from the standard log package for convenience.
const (
	Ldate         = log.Ldate
	Llongfile     = log.Llongfile
	Lmicroseconds = log.Lmicroseconds
	Lshortfile    = log.Lshortfile
	LstdFlags     = log.LstdFlags
	Ltime         = log.Ltime
)

type (
	// LogLevel is a bitmask of enabled LogType bits.
	LogLevel int
	// LogType identifies a single severity (one bit of a LogLevel).
	LogType int
)

// One bit per severity; combined into cumulative LogLevel masks below.
const (
	LOG_FATAL   = LogType(0x1)
	LOG_ERROR   = LogType(0x2)
	LOG_WARNING = LogType(0x4)
	LOG_INFO    = LogType(0x8)
	LOG_DEBUG   = LogType(0x10)
)

// Cumulative level masks: each level enables its own bit plus every
// more-severe one, so e.g. LOG_LEVEL_INFO also passes warnings and errors.
const (
	LOG_LEVEL_NONE  = LogLevel(0x0)
	LOG_LEVEL_FATAL = LOG_LEVEL_NONE | LogLevel(LOG_FATAL)
	LOG_LEVEL_ERROR = LOG_LEVEL_FATAL | LogLevel(LOG_ERROR)
	LOG_LEVEL_WARN  = LOG_LEVEL_ERROR | LogLevel(LOG_WARNING)
	LOG_LEVEL_INFO  = LOG_LEVEL_WARN | LogLevel(LOG_INFO)
	LOG_LEVEL_DEBUG = LOG_LEVEL_INFO | LogLevel(LOG_DEBUG)
	LOG_LEVEL_ALL   = LOG_LEVEL_DEBUG
)

// Time layouts (Go reference time) used to build rotation file suffixes.
const FORMAT_TIME_DAY string = "20060102"
const FORMAT_TIME_HOUR string = "2006010215"
| var _log *logger = New() | |||
| func init() { | |||
| SetFlags(Ldate | Ltime | Lshortfile) | |||
| SetHighlighting(runtime.GOOS != "windows") | |||
| } | |||
// Logger returns the underlying standard-library *log.Logger of the
// package-level logger.
func Logger() *log.Logger {
	return _log._log
}

// SetLevel sets the verbosity level mask of the package-level logger.
func SetLevel(level LogLevel) {
	_log.SetLevel(level)
}

// GetLogLevel reports the current verbosity level of the package-level logger.
func GetLogLevel() LogLevel {
	return _log.level
}

// SetOutput redirects the package-level logger to out.
func SetOutput(out io.Writer) {
	_log.SetOutput(out)
}

// SetOutputByName redirects the package-level logger to the file at path.
func SetOutputByName(path string) error {
	return _log.SetOutputByName(path)
}

// SetFlags sets the standard log flags (Ldate, Ltime, ...) on the
// package-level logger.
func SetFlags(flags int) {
	_log._log.SetFlags(flags)
}
// Info logs v at INFO level via the package-level logger.
func Info(v ...interface{}) {
	_log.Info(v...)
}

// Infof logs a formatted message at INFO level via the package-level logger.
func Infof(format string, v ...interface{}) {
	_log.Infof(format, v...)
}

// Debug logs v at DEBUG level via the package-level logger.
func Debug(v ...interface{}) {
	_log.Debug(v...)
}

// Debugf logs a formatted message at DEBUG level via the package-level logger.
func Debugf(format string, v ...interface{}) {
	_log.Debugf(format, v...)
}

// Warn logs v at WARNING level; alias of Warning.
func Warn(v ...interface{}) {
	_log.Warning(v...)
}

// Warnf logs a formatted message at WARNING level; alias of Warningf.
func Warnf(format string, v ...interface{}) {
	_log.Warningf(format, v...)
}

// Warning logs v at WARNING level via the package-level logger.
func Warning(v ...interface{}) {
	_log.Warning(v...)
}

// Warningf logs a formatted message at WARNING level via the package-level logger.
func Warningf(format string, v ...interface{}) {
	_log.Warningf(format, v...)
}

// Error logs v at ERROR level via the package-level logger.
func Error(v ...interface{}) {
	_log.Error(v...)
}

// Errorf logs a formatted message at ERROR level via the package-level logger.
func Errorf(format string, v ...interface{}) {
	_log.Errorf(format, v...)
}

// Fatal logs v at FATAL level and terminates the process.
func Fatal(v ...interface{}) {
	_log.Fatal(v...)
}

// Fatalf logs a formatted message at FATAL level and terminates the process.
func Fatalf(format string, v ...interface{}) {
	_log.Fatalf(format, v...)
}
// SetLevelByString sets the level of the package-level logger from a name
// such as "info", "warn", or "error" (unknown names enable all levels).
func SetLevelByString(level string) {
	_log.SetLevelByString(level)
}

// SetHighlighting toggles ANSI color output on the package-level logger.
func SetHighlighting(highlighting bool) {
	_log.SetHighlighting(highlighting)
}

// SetRotateByDay enables daily log-file rotation on the package-level logger.
func SetRotateByDay() {
	_log.SetRotateByDay()
}

// SetRotateByHour enables hourly log-file rotation on the package-level logger.
func SetRotateByHour() {
	_log.SetRotateByHour()
}
// logger wraps a standard *log.Logger with level filtering, optional ANSI
// color highlighting, and optional day/hour log-file rotation.
type logger struct {
	_log         *log.Logger
	level        LogLevel   // bitmask of enabled LogTypes
	highlighting bool       // wrap messages in ANSI color codes
	dailyRolling bool       // rotate the output file once per day
	hourRolling  bool       // rotate the output file once per hour
	fileName     string     // current output file path (when file-backed)
	logSuffix    string     // timestamp suffix of the active rotation period
	fd           *os.File   // open handle to fileName
	lock         sync.Mutex // guards rotation state
}
// SetHighlighting toggles ANSI color codes around each message.
func (l *logger) SetHighlighting(highlighting bool) {
	l.highlighting = highlighting
}

// SetLevel sets the bitmask of log types that will be emitted.
func (l *logger) SetLevel(level LogLevel) {
	l.level = level
}

// SetLevelByString sets the level from a name such as "debug" or "warn".
func (l *logger) SetLevelByString(level string) {
	l.level = StringToLogLevel(level)
}

// SetRotateByDay enables daily rotation, stamping the current day as the
// active rotation period.
func (l *logger) SetRotateByDay() {
	l.dailyRolling = true
	l.logSuffix = genDayTime(time.Now())
}

// SetRotateByHour enables hourly rotation, stamping the current hour as the
// active rotation period.
func (l *logger) SetRotateByHour() {
	l.hourRolling = true
	l.logSuffix = genHourTime(time.Now())
}
| func (l *logger) rotate() error { | |||
| l.lock.Lock() | |||
| defer l.lock.Unlock() | |||
| var suffix string | |||
| if l.dailyRolling { | |||
| suffix = genDayTime(time.Now()) | |||
| } else if l.hourRolling { | |||
| suffix = genHourTime(time.Now()) | |||
| } else { | |||
| return nil | |||
| } | |||
| // Notice: if suffix is not equal to l.LogSuffix, then rotate | |||
| if suffix != l.logSuffix { | |||
| err := l.doRotate(suffix) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
// doRotate closes the current log file, renames it to
// "<fileName>.<oldSuffix>", reopens fileName fresh, and records suffix as
// the new rotation period. The caller must hold l.lock.
func (l *logger) doRotate(suffix string) error {
	// Notice: Not check error, is this ok?
	l.fd.Close()
	lastFileName := l.fileName + "." + l.logSuffix
	err := os.Rename(l.fileName, lastFileName)
	if err != nil {
		return err
	}
	err = l.SetOutputByName(l.fileName)
	if err != nil {
		return err
	}
	l.logSuffix = suffix
	return nil
}
| func (l *logger) SetOutput(out io.Writer) { | |||
| l._log = log.New(out, l._log.Prefix(), l._log.Flags()) | |||
| } | |||
| func (l *logger) SetOutputByName(path string) error { | |||
| f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666) | |||
| if err != nil { | |||
| log.Fatal(err) | |||
| } | |||
| l.SetOutput(f) | |||
| l.fileName = path | |||
| l.fd = f | |||
| return err | |||
| } | |||
// log emits v at type t, honoring the level mask, file rotation, and
// highlighting. It uses calldepth 4 with Output so the reported file:line
// points at the original caller through the package-level wrapper functions.
func (l *logger) log(t LogType, v ...interface{}) {
	// Bit test: proceed only when t's bit is already set in the level mask.
	if l.level|LogLevel(t) != l.level {
		return
	}
	err := l.rotate()
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err.Error())
		return
	}
	// Build "[level] args... <reset>": a tag prefix plus, when highlighting
	// is on, ANSI color start/reset sequences around the whole line.
	v1 := make([]interface{}, len(v)+2)
	logStr, logColor := LogTypeToString(t)
	if l.highlighting {
		v1[0] = "\033" + logColor + "m[" + logStr + "]"
		copy(v1[1:], v)
		v1[len(v)+1] = "\033[0m"
	} else {
		v1[0] = "[" + logStr + "]"
		copy(v1[1:], v)
		v1[len(v)+1] = ""
	}
	s := fmt.Sprintln(v1...)
	l._log.Output(4, s)
}
// logf is the formatted counterpart of log: it emits a Sprintf-formatted
// message at type t, honoring the level mask, rotation, and highlighting.
// Calldepth 4 makes file:line point at the original caller.
func (l *logger) logf(t LogType, format string, v ...interface{}) {
	// Bit test: proceed only when t's bit is already set in the level mask.
	if l.level|LogLevel(t) != l.level {
		return
	}
	err := l.rotate()
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err.Error())
		return
	}
	logStr, logColor := LogTypeToString(t)
	var s string
	if l.highlighting {
		s = "\033" + logColor + "m[" + logStr + "] " + fmt.Sprintf(format, v...) + "\033[0m"
	} else {
		s = "[" + logStr + "] " + fmt.Sprintf(format, v...)
	}
	l._log.Output(4, s)
}
// Fatal logs v at FATAL level and terminates the process with exit code -1
// (reported as status 255 on Unix).
func (l *logger) Fatal(v ...interface{}) {
	l.log(LOG_FATAL, v...)
	os.Exit(-1)
}

// Fatalf is the formatted variant of Fatal; it also terminates the process.
func (l *logger) Fatalf(format string, v ...interface{}) {
	l.logf(LOG_FATAL, format, v...)
	os.Exit(-1)
}
// Error logs v at ERROR level.
func (l *logger) Error(v ...interface{}) {
	l.log(LOG_ERROR, v...)
}

// Errorf logs a formatted message at ERROR level.
func (l *logger) Errorf(format string, v ...interface{}) {
	l.logf(LOG_ERROR, format, v...)
}

// Warning logs v at WARNING level.
func (l *logger) Warning(v ...interface{}) {
	l.log(LOG_WARNING, v...)
}

// Warningf logs a formatted message at WARNING level.
func (l *logger) Warningf(format string, v ...interface{}) {
	l.logf(LOG_WARNING, format, v...)
}

// Debug logs v at DEBUG level.
func (l *logger) Debug(v ...interface{}) {
	l.log(LOG_DEBUG, v...)
}

// Debugf logs a formatted message at DEBUG level.
func (l *logger) Debugf(format string, v ...interface{}) {
	l.logf(LOG_DEBUG, format, v...)
}

// Info logs v at INFO level.
func (l *logger) Info(v ...interface{}) {
	l.log(LOG_INFO, v...)
}

// Infof logs a formatted message at INFO level.
func (l *logger) Infof(format string, v ...interface{}) {
	l.logf(LOG_INFO, format, v...)
}
| func StringToLogLevel(level string) LogLevel { | |||
| switch level { | |||
| case "fatal": | |||
| return LOG_LEVEL_FATAL | |||
| case "error": | |||
| return LOG_LEVEL_ERROR | |||
| case "warn": | |||
| return LOG_LEVEL_WARN | |||
| case "warning": | |||
| return LOG_LEVEL_WARN | |||
| case "debug": | |||
| return LOG_LEVEL_DEBUG | |||
| case "info": | |||
| return LOG_LEVEL_INFO | |||
| } | |||
| return LOG_LEVEL_ALL | |||
| } | |||
| func LogTypeToString(t LogType) (string, string) { | |||
| switch t { | |||
| case LOG_FATAL: | |||
| return "fatal", "[0;31" | |||
| case LOG_ERROR: | |||
| return "error", "[0;31" | |||
| case LOG_WARNING: | |||
| return "warning", "[0;33" | |||
| case LOG_DEBUG: | |||
| return "debug", "[0;36" | |||
| case LOG_INFO: | |||
| return "info", "[0;37" | |||
| } | |||
| return "unknown", "[0;37" | |||
| } | |||
// genDayTime formats t as YYYYMMDD, used as the daily rotation suffix.
func genDayTime(t time.Time) string {
	return t.Format(FORMAT_TIME_DAY)
}

// genHourTime formats t as YYYYMMDDHH, used as the hourly rotation suffix.
func genHourTime(t time.Time) string {
	return t.Format(FORMAT_TIME_HOUR)
}
// New returns a logger writing to stderr with no prefix, standard flags,
// and all levels enabled.
func New() *logger {
	return Newlogger(os.Stderr, "")
}

// Newlogger returns a logger writing to w with the given prefix, standard
// flags, all levels enabled, and highlighting turned on.
func Newlogger(w io.Writer, prefix string) *logger {
	return &logger{_log: log.New(w, prefix, LstdFlags), level: LOG_LEVEL_ALL, highlighting: true}
}
| @@ -1,72 +0,0 @@ | |||
| // Copyright 2014, Google Inc. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package pools | |||
| import ( | |||
| "fmt" | |||
| "sync" | |||
| ) | |||
// IDPool is used to ensure that the set of IDs in use concurrently never
// contains any duplicates. The IDs start at 1 and increase without bound, but
// will never be larger than the peak number of concurrent uses.
//
// IDPool's Get() and Set() methods can be used concurrently.
//
// The zero value is not usable (the used map would be nil); construct
// instances with NewIDPool.
type IDPool struct {
	sync.Mutex

	// used holds the set of values that have been returned to us with Put().
	used map[uint32]bool

	// maxUsed remembers the largest value we've given out.
	maxUsed uint32
}
| // NewIDPool creates and initializes an IDPool. | |||
| func NewIDPool() *IDPool { | |||
| return &IDPool{ | |||
| used: make(map[uint32]bool), | |||
| } | |||
| } | |||
| // Get returns an ID that is unique among currently active users of this pool. | |||
| func (pool *IDPool) Get() (id uint32) { | |||
| pool.Lock() | |||
| defer pool.Unlock() | |||
| // Pick a value that's been returned, if any. | |||
| for key, _ := range pool.used { | |||
| delete(pool.used, key) | |||
| return key | |||
| } | |||
| // No recycled IDs are available, so increase the pool size. | |||
| pool.maxUsed += 1 | |||
| return pool.maxUsed | |||
| } | |||
| // Put recycles an ID back into the pool for others to use. Putting back a value | |||
| // or 0, or a value that is not currently "checked out", will result in a panic | |||
| // because that should never happen except in the case of a programming error. | |||
| func (pool *IDPool) Put(id uint32) { | |||
| pool.Lock() | |||
| defer pool.Unlock() | |||
| if id < 1 || id > pool.maxUsed { | |||
| panic(fmt.Errorf("IDPool.Put(%v): invalid value, must be in the range [1,%v]", id, pool.maxUsed)) | |||
| } | |||
| if pool.used[id] { | |||
| panic(fmt.Errorf("IDPool.Put(%v): can't put value that was already recycled", id)) | |||
| } | |||
| // If we're recycling maxUsed, just shrink the pool. | |||
| if id == pool.maxUsed { | |||
| pool.maxUsed = id - 1 | |||
| return | |||
| } | |||
| // Add it to the set of recycled IDs. | |||
| pool.used[id] = true | |||
| } | |||
| @@ -1,149 +0,0 @@ | |||
| // Copyright 2012, Google Inc. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package pools | |||
| import ( | |||
| "fmt" | |||
| "sync" | |||
| "time" | |||
| ) | |||
// Numbered allows you to manage resources by tracking them with numbers.
// There are no interface restrictions on what you can track.
type Numbered struct {
	mu        sync.Mutex                  // guards resources and empty
	empty     *sync.Cond                  // Broadcast when pool becomes empty
	resources map[int64]*numberedWrapper  // tracked resources keyed by id
}

// numberedWrapper pairs a tracked value with its lock state and timestamps.
type numberedWrapper struct {
	val         interface{}
	inUse       bool      // currently locked via Get/GetOutdated/GetIdle
	purpose     string    // why the current holder locked it
	timeCreated time.Time // set once by Register
	timeUsed    time.Time // refreshed on every Put
}
| func NewNumbered() *Numbered { | |||
| n := &Numbered{resources: make(map[int64]*numberedWrapper)} | |||
| n.empty = sync.NewCond(&n.mu) | |||
| return n | |||
| } | |||
| // Register starts tracking a resource by the supplied id. | |||
| // It does not lock the object. | |||
| // It returns an error if the id already exists. | |||
| func (nu *Numbered) Register(id int64, val interface{}) error { | |||
| nu.mu.Lock() | |||
| defer nu.mu.Unlock() | |||
| if _, ok := nu.resources[id]; ok { | |||
| return fmt.Errorf("already present") | |||
| } | |||
| now := time.Now() | |||
| nu.resources[id] = &numberedWrapper{ | |||
| val: val, | |||
| timeCreated: now, | |||
| timeUsed: now, | |||
| } | |||
| return nil | |||
| } | |||
// Unregister forgets the specified resource.
// If the resource is not present, it's ignored.
func (nu *Numbered) Unregister(id int64) {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	delete(nu.resources, id)
	// Wake any WaitForEmpty callers once the last resource is gone.
	if len(nu.resources) == 0 {
		nu.empty.Broadcast()
	}
}
| // Get locks the resource for use. It accepts a purpose as a string. | |||
| // If it cannot be found, it returns a "not found" error. If in use, | |||
| // it returns a "in use: purpose" error. | |||
| func (nu *Numbered) Get(id int64, purpose string) (val interface{}, err error) { | |||
| nu.mu.Lock() | |||
| defer nu.mu.Unlock() | |||
| nw, ok := nu.resources[id] | |||
| if !ok { | |||
| return nil, fmt.Errorf("not found") | |||
| } | |||
| if nw.inUse { | |||
| return nil, fmt.Errorf("in use: %s", nw.purpose) | |||
| } | |||
| nw.inUse = true | |||
| nw.purpose = purpose | |||
| return nw.val, nil | |||
| } | |||
| // Put unlocks a resource for someone else to use. | |||
| func (nu *Numbered) Put(id int64) { | |||
| nu.mu.Lock() | |||
| defer nu.mu.Unlock() | |||
| if nw, ok := nu.resources[id]; ok { | |||
| nw.inUse = false | |||
| nw.purpose = "" | |||
| nw.timeUsed = time.Now() | |||
| } | |||
| } | |||
| // GetOutdated returns a list of resources that are older than age, and locks them. | |||
| // It does not return any resources that are already locked. | |||
| func (nu *Numbered) GetOutdated(age time.Duration, purpose string) (vals []interface{}) { | |||
| nu.mu.Lock() | |||
| defer nu.mu.Unlock() | |||
| now := time.Now() | |||
| for _, nw := range nu.resources { | |||
| if nw.inUse { | |||
| continue | |||
| } | |||
| if nw.timeCreated.Add(age).Sub(now) <= 0 { | |||
| nw.inUse = true | |||
| nw.purpose = purpose | |||
| vals = append(vals, nw.val) | |||
| } | |||
| } | |||
| return vals | |||
| } | |||
| // GetIdle returns a list of resurces that have been idle for longer | |||
| // than timeout, and locks them. It does not return any resources that | |||
| // are already locked. | |||
| func (nu *Numbered) GetIdle(timeout time.Duration, purpose string) (vals []interface{}) { | |||
| nu.mu.Lock() | |||
| defer nu.mu.Unlock() | |||
| now := time.Now() | |||
| for _, nw := range nu.resources { | |||
| if nw.inUse { | |||
| continue | |||
| } | |||
| if nw.timeUsed.Add(timeout).Sub(now) <= 0 { | |||
| nw.inUse = true | |||
| nw.purpose = purpose | |||
| vals = append(vals, nw.val) | |||
| } | |||
| } | |||
| return vals | |||
| } | |||
// WaitForEmpty returns as soon as the pool becomes empty.
// It blocks on the condition variable that Unregister broadcasts on
// when the last resource is removed.
func (nu *Numbered) WaitForEmpty() {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	for len(nu.resources) != 0 {
		nu.empty.Wait()
	}
}
// StatsJSON renders the registry statistics as a JSON document.
func (nu *Numbered) StatsJSON() string {
	return fmt.Sprintf("{\"Size\": %v}", nu.Size())
}

// Size returns the number of resources currently tracked.
func (nu *Numbered) Size() (size int64) {
	nu.mu.Lock()
	defer nu.mu.Unlock()
	return int64(len(nu.resources))
}
| @@ -1,228 +0,0 @@ | |||
| // Copyright 2012, Google Inc. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // Package pools provides functionality to manage and reuse resources | |||
| // like connections. | |||
| package pools | |||
| import ( | |||
| "fmt" | |||
| "time" | |||
| "github.com/ngaut/sync2" | |||
| ) | |||
var (
	// CLOSED_ERR is returned by pool operations after the pool has been closed.
	CLOSED_ERR = fmt.Errorf("ResourcePool is closed")
)

// Factory is a function that can be used to create a resource.
type Factory func() (Resource, error)

// Every resource needs to support the Resource interface.
// Thread synchronization between Close() and IsClosed()
// is the responsibility of the caller.
type Resource interface {
	Close()
}
// ResourcePool allows you to use a pool of resources.
type ResourcePool struct {
	resources   chan resourceWrapper // buffered to maxCap; zero wrappers are placeholders
	factory     Factory              // creates a resource when a placeholder is drawn
	capacity    sync2.AtomicInt64    // current capacity; 0 means the pool is closed
	idleTimeout sync2.AtomicDuration // resources idle longer than this are discarded; 0 disables

	// stats
	waitCount sync2.AtomicInt64    // number of Get calls that had to block
	waitTime  sync2.AtomicDuration // cumulative time Get calls spent blocked
}

// resourceWrapper pairs a pooled resource with the time it was last
// returned; the zero wrapper is a placeholder for a not-yet-created resource.
type resourceWrapper struct {
	resource Resource
	timeUsed time.Time
}
| // NewResourcePool creates a new ResourcePool pool. | |||
| // capacity is the initial capacity of the pool. | |||
| // maxCap is the maximum capacity. | |||
| // If a resource is unused beyond idleTimeout, it's discarded. | |||
| // An idleTimeout of 0 means that there is no timeout. | |||
| func NewResourcePool(factory Factory, capacity, maxCap int, idleTimeout time.Duration) *ResourcePool { | |||
| if capacity <= 0 || maxCap <= 0 || capacity > maxCap { | |||
| panic(fmt.Errorf("Invalid/out of range capacity")) | |||
| } | |||
| rp := &ResourcePool{ | |||
| resources: make(chan resourceWrapper, maxCap), | |||
| factory: factory, | |||
| capacity: sync2.AtomicInt64(capacity), | |||
| idleTimeout: sync2.AtomicDuration(idleTimeout), | |||
| } | |||
| for i := 0; i < capacity; i++ { | |||
| rp.resources <- resourceWrapper{} | |||
| } | |||
| return rp | |||
| } | |||
// Close empties the pool calling Close on all its resources.
// You can call Close while there are outstanding resources.
// It waits for all resources to be returned (Put).
// After a Close, Get and TryGet are not allowed.
func (rp *ResourcePool) Close() {
	rp.SetCapacity(0)
}

// IsClosed returns true if the pool has been closed (capacity is zero).
func (rp *ResourcePool) IsClosed() (closed bool) {
	return rp.capacity.Get() == 0
}

// Get will return the next available resource. If capacity
// has not been reached, it will create a new one using the factory. Otherwise,
// it will indefinitely wait till the next resource becomes available.
func (rp *ResourcePool) Get() (resource Resource, err error) {
	return rp.get(true)
}

// TryGet will return the next available resource. If none is available, and capacity
// has not been reached, it will create a new one using the factory. Otherwise,
// it will return nil with no error.
func (rp *ResourcePool) TryGet() (resource Resource, err error) {
	return rp.get(false)
}
// get implements Get/TryGet. With wait set it blocks until a wrapper can
// be drawn from the channel; otherwise it returns (nil, nil) when the
// pool is momentarily empty.
func (rp *ResourcePool) get(wait bool) (resource Resource, err error) {
	// Fetch
	var wrapper resourceWrapper
	var ok bool
	select {
	case wrapper, ok = <-rp.resources:
	default:
		if !wait {
			return nil, nil
		}
		startTime := time.Now()
		wrapper, ok = <-rp.resources
		rp.recordWait(startTime)
	}
	// ok is false only when the channel was closed by SetCapacity(0).
	if !ok {
		return nil, CLOSED_ERR
	}
	// Unwrap: discard a resource that sat idle past the timeout, then
	// lazily (re)create one from the resulting placeholder.
	timeout := rp.idleTimeout.Get()
	if wrapper.resource != nil && timeout > 0 && wrapper.timeUsed.Add(timeout).Sub(time.Now()) < 0 {
		wrapper.resource.Close()
		wrapper.resource = nil
	}
	if wrapper.resource == nil {
		wrapper.resource, err = rp.factory()
		if err != nil {
			// Creation failed: return the slot to the pool so capacity
			// accounting stays correct.
			rp.resources <- resourceWrapper{}
		}
	}
	return wrapper.resource, err
}
// Put will return a resource to the pool. For every successful Get,
// a corresponding Put is required. If you no longer need a resource,
// you will need to call Put(nil) instead of returning the closed resource.
// This will eventually cause a new resource to be created in its place.
func (rp *ResourcePool) Put(resource Resource) {
	var wrapper resourceWrapper
	if resource != nil {
		wrapper = resourceWrapper{resource, time.Now()}
	}
	// The send must never block: a Put without a matching Get means the
	// caller broke the Get/Put protocol.
	select {
	case rp.resources <- wrapper:
	default:
		panic(fmt.Errorf("Attempt to Put into a full ResourcePool"))
	}
}
// SetCapacity changes the capacity of the pool.
// You can use it to shrink or expand, but not beyond
// the max capacity. If the change requires the pool
// to be shrunk, SetCapacity waits till the necessary
// number of resources are returned to the pool.
// A SetCapacity of 0 is equivalent to closing the ResourcePool.
func (rp *ResourcePool) SetCapacity(capacity int) error {
	if capacity < 0 || capacity > cap(rp.resources) {
		return fmt.Errorf("capacity %d is out of range", capacity)
	}
	// Atomically swap new capacity with old, but only
	// if old capacity is non-zero (i.e. the pool is not closed).
	var oldcap int
	for {
		oldcap = int(rp.capacity.Get())
		if oldcap == 0 {
			return CLOSED_ERR
		}
		if oldcap == capacity {
			return nil
		}
		if rp.capacity.CompareAndSwap(int64(oldcap), int64(capacity)) {
			break
		}
	}
	if capacity < oldcap {
		// Shrinking: drain the surplus slots, closing any live resources.
		// This blocks until enough outstanding resources are Put back.
		for i := 0; i < oldcap-capacity; i++ {
			wrapper := <-rp.resources
			if wrapper.resource != nil {
				wrapper.resource.Close()
			}
		}
	} else {
		// Growing: add placeholder slots that get() will populate lazily.
		for i := 0; i < capacity-oldcap; i++ {
			rp.resources <- resourceWrapper{}
		}
	}
	if capacity == 0 {
		close(rp.resources)
	}
	return nil
}
// recordWait accounts for one blocked Get: bumps the wait counter and
// accumulates the time spent waiting.
func (rp *ResourcePool) recordWait(start time.Time) {
	rp.waitCount.Add(1)
	rp.waitTime.Add(time.Now().Sub(start))
}

// SetIdleTimeout changes the idle timeout applied by subsequent get() calls.
func (rp *ResourcePool) SetIdleTimeout(idleTimeout time.Duration) {
	rp.idleTimeout.Set(idleTimeout)
}

// StatsJSON renders the pool statistics as a JSON document.
func (rp *ResourcePool) StatsJSON() string {
	c, a, mx, wc, wt, it := rp.Stats()
	return fmt.Sprintf(`{"Capacity": %v, "Available": %v, "MaxCapacity": %v, "WaitCount": %v, "WaitTime": %v, "IdleTimeout": %v}`, c, a, mx, wc, int64(wt), int64(it))
}

// Stats returns a snapshot of the pool counters. Each value is read
// independently, so they are not guaranteed to be mutually consistent.
func (rp *ResourcePool) Stats() (capacity, available, maxCap, waitCount int64, waitTime, idleTimeout time.Duration) {
	return rp.Capacity(), rp.Available(), rp.MaxCap(), rp.WaitCount(), rp.WaitTime(), rp.IdleTimeout()
}

// Capacity returns the current capacity (0 when closed).
func (rp *ResourcePool) Capacity() int64 {
	return rp.capacity.Get()
}

// Available returns the number of wrappers currently sitting in the pool.
func (rp *ResourcePool) Available() int64 {
	return int64(len(rp.resources))
}

// MaxCap returns the maximum capacity the pool was created with.
func (rp *ResourcePool) MaxCap() int64 {
	return int64(cap(rp.resources))
}

// WaitCount returns the number of Get calls that had to block.
func (rp *ResourcePool) WaitCount() int64 {
	return rp.waitCount.Get()
}

// WaitTime returns the cumulative time Get calls spent blocked.
func (rp *ResourcePool) WaitTime() time.Duration {
	return rp.waitTime.Get()
}

// IdleTimeout returns the current idle timeout.
func (rp *ResourcePool) IdleTimeout() time.Duration {
	return rp.idleTimeout.Get()
}
| @@ -1,214 +0,0 @@ | |||
| // Copyright 2012, Google Inc. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package pools | |||
| import ( | |||
| "fmt" | |||
| "sync" | |||
| "time" | |||
| ) | |||
// RoundRobin is deprecated. Use ResourcePool instead.
// RoundRobin allows you to use a pool of resources in a round robin fashion.
type RoundRobin struct {
	mu          sync.Mutex
	available   *sync.Cond // signaled whenever a resource or slot frees up
	resources   chan fifoWrapper
	size        int64 // resources accounted for: in the channel plus checked out
	factory     Factory
	idleTimeout time.Duration

	// stats
	waitCount int64
	waitTime  time.Duration
}

// fifoWrapper pairs a pooled resource with the time it was returned,
// so idle resources can be expired on retrieval.
type fifoWrapper struct {
	resource Resource
	timeUsed time.Time
}
// NewRoundRobin creates a new RoundRobin pool.
// capacity is the maximum number of resources RoundRobin will create.
// The factory used to create resources is supplied later via Open.
// If a resource is unused beyond idleTimeout, it's discarded.
func NewRoundRobin(capacity int, idleTimeout time.Duration) *RoundRobin {
	r := &RoundRobin{
		resources:   make(chan fifoWrapper, capacity),
		size:        0,
		idleTimeout: idleTimeout,
	}
	r.available = sync.NewCond(&r.mu)
	return r
}
// Open starts allowing the creation of resources by installing the factory.
func (rr *RoundRobin) Open(factory Factory) {
	rr.mu.Lock()
	defer rr.mu.Unlock()
	rr.factory = factory
}
// Close empties the pool calling Close on all its resources.
// It waits for all resources to be returned (Put).
func (rr *RoundRobin) Close() {
	rr.mu.Lock()
	defer rr.mu.Unlock()
	for rr.size > 0 {
		select {
		case fw := <-rr.resources:
			// Close asynchronously; only the accounting matters here.
			go fw.resource.Close()
			rr.size--
		default:
			// Resources are still checked out: wait for a Put to signal.
			// Wait releases rr.mu while blocked.
			rr.available.Wait()
		}
	}
	// A nil factory marks the pool closed (see IsClosed).
	rr.factory = nil
}
| func (rr *RoundRobin) IsClosed() bool { | |||
| return rr.factory == nil | |||
| } | |||
// Get will return the next available resource. If none is available, and capacity
// has not been reached, it will create a new one using the factory. Otherwise,
// it will indefinitely wait till the next resource becomes available.
func (rr *RoundRobin) Get() (resource Resource, err error) {
	return rr.get(true)
}

// TryGet will return the next available resource. If none is available, and capacity
// has not been reached, it will create a new one using the factory. Otherwise,
// it will return nil with no error.
func (rr *RoundRobin) TryGet() (resource Resource, err error) {
	return rr.get(false)
}
// get implements Get/TryGet. With wait set, it blocks on the condition
// variable until a resource or a free slot shows up.
func (rr *RoundRobin) get(wait bool) (resource Resource, err error) {
	rr.mu.Lock()
	defer rr.mu.Unlock()
	// Any waits in this loop will release the lock, and it will be
	// reacquired before the waits return.
	for {
		select {
		case fw := <-rr.resources:
			// Found a free resource in the channel
			if rr.idleTimeout > 0 && fw.timeUsed.Add(rr.idleTimeout).Sub(time.Now()) < 0 {
				// resource has been idle for too long. Discard & go for next.
				go fw.resource.Close()
				rr.size--
				// Nobody else should be waiting, but signal anyway.
				rr.available.Signal()
				continue
			}
			return fw.resource, nil
		default:
			// resource channel is empty
			if rr.size >= int64(cap(rr.resources)) {
				// The pool is full
				if wait {
					start := time.Now()
					rr.available.Wait()
					rr.recordWait(start)
					continue
				}
				return nil, nil
			}
			// Pool is not full. Create a resource.
			// waitForCreate runs the factory with rr.mu released.
			if resource, err = rr.waitForCreate(); err != nil {
				// size was decremented, and somebody could be waiting.
				rr.available.Signal()
				return nil, err
			}
			// Creation successful. Account for this by incrementing size.
			rr.size++
			return resource, err
		}
	}
}
// recordWait accounts for one blocked get: bumps the wait counter and
// accumulates the time spent waiting. Callers hold rr.mu.
func (rr *RoundRobin) recordWait(start time.Time) {
	rr.waitCount++
	rr.waitTime += time.Now().Sub(start)
}

// waitForCreate invokes the factory with rr.mu released, so a slow
// factory does not block the whole pool.
// Prevent thundering herd: increment size before creating resource, and decrement after.
func (rr *RoundRobin) waitForCreate() (resource Resource, err error) {
	rr.size++
	rr.mu.Unlock()
	defer func() {
		rr.mu.Lock()
		rr.size--
	}()
	return rr.factory()
}
// Put will return a resource to the pool. You MUST return every resource to the pool,
// even if it's closed. If a resource is closed, you should call Put(nil).
func (rr *RoundRobin) Put(resource Resource) {
	rr.mu.Lock()
	// Deferred calls run LIFO: the mutex is released first, then a
	// waiter is signaled.
	defer rr.available.Signal()
	defer rr.mu.Unlock()
	if rr.size > int64(cap(rr.resources)) {
		// The pool shrank while this resource was checked out: drop it.
		if resource != nil {
			go resource.Close()
		}
		rr.size--
	} else if resource == nil {
		// Caller discarded a closed resource; free its slot so a new
		// one can be created.
		rr.size--
	} else {
		if len(rr.resources) == cap(rr.resources) {
			panic("unexpected")
		}
		rr.resources <- fifoWrapper{resource, time.Now()}
	}
}
// SetCapacity changes the capacity of the pool.
// You can use it to expand or shrink.
func (rr *RoundRobin) SetCapacity(capacity int) error {
	rr.mu.Lock()
	// Deferred calls run LIFO: unlock first, then wake all waiters so
	// they observe the new channel.
	defer rr.available.Broadcast()
	defer rr.mu.Unlock()
	nr := make(chan fifoWrapper, capacity)
	// This loop transfers resources from the old channel
	// to the new one, until it fills up or runs out.
	// It discards extras, if any.
	for {
		select {
		case fw := <-rr.resources:
			if len(nr) < cap(nr) {
				nr <- fw
			} else {
				go fw.resource.Close()
				rr.size--
			}
			continue
		default:
		}
		break
	}
	rr.resources = nr
	return nil
}
// SetIdleTimeout changes the idle timeout used by subsequent get() calls.
func (rr *RoundRobin) SetIdleTimeout(idleTimeout time.Duration) {
	rr.mu.Lock()
	defer rr.mu.Unlock()
	rr.idleTimeout = idleTimeout
}

// StatsJSON renders the pool statistics as a JSON document.
func (rr *RoundRobin) StatsJSON() string {
	s, c, a, wc, wt, it := rr.Stats()
	return fmt.Sprintf("{\"Size\": %v, \"Capacity\": %v, \"Available\": %v, \"WaitCount\": %v, \"WaitTime\": %v, \"IdleTimeout\": %v}", s, c, a, wc, int64(wt), int64(it))
}

// Stats returns a consistent snapshot of the pool counters, taken under rr.mu.
func (rr *RoundRobin) Stats() (size, capacity, available, waitCount int64, waitTime, idleTimeout time.Duration) {
	rr.mu.Lock()
	defer rr.mu.Unlock()
	return rr.size, int64(cap(rr.resources)), int64(len(rr.resources)), rr.waitCount, rr.waitTime, rr.idleTimeout
}
| @@ -1,114 +0,0 @@ | |||
| // Copyright 2013, Google Inc. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package sync2 | |||
| import ( | |||
| "sync" | |||
| "sync/atomic" | |||
| "time" | |||
| ) | |||
// AtomicInt32 is an int32 that can be accessed atomically.
type AtomicInt32 int32

// Add atomically adds n to the value and returns the new value.
func (i *AtomicInt32) Add(n int32) int32 {
	return atomic.AddInt32((*int32)(i), n)
}

// Set atomically stores n.
func (i *AtomicInt32) Set(n int32) {
	atomic.StoreInt32((*int32)(i), n)
}

// Get atomically loads the value.
func (i *AtomicInt32) Get() int32 {
	return atomic.LoadInt32((*int32)(i))
}

// CompareAndSwap atomically replaces oldval with newval and reports
// whether the swap happened.
func (i *AtomicInt32) CompareAndSwap(oldval, newval int32) (swapped bool) {
	return atomic.CompareAndSwapInt32((*int32)(i), oldval, newval)
}

// AtomicUint32 is a uint32 that can be accessed atomically.
type AtomicUint32 uint32

// Add atomically adds n to the value and returns the new value.
func (i *AtomicUint32) Add(n uint32) uint32 {
	return atomic.AddUint32((*uint32)(i), n)
}

// Set atomically stores n.
func (i *AtomicUint32) Set(n uint32) {
	atomic.StoreUint32((*uint32)(i), n)
}

// Get atomically loads the value.
func (i *AtomicUint32) Get() uint32 {
	return atomic.LoadUint32((*uint32)(i))
}

// CompareAndSwap atomically replaces oldval with newval and reports
// whether the swap happened.
func (i *AtomicUint32) CompareAndSwap(oldval, newval uint32) (swapped bool) {
	return atomic.CompareAndSwapUint32((*uint32)(i), oldval, newval)
}

// AtomicInt64 is an int64 that can be accessed atomically.
type AtomicInt64 int64

// Add atomically adds n to the value and returns the new value.
func (i *AtomicInt64) Add(n int64) int64 {
	return atomic.AddInt64((*int64)(i), n)
}

// Set atomically stores n.
func (i *AtomicInt64) Set(n int64) {
	atomic.StoreInt64((*int64)(i), n)
}

// Get atomically loads the value.
func (i *AtomicInt64) Get() int64 {
	return atomic.LoadInt64((*int64)(i))
}

// CompareAndSwap atomically replaces oldval with newval and reports
// whether the swap happened.
func (i *AtomicInt64) CompareAndSwap(oldval, newval int64) (swapped bool) {
	return atomic.CompareAndSwapInt64((*int64)(i), oldval, newval)
}

// AtomicDuration is a time.Duration that can be accessed atomically
// (stored as its int64 nanosecond count).
type AtomicDuration int64

// Add atomically adds duration and returns the new value.
func (d *AtomicDuration) Add(duration time.Duration) time.Duration {
	return time.Duration(atomic.AddInt64((*int64)(d), int64(duration)))
}

// Set atomically stores duration.
func (d *AtomicDuration) Set(duration time.Duration) {
	atomic.StoreInt64((*int64)(d), int64(duration))
}

// Get atomically loads the duration.
func (d *AtomicDuration) Get() time.Duration {
	return time.Duration(atomic.LoadInt64((*int64)(d)))
}

// CompareAndSwap atomically replaces oldval with newval and reports
// whether the swap happened.
func (d *AtomicDuration) CompareAndSwap(oldval, newval time.Duration) (swapped bool) {
	return atomic.CompareAndSwapInt64((*int64)(d), int64(oldval), int64(newval))
}
// AtomicString gives you atomic-style APIs for string, but
// it's only a convenience wrapper that uses a mutex. So, it's
// not as efficient as the rest of the atomic types.
type AtomicString struct {
	mu  sync.Mutex
	str string
}

// Set atomically replaces the stored string.
func (s *AtomicString) Set(str string) {
	s.mu.Lock()
	s.str = str
	s.mu.Unlock()
}

// Get atomically returns the stored string.
func (s *AtomicString) Get() string {
	s.mu.Lock()
	str := s.str
	s.mu.Unlock()
	return str
}

// CompareAndSwap replaces the stored string with newval only if it
// currently equals oldval, and reports whether the swap happened.
// (Named result fixed from the misspelled "swqpped".)
func (s *AtomicString) CompareAndSwap(oldval, newval string) (swapped bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.str == oldval {
		s.str = newval
		return true
	}
	return false
}
| @@ -1,56 +0,0 @@ | |||
| // Copyright 2013, Google Inc. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package sync2 | |||
| import ( | |||
| "sync" | |||
| ) | |||
// Cond is an alternate implementation of sync.Cond
type Cond struct {
	L       sync.Locker
	sema    chan struct{} // unbuffered; each send wakes exactly one waiter
	waiters AtomicInt64   // number of goroutines currently blocked in Wait
}

// NewCond creates a Cond that uses l to protect the condition.
func NewCond(l sync.Locker) *Cond {
	return &Cond{L: l, sema: make(chan struct{})}
}
// Wait registers the caller as a waiter, releases the lock, and blocks
// until woken by Signal or Broadcast; the lock is reacquired before return.
func (c *Cond) Wait() {
	c.waiters.Add(1)
	c.L.Unlock()
	<-c.sema
	c.L.Lock()
}

// Signal wakes one waiter, if any. The CAS loop claims a waiter slot
// before sending, so concurrent Signals never wake more goroutines than
// are waiting.
func (c *Cond) Signal() {
	for {
		w := c.waiters.Get()
		if w == 0 {
			return
		}
		if c.waiters.CompareAndSwap(w, w-1) {
			break
		}
	}
	c.sema <- struct{}{}
}

// Broadcast wakes every goroutine that was waiting at the moment the
// CAS zeroed the counter: one token is sent per claimed waiter.
func (c *Cond) Broadcast() {
	var w int64
	for {
		w = c.waiters.Get()
		if w == 0 {
			return
		}
		if c.waiters.CompareAndSwap(w, 0) {
			break
		}
	}
	for i := int64(0); i < w; i++ {
		c.sema <- struct{}{}
	}
}
| @@ -1,55 +0,0 @@ | |||
| // Copyright 2012, Google Inc. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package sync2 | |||
| // What's in a name? Channels have all you need to emulate a counting | |||
| // semaphore with a boatload of extra functionality. However, in some | |||
| // cases, you just want a familiar API. | |||
| import ( | |||
| "time" | |||
| ) | |||
| // Semaphore is a counting semaphore with the option to | |||
| // specify a timeout. | |||
| type Semaphore struct { | |||
| slots chan struct{} | |||
| timeout time.Duration | |||
| } | |||
| // NewSemaphore creates a Semaphore. The count parameter must be a positive | |||
| // number. A timeout of zero means that there is no timeout. | |||
| func NewSemaphore(count int, timeout time.Duration) *Semaphore { | |||
| sem := &Semaphore{ | |||
| slots: make(chan struct{}, count), | |||
| timeout: timeout, | |||
| } | |||
| for i := 0; i < count; i++ { | |||
| sem.slots <- struct{}{} | |||
| } | |||
| return sem | |||
| } | |||
| // Acquire returns true on successful acquisition, and | |||
| // false on a timeout. | |||
| func (sem *Semaphore) Acquire() bool { | |||
| if sem.timeout == 0 { | |||
| <-sem.slots | |||
| return true | |||
| } | |||
| select { | |||
| case <-sem.slots: | |||
| return true | |||
| case <-time.After(sem.timeout): | |||
| return false | |||
| } | |||
| } | |||
| // Release releases the acquired semaphore. You must | |||
| // not release more than the number of semaphores you've | |||
| // acquired. | |||
| func (sem *Semaphore) Release() { | |||
| sem.slots <- struct{}{} | |||
| } | |||
| @@ -1,121 +0,0 @@ | |||
| // Copyright 2013, Google Inc. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package sync2 | |||
| import ( | |||
| "sync" | |||
| ) | |||
// These are the three predefined states of a service.
const (
	SERVICE_STOPPED = iota
	SERVICE_RUNNING
	SERVICE_SHUTTING_DOWN
)

// stateNames maps the state constants above to human-readable names.
var stateNames = []string{
	"Stopped",
	"Running",
	"ShuttingDown",
}

// ServiceManager manages the state of a service through its lifecycle.
type ServiceManager struct {
	mu    sync.Mutex     // serializes Go and Stop
	wg    sync.WaitGroup // tracks the running service goroutine
	err   error          // err is the error returned from the service function.
	state AtomicInt64
	// shutdown is created when the service starts and is closed when the service
	// enters the SERVICE_SHUTTING_DOWN state.
	shutdown chan struct{}
}
// Go tries to change the state from SERVICE_STOPPED to SERVICE_RUNNING.
//
// If the current state is not SERVICE_STOPPED (already running), it returns
// false immediately.
//
// On successful transition, it launches the service as a goroutine and returns
// true. The service function is responsible for returning on its own when
// requested, either by regularly checking svc.IsRunning(), or by waiting for
// the svc.ShuttingDown channel to be closed.
//
// When the service func returns, the state is reverted to SERVICE_STOPPED.
func (svm *ServiceManager) Go(service func(svc *ServiceContext) error) bool {
	svm.mu.Lock()
	defer svm.mu.Unlock()
	if !svm.state.CompareAndSwap(SERVICE_STOPPED, SERVICE_RUNNING) {
		return false
	}
	svm.wg.Add(1)
	svm.err = nil
	svm.shutdown = make(chan struct{})
	go func() {
		// Run the service; on return, record its error, revert the
		// state, and release anyone blocked in Wait/Join.
		svm.err = service(&ServiceContext{ShuttingDown: svm.shutdown})
		svm.state.Set(SERVICE_STOPPED)
		svm.wg.Done()
	}()
	return true
}
// Stop tries to change the state from SERVICE_RUNNING to SERVICE_SHUTTING_DOWN.
// If the current state is not SERVICE_RUNNING, it returns false immediately.
// On successful transition, it waits for the service to finish, and returns true.
// You are allowed to Go() again after a Stop().
func (svm *ServiceManager) Stop() bool {
	svm.mu.Lock()
	defer svm.mu.Unlock()
	if !svm.state.CompareAndSwap(SERVICE_RUNNING, SERVICE_SHUTTING_DOWN) {
		return false
	}
	// Signal the service that we've transitioned to SERVICE_SHUTTING_DOWN.
	close(svm.shutdown)
	svm.shutdown = nil
	svm.wg.Wait()
	return true
}
// Wait waits for the service to terminate if it's currently running.
func (svm *ServiceManager) Wait() {
	svm.wg.Wait()
}

// Join waits for the service to terminate and returns the value returned by the
// service function.
func (svm *ServiceManager) Join() error {
	svm.wg.Wait()
	return svm.err
}

// State returns the current state of the service.
// This should only be used to report the current state.
func (svm *ServiceManager) State() int64 {
	return svm.state.Get()
}

// StateName returns the name of the current state.
func (svm *ServiceManager) StateName() string {
	return stateNames[svm.State()]
}
// ServiceContext is passed into the service function to give it access to
// information about the running service.
type ServiceContext struct {
	// ShuttingDown is a channel that the service can select on to be notified
	// when it should shut down. The channel is closed when the state transitions
	// from SERVICE_RUNNING to SERVICE_SHUTTING_DOWN.
	ShuttingDown chan struct{}
}

// IsRunning returns true if the ServiceContext.ShuttingDown channel has not
// been closed yet.
func (svc *ServiceContext) IsRunning() bool {
	// A non-blocking receive succeeds only once ShuttingDown is closed.
	select {
	case <-svc.ShuttingDown:
		return false
	default:
	}
	return true
}
| @@ -1,202 +0,0 @@ | |||
| Apache License | |||
| Version 2.0, January 2004 | |||
| http://www.apache.org/licenses/ | |||
| TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
| 1. Definitions. | |||
| "License" shall mean the terms and conditions for use, reproduction, | |||
| and distribution as defined by Sections 1 through 9 of this document. | |||
| "Licensor" shall mean the copyright owner or entity authorized by | |||
| the copyright owner that is granting the License. | |||
| "Legal Entity" shall mean the union of the acting entity and all | |||
| other entities that control, are controlled by, or are under common | |||
| control with that entity. For the purposes of this definition, | |||
| "control" means (i) the power, direct or indirect, to cause the | |||
| direction or management of such entity, whether by contract or | |||
| otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
| outstanding shares, or (iii) beneficial ownership of such entity. | |||
| "You" (or "Your") shall mean an individual or Legal Entity | |||
| exercising permissions granted by this License. | |||
| "Source" form shall mean the preferred form for making modifications, | |||
| including but not limited to software source code, documentation | |||
| source, and configuration files. | |||
| "Object" form shall mean any form resulting from mechanical | |||
| transformation or translation of a Source form, including but | |||
| not limited to compiled object code, generated documentation, | |||
| and conversions to other media types. | |||
| "Work" shall mean the work of authorship, whether in Source or | |||
| Object form, made available under the License, as indicated by a | |||
| copyright notice that is included in or attached to the work | |||
| (an example is provided in the Appendix below). | |||
| "Derivative Works" shall mean any work, whether in Source or Object | |||
| form, that is based on (or derived from) the Work and for which the | |||
| editorial revisions, annotations, elaborations, or other modifications | |||
| represent, as a whole, an original work of authorship. For the purposes | |||
| of this License, Derivative Works shall not include works that remain | |||
| separable from, or merely link (or bind by name) to the interfaces of, | |||
| the Work and Derivative Works thereof. | |||
| "Contribution" shall mean any work of authorship, including | |||
| the original version of the Work and any modifications or additions | |||
| to that Work or Derivative Works thereof, that is intentionally | |||
| submitted to Licensor for inclusion in the Work by the copyright owner | |||
| or by an individual or Legal Entity authorized to submit on behalf of | |||
| the copyright owner. For the purposes of this definition, "submitted" | |||
| means any form of electronic, verbal, or written communication sent | |||
| to the Licensor or its representatives, including but not limited to | |||
| communication on electronic mailing lists, source code control systems, | |||
| and issue tracking systems that are managed by, or on behalf of, the | |||
| Licensor for the purpose of discussing and improving the Work, but | |||
| excluding communication that is conspicuously marked or otherwise | |||
| designated in writing by the copyright owner as "Not a Contribution." | |||
| "Contributor" shall mean Licensor and any individual or Legal Entity | |||
| on behalf of whom a Contribution has been received by Licensor and | |||
| subsequently incorporated within the Work. | |||
| 2. Grant of Copyright License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| copyright license to reproduce, prepare Derivative Works of, | |||
| publicly display, publicly perform, sublicense, and distribute the | |||
| Work and such Derivative Works in Source or Object form. | |||
| 3. Grant of Patent License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| (except as stated in this section) patent license to make, have made, | |||
| use, offer to sell, sell, import, and otherwise transfer the Work, | |||
| where such license applies only to those patent claims licensable | |||
| by such Contributor that are necessarily infringed by their | |||
| Contribution(s) alone or by combination of their Contribution(s) | |||
| with the Work to which such Contribution(s) was submitted. If You | |||
| institute patent litigation against any entity (including a | |||
| cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
| or a Contribution incorporated within the Work constitutes direct | |||
| or contributory patent infringement, then any patent licenses | |||
| granted to You under this License for that Work shall terminate | |||
| as of the date such litigation is filed. | |||
| 4. Redistribution. You may reproduce and distribute copies of the | |||
| Work or Derivative Works thereof in any medium, with or without | |||
| modifications, and in Source or Object form, provided that You | |||
| meet the following conditions: | |||
| (a) You must give any other recipients of the Work or | |||
| Derivative Works a copy of this License; and | |||
| (b) You must cause any modified files to carry prominent notices | |||
| stating that You changed the files; and | |||
| (c) You must retain, in the Source form of any Derivative Works | |||
| that You distribute, all copyright, patent, trademark, and | |||
| attribution notices from the Source form of the Work, | |||
| excluding those notices that do not pertain to any part of | |||
| the Derivative Works; and | |||
| (d) If the Work includes a "NOTICE" text file as part of its | |||
| distribution, then any Derivative Works that You distribute must | |||
| include a readable copy of the attribution notices contained | |||
| within such NOTICE file, excluding those notices that do not | |||
| pertain to any part of the Derivative Works, in at least one | |||
| of the following places: within a NOTICE text file distributed | |||
| as part of the Derivative Works; within the Source form or | |||
| documentation, if provided along with the Derivative Works; or, | |||
| within a display generated by the Derivative Works, if and | |||
| wherever such third-party notices normally appear. The contents | |||
| of the NOTICE file are for informational purposes only and | |||
| do not modify the License. You may add Your own attribution | |||
| notices within Derivative Works that You distribute, alongside | |||
| or as an addendum to the NOTICE text from the Work, provided | |||
| that such additional attribution notices cannot be construed | |||
| as modifying the License. | |||
| You may add Your own copyright statement to Your modifications and | |||
| may provide additional or different license terms and conditions | |||
| for use, reproduction, or distribution of Your modifications, or | |||
| for any such Derivative Works as a whole, provided Your use, | |||
| reproduction, and distribution of the Work otherwise complies with | |||
| the conditions stated in this License. | |||
| 5. Submission of Contributions. Unless You explicitly state otherwise, | |||
| any Contribution intentionally submitted for inclusion in the Work | |||
| by You to the Licensor shall be under the terms and conditions of | |||
| this License, without any additional terms or conditions. | |||
| Notwithstanding the above, nothing herein shall supersede or modify | |||
| the terms of any separate license agreement you may have executed | |||
| with Licensor regarding such Contributions. | |||
| 6. Trademarks. This License does not grant permission to use the trade | |||
| names, trademarks, service marks, or product names of the Licensor, | |||
| except as required for reasonable and customary use in describing the | |||
| origin of the Work and reproducing the content of the NOTICE file. | |||
| 7. Disclaimer of Warranty. Unless required by applicable law or | |||
| agreed to in writing, Licensor provides the Work (and each | |||
| Contributor provides its Contributions) on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
| implied, including, without limitation, any warranties or conditions | |||
| of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
| PARTICULAR PURPOSE. You are solely responsible for determining the | |||
| appropriateness of using or redistributing the Work and assume any | |||
| risks associated with Your exercise of permissions under this License. | |||
| 8. Limitation of Liability. In no event and under no legal theory, | |||
| whether in tort (including negligence), contract, or otherwise, | |||
| unless required by applicable law (such as deliberate and grossly | |||
| negligent acts) or agreed to in writing, shall any Contributor be | |||
| liable to You for damages, including any direct, indirect, special, | |||
| incidental, or consequential damages of any character arising as a | |||
| result of this License or out of the use or inability to use the | |||
| Work (including but not limited to damages for loss of goodwill, | |||
| work stoppage, computer failure or malfunction, or any and all | |||
| other commercial damages or losses), even if such Contributor | |||
| has been advised of the possibility of such damages. | |||
| 9. Accepting Warranty or Additional Liability. While redistributing | |||
| the Work or Derivative Works thereof, You may choose to offer, | |||
| and charge a fee for, acceptance of support, warranty, indemnity, | |||
| or other liability obligations and/or rights consistent with this | |||
| License. However, in accepting such obligations, You may act only | |||
| on Your own behalf and on Your sole responsibility, not on behalf | |||
| of any other Contributor, and only if You agree to indemnify, | |||
| defend, and hold each Contributor harmless for any liability | |||
| incurred by, or claims asserted against, such Contributor by reason | |||
| of your accepting any such warranty or additional liability. | |||
| END OF TERMS AND CONDITIONS | |||
| APPENDIX: How to apply the Apache License to your work. | |||
| To apply the Apache License to your work, attach the following | |||
| boilerplate notice, with the fields enclosed by brackets "{}" | |||
| replaced with your own identifying information. (Don't include | |||
| the brackets!) The text should be enclosed in the appropriate | |||
| comment syntax for the file format. We also recommend that a | |||
| file or class name and description of purpose be included on the | |||
| same "printed page" as the copyright notice for easier | |||
| identification within third-party archives. | |||
| Copyright {yyyy} {name of copyright owner} | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| @@ -1,243 +0,0 @@ | |||
| package client | |||
| import ( | |||
| "container/list" | |||
| "time" | |||
| "github.com/juju/errors" | |||
| "github.com/ngaut/log" | |||
| "github.com/ngaut/tso/proto" | |||
| "github.com/ngaut/tso/util" | |||
| "github.com/ngaut/zkhelper" | |||
| ) | |||
| const ( | |||
| maxPipelineRequest = 100000 | |||
| ) | |||
| // Client is a timestamp oracle client. | |||
| type Client struct { | |||
| requests chan *PipelineRequest | |||
| pending *list.List | |||
| conf *Conf | |||
| addr string | |||
| leaderCh chan string | |||
| } | |||
| // Conf is the configuration. | |||
| type Conf struct { | |||
| // tso server address, it will be deprecated later. | |||
| ServerAddr string | |||
| // ZKAddr is for zookeeper address, if set, client will ignore ServerAddr | |||
| // and find the leader tso server address in zookeeper. | |||
| // Later ServerAddr is just for simple test and backward compatibility. | |||
| ZKAddr string | |||
| // root path is the tso server saving in zookeeper, like /zk/tso. | |||
| RootPath string | |||
| } | |||
| // PipelineRequest let you get the timestamp with pipeline. | |||
| type PipelineRequest struct { | |||
| done chan error | |||
| reply *proto.Response | |||
| } | |||
| func newPipelineRequest() *PipelineRequest { | |||
| return &PipelineRequest{ | |||
| done: make(chan error, 1), | |||
| } | |||
| } | |||
| // MarkDone sets the repsone for current request. | |||
| func (pr *PipelineRequest) MarkDone(reply *proto.Response, err error) { | |||
| if err != nil { | |||
| pr.reply = nil | |||
| } | |||
| pr.reply = reply | |||
| pr.done <- errors.Trace(err) | |||
| } | |||
| // GetTS gets the timestamp. | |||
| func (pr *PipelineRequest) GetTS() (*proto.Timestamp, error) { | |||
| err := <-pr.done | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| return &pr.reply.Timestamp, nil | |||
| } | |||
| // NewClient creates a timestamp oracle client. | |||
| func NewClient(conf *Conf) *Client { | |||
| c := &Client{ | |||
| requests: make(chan *PipelineRequest, maxPipelineRequest), | |||
| pending: list.New(), | |||
| conf: conf, | |||
| leaderCh: make(chan string, 1), | |||
| } | |||
| if len(conf.ZKAddr) == 0 { | |||
| c.leaderCh <- conf.ServerAddr | |||
| } else { | |||
| go c.watchLeader() | |||
| } | |||
| go c.workerLoop() | |||
| return c | |||
| } | |||
| func (c *Client) cleanupPending(err error) { | |||
| log.Warn(err) | |||
| length := c.pending.Len() | |||
| for i := 0; i < length; i++ { | |||
| e := c.pending.Front() | |||
| c.pending.Remove(e) | |||
| e.Value.(*PipelineRequest).MarkDone(nil, err) | |||
| } | |||
| // clear request in channel too | |||
| length = len(c.requests) | |||
| for i := 0; i < length; i++ { | |||
| req := <-c.requests | |||
| req.MarkDone(nil, err) | |||
| } | |||
| } | |||
| func (c *Client) notifyOne(reply *proto.Response) { | |||
| e := c.pending.Front() | |||
| c.pending.Remove(e) | |||
| req := e.Value.(*PipelineRequest) | |||
| req.MarkDone(reply, nil) | |||
| } | |||
| func (c *Client) writeRequests(session *Conn) error { | |||
| var protoHdr [1]byte | |||
| for i := 0; i < c.pending.Len(); i++ { | |||
| session.Write(protoHdr[:]) | |||
| } | |||
| return session.Flush() | |||
| } | |||
| func (c *Client) handleResponse(session *Conn) error { | |||
| length := c.pending.Len() | |||
| for i := 0; i < length; i++ { | |||
| var resp proto.Response | |||
| err := resp.Decode(session) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| c.notifyOne(&resp) | |||
| } | |||
| return nil | |||
| } | |||
| func (c *Client) do() error { | |||
| session, err := NewConnection(c.addr, time.Duration(1*time.Second)) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| log.Debugf("connect tso server %s ok", c.addr) | |||
| defer session.Close() | |||
| for { | |||
| select { | |||
| case req := <-c.requests: | |||
| c.pending.PushBack(req) | |||
| length := len(c.requests) | |||
| for i := 0; i < length; i++ { | |||
| req = <-c.requests | |||
| c.pending.PushBack(req) | |||
| } | |||
| err = c.writeRequests(session) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| err = c.handleResponse(session) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| case addr := <-c.leaderCh: | |||
| oldAddr := c.addr | |||
| c.addr = addr | |||
| return errors.Errorf("leader change %s -> %s", oldAddr, addr) | |||
| } | |||
| } | |||
| } | |||
| func (c *Client) workerLoop() { | |||
| // first get tso leader | |||
| c.addr = <-c.leaderCh | |||
| log.Debugf("try to connect tso server %s", c.addr) | |||
| for { | |||
| err := c.do() | |||
| if err != nil { | |||
| c.cleanupPending(err) | |||
| } | |||
| select { | |||
| case <-time.After(1 * time.Second): | |||
| case addr := <-c.leaderCh: | |||
| // If old tso server down, NewConnection will fail and return immediately in do function, | |||
| // so we must check leader change here. | |||
| log.Warnf("leader change %s -> %s", c.addr, addr) | |||
| c.addr = addr | |||
| // Wait some time to let tso server allow accepting connections. | |||
| time.Sleep(1 * time.Second) | |||
| } | |||
| } | |||
| } | |||
| func (c *Client) watchLeader() { | |||
| var ( | |||
| conn zkhelper.Conn | |||
| err error | |||
| ) | |||
| for { | |||
| conn, err = zkhelper.ConnectToZkWithTimeout(c.conf.ZKAddr, time.Second) | |||
| if err != nil { | |||
| log.Errorf("connect zk err %v, retry later", err) | |||
| time.Sleep(3 * time.Second) | |||
| continue | |||
| } | |||
| break | |||
| } | |||
| defer conn.Close() | |||
| var lastAddr string | |||
| for { | |||
| addr, watcher, err := util.GetWatchLeader(conn, c.conf.RootPath) | |||
| if err != nil { | |||
| log.Errorf("get tso leader err %v, retry later", err) | |||
| time.Sleep(3 * time.Second) | |||
| continue | |||
| } | |||
| if lastAddr != addr { | |||
| log.Warnf("leader change %s -> %s", lastAddr, addr) | |||
| lastAddr = addr | |||
| c.leaderCh <- addr | |||
| } | |||
| // watch the leader changes. | |||
| <-watcher | |||
| } | |||
| } | |||
| // GoGetTimestamp returns a PipelineRequest so you can get the timestamp later. | |||
| func (c *Client) GoGetTimestamp() *PipelineRequest { | |||
| pr := newPipelineRequest() | |||
| c.requests <- pr | |||
| return pr | |||
| } | |||
| @@ -1,50 +0,0 @@ | |||
| package client | |||
| import ( | |||
| "bufio" | |||
| "net" | |||
| "time" | |||
| "github.com/ngaut/deadline" | |||
| ) | |||
| // Conn is the connection for timestamp oracle server, it is not thread safe. | |||
| type Conn struct { | |||
| addr string | |||
| net.Conn | |||
| closed bool | |||
| r *bufio.Reader | |||
| w *bufio.Writer | |||
| netTimeout time.Duration | |||
| } | |||
| // NewConnection creates a conn. | |||
| func NewConnection(addr string, netTimeout time.Duration) (*Conn, error) { | |||
| conn, err := net.DialTimeout("tcp", addr, netTimeout) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return &Conn{ | |||
| addr: addr, | |||
| Conn: conn, | |||
| r: bufio.NewReaderSize(deadline.NewDeadlineReader(conn, netTimeout), 512*1024), | |||
| w: bufio.NewWriterSize(deadline.NewDeadlineWriter(conn, netTimeout), 512*1024), | |||
| netTimeout: netTimeout, | |||
| }, nil | |||
| } | |||
| // Read reads data and stores it into p. | |||
| func (c *Conn) Read(p []byte) (int, error) { | |||
| return c.r.Read(p) | |||
| } | |||
| // Flush flushs buffered data. | |||
| func (c *Conn) Flush() error { | |||
| return c.w.Flush() | |||
| } | |||
| // Write writes p. | |||
| func (c *Conn) Write(p []byte) (int, error) { | |||
| return c.w.Write(p) | |||
| } | |||
| @@ -1,45 +0,0 @@ | |||
| package proto | |||
| import ( | |||
| "encoding/binary" | |||
| "io" | |||
| "github.com/juju/errors" | |||
| ) | |||
| // RequestHeader is for tso request proto. | |||
| type RequestHeader struct { | |||
| } | |||
| // Timestamp is for tso timestamp. | |||
| type Timestamp struct { | |||
| Physical int64 | |||
| Logical int64 | |||
| } | |||
| // Response is for tso reponse proto. | |||
| type Response struct { | |||
| Timestamp | |||
| } | |||
| // Encode encodes repsonse proto into w. | |||
| func (res *Response) Encode(w io.Writer) error { | |||
| var buf [16]byte | |||
| binary.BigEndian.PutUint64(buf[0:8], uint64(res.Physical)) | |||
| binary.BigEndian.PutUint64(buf[8:16], uint64(res.Logical)) | |||
| _, err := w.Write(buf[0:16]) | |||
| return errors.Trace(err) | |||
| } | |||
| // Decode decodes reponse proto from r. | |||
| func (res *Response) Decode(r io.Reader) error { | |||
| var buf [16]byte | |||
| _, err := io.ReadFull(r, buf[0:16]) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| res.Physical = int64(binary.BigEndian.Uint64(buf[0:8])) | |||
| res.Logical = int64(binary.BigEndian.Uint64(buf[8:16])) | |||
| return nil | |||
| } | |||
| @@ -1,81 +0,0 @@ | |||
| // Copyright 2015 PingCAP, Inc. | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| package util | |||
| import ( | |||
| "encoding/json" | |||
| "path" | |||
| "github.com/juju/errors" | |||
| "github.com/ngaut/go-zookeeper/zk" | |||
| "github.com/ngaut/zkhelper" | |||
| ) | |||
| func getLeader(data []byte) (string, error) { | |||
| m := struct { | |||
| Addr string `json:"Addr"` | |||
| }{} | |||
| err := json.Unmarshal(data, &m) | |||
| if err != nil { | |||
| return "", errors.Trace(err) | |||
| } | |||
| return m.Addr, nil | |||
| } | |||
| // getLeaderPath gets the leader path in zookeeper. | |||
| func getLeaderPath(rootPath string) string { | |||
| return path.Join(rootPath, "leader") | |||
| } | |||
| // func checkLeaderExists(conn zkhelper.Conn) error { | |||
| // // the leader node is not ephemeral, so we may meet no any tso server but leader node | |||
| // // has the data for last closed tso server. | |||
| // // TODO: check children in /candidates, if no child, we will treat it as no leader too. | |||
| // return nil | |||
| // } | |||
| // GetLeaderAddr gets the leader tso address in zookeeper for outer use. | |||
| func GetLeader(conn zkhelper.Conn, rootPath string) (string, error) { | |||
| data, _, err := conn.Get(getLeaderPath(rootPath)) | |||
| if err != nil { | |||
| return "", errors.Trace(err) | |||
| } | |||
| // if err != checkLeaderExists(conn); err != nil { | |||
| // return "", errors.Trace(err) | |||
| // } | |||
| return getLeader(data) | |||
| } | |||
| // GetWatchLeader gets the leader tso address in zookeeper and returns a watcher for leader change. | |||
| func GetWatchLeader(conn zkhelper.Conn, rootPath string) (string, <-chan zk.Event, error) { | |||
| data, _, watcher, err := conn.GetW(getLeaderPath(rootPath)) | |||
| if err != nil { | |||
| return "", nil, errors.Trace(err) | |||
| } | |||
| addr, err := getLeader(data) | |||
| if err != nil { | |||
| return "", nil, errors.Trace(err) | |||
| } | |||
| // if err != checkLeaderExists(conn); err != nil { | |||
| // return "", errors.Trace(err) | |||
| // } | |||
| return addr, watcher, nil | |||
| } | |||
| @@ -1,53 +0,0 @@ | |||
| package zkhelper | |||
| import ( | |||
| zk "github.com/ngaut/go-zookeeper/zk" | |||
| ) | |||
| /* | |||
| type Stat interface { | |||
| Czxid() int64 | |||
| Mzxid() int64 | |||
| CTime() time.Time | |||
| MTime() time.Time | |||
| Version() int | |||
| CVersion() int | |||
| AVersion() int | |||
| EphemeralOwner() int64 | |||
| DataLength() int | |||
| NumChildren() int | |||
| Pzxid() int64 | |||
| } | |||
| */ | |||
| // This interface is really close to the zookeeper connection | |||
| // interface. It uses the Stat interface defined here instead of the | |||
| // zookeeper.Stat structure for stats. Everything else is the same as | |||
| // in zookeeper. So refer to the zookeeper docs for the conventions | |||
| // used here (for instance, using -1 as version to specify any | |||
| // version) | |||
| type Conn interface { | |||
| Get(path string) (data []byte, stat zk.Stat, err error) | |||
| GetW(path string) (data []byte, stat zk.Stat, watch <-chan zk.Event, err error) | |||
| Children(path string) (children []string, stat zk.Stat, err error) | |||
| ChildrenW(path string) (children []string, stat zk.Stat, watch <-chan zk.Event, err error) | |||
| Exists(path string) (exist bool, stat zk.Stat, err error) | |||
| ExistsW(path string) (exist bool, stat zk.Stat, watch <-chan zk.Event, err error) | |||
| Create(path string, value []byte, flags int32, aclv []zk.ACL) (pathCreated string, err error) | |||
| Set(path string, value []byte, version int32) (stat zk.Stat, err error) | |||
| Delete(path string, version int32) (err error) | |||
| Close() | |||
| //RetryChange(path string, flags int, acl []ACL, changeFunc ChangeFunc) error | |||
| GetACL(path string) ([]zk.ACL, zk.Stat, error) | |||
| SetACL(path string, aclv []zk.ACL, version int32) (zk.Stat, error) | |||
| Seq2Str(seq int64) string | |||
| } | |||
| @@ -1,472 +0,0 @@ | |||
| package zkhelper | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "path" | |||
| "strings" | |||
| "sync" | |||
| "time" | |||
| etcderr "github.com/coreos/etcd/error" | |||
| "github.com/coreos/go-etcd/etcd" | |||
| zk "github.com/ngaut/go-zookeeper/zk" | |||
| "github.com/ngaut/log" | |||
| "github.com/ngaut/pools" | |||
| ) | |||
| var ( | |||
| singleInstanceLock sync.Mutex | |||
| etcdInstance *etcdImpl | |||
| ) | |||
| type PooledEtcdClient struct { | |||
| c *etcd.Client | |||
| } | |||
| func (c *PooledEtcdClient) Close() { | |||
| } | |||
| func (e *etcdImpl) Seq2Str(seq int64) string { | |||
| return fmt.Sprintf("%d", seq) | |||
| } | |||
| type etcdImpl struct { | |||
| sync.Mutex | |||
| cluster string | |||
| pool *pools.ResourcePool | |||
| indexMap map[string]uint64 | |||
| } | |||
| func convertToZkError(err error) error { | |||
| //todo: convert other errors | |||
| if ec, ok := err.(*etcd.EtcdError); ok { | |||
| switch ec.ErrorCode { | |||
| case etcderr.EcodeKeyNotFound: | |||
| return zk.ErrNoNode | |||
| case etcderr.EcodeNotFile: | |||
| case etcderr.EcodeNotDir: | |||
| case etcderr.EcodeNodeExist: | |||
| return zk.ErrNodeExists | |||
| case etcderr.EcodeDirNotEmpty: | |||
| return zk.ErrNotEmpty | |||
| } | |||
| } | |||
| return err | |||
| } | |||
| func convertToZkEvent(watchPath string, resp *etcd.Response, err error) zk.Event { | |||
| //log.Infof("convert event from path:%s, %+v, %+v", watchPath, resp, resp.Node.Key) | |||
| var e zk.Event | |||
| if err != nil { | |||
| e.Err = convertToZkError(err) | |||
| e.State = zk.StateDisconnected | |||
| return e | |||
| } | |||
| e.State = zk.StateConnected | |||
| e.Path = resp.Node.Key | |||
| if len(resp.Node.Key) > len(watchPath) { | |||
| e.Type = zk.EventNodeChildrenChanged | |||
| return e | |||
| } | |||
| switch resp.Action { | |||
| case "set": | |||
| e.Type = zk.EventNodeDataChanged | |||
| case "delete": | |||
| e.Type = zk.EventNodeDeleted | |||
| case "update": | |||
| e.Type = zk.EventNodeDataChanged | |||
| case "create": | |||
| e.Type = zk.EventNodeCreated | |||
| case "expire": | |||
| e.Type = zk.EventNotWatching | |||
| } | |||
| return e | |||
| } | |||
| func NewEtcdConn(zkAddr string) (Conn, error) { | |||
| singleInstanceLock.Lock() | |||
| defer singleInstanceLock.Unlock() | |||
| if etcdInstance != nil { | |||
| return etcdInstance, nil | |||
| } | |||
| p := pools.NewResourcePool(func() (pools.Resource, error) { | |||
| cluster := strings.Split(zkAddr, ",") | |||
| for i, addr := range cluster { | |||
| if !strings.HasPrefix(addr, "http://") { | |||
| cluster[i] = "http://" + addr | |||
| } | |||
| } | |||
| newClient := etcd.NewClient(cluster) | |||
| newClient.SetConsistency(etcd.STRONG_CONSISTENCY) | |||
| return &PooledEtcdClient{c: newClient}, nil | |||
| }, 10, 10, 0) | |||
| etcdInstance = &etcdImpl{ | |||
| cluster: zkAddr, | |||
| pool: p, | |||
| indexMap: make(map[string]uint64), | |||
| } | |||
| log.Infof("new etcd %s", zkAddr) | |||
| if etcdInstance == nil { | |||
| return nil, errors.New("unknown error") | |||
| } | |||
| return etcdInstance, nil | |||
| } | |||
| func (e *etcdImpl) Get(key string) (data []byte, stat zk.Stat, err error) { | |||
| conn, err := e.pool.Get() | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| defer e.pool.Put(conn) | |||
| c := conn.(*PooledEtcdClient).c | |||
| resp, err := c.Get(key, true, false) | |||
| if resp == nil { | |||
| return nil, nil, convertToZkError(err) | |||
| } | |||
| return []byte(resp.Node.Value), nil, nil | |||
| } | |||
| func (e *etcdImpl) setIndex(key string, index uint64) { | |||
| e.Lock() | |||
| defer e.Unlock() | |||
| e.indexMap[key] = index | |||
| } | |||
| func (e *etcdImpl) getIndex(key string) uint64 { | |||
| e.Lock() | |||
| defer e.Unlock() | |||
| index := e.indexMap[key] | |||
| return index | |||
| } | |||
| func (e *etcdImpl) watch(key string, children bool) (resp *etcd.Response, stat zk.Stat, watch <-chan zk.Event, err error) { | |||
| conn, err := e.pool.Get() | |||
| if err != nil { | |||
| return nil, nil, nil, err | |||
| } | |||
| defer e.pool.Put(conn) | |||
| c := conn.(*PooledEtcdClient).c | |||
| index := e.getIndex(key) | |||
| resp, err = c.Get(key, true, true) | |||
| if resp == nil { | |||
| return nil, nil, nil, convertToZkError(err) | |||
| } | |||
| if index < resp.Node.ModifiedIndex { | |||
| index = resp.Node.ModifiedIndex | |||
| } | |||
| for _, n := range resp.Node.Nodes { | |||
| if n.ModifiedIndex > index { | |||
| index = n.ModifiedIndex | |||
| } | |||
| } | |||
| log.Info("try watch", key) | |||
| ch := make(chan zk.Event, 100) | |||
| originVal := resp.Node.Value | |||
| go func() { | |||
| defer func() { | |||
| e.setIndex(key, index) | |||
| }() | |||
| for { | |||
| conn, err := e.pool.Get() | |||
| if err != nil { | |||
| log.Error(err) | |||
| return | |||
| } | |||
| c := conn.(*PooledEtcdClient).c | |||
| resp, err := c.Watch(key, index, children, nil, nil) | |||
| e.pool.Put(conn) | |||
| if err != nil { | |||
| if ec, ok := err.(*etcd.EtcdError); ok { | |||
| if ec.ErrorCode == etcderr.EcodeEventIndexCleared { | |||
| index++ | |||
| continue | |||
| } | |||
| } | |||
| log.Warning("watch", err) | |||
| ch <- convertToZkEvent(key, resp, err) | |||
| return | |||
| } | |||
| if key == resp.Node.Key && originVal == string(resp.Node.Value) { //keep alive event | |||
| index++ | |||
| continue | |||
| } | |||
| ch <- convertToZkEvent(key, resp, err) | |||
| //update index | |||
| if index <= resp.Node.ModifiedIndex { | |||
| index = resp.Node.ModifiedIndex + 1 | |||
| } else { | |||
| index++ | |||
| } | |||
| return | |||
| } | |||
| }() | |||
| return resp, nil, ch, nil | |||
| } | |||
| func (e *etcdImpl) GetW(key string) (data []byte, stat zk.Stat, watch <-chan zk.Event, err error) { | |||
| resp, stat, watch, err := e.watch(key, false) | |||
| if err != nil { | |||
| return | |||
| } | |||
| return []byte(resp.Node.Value), stat, watch, nil | |||
| } | |||
| func (e *etcdImpl) Children(key string) (children []string, stat zk.Stat, err error) { | |||
| conn, err := e.pool.Get() | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| defer e.pool.Put(conn) | |||
| c := conn.(*PooledEtcdClient).c | |||
| resp, err := c.Get(key, true, false) | |||
| if resp == nil { | |||
| return nil, nil, convertToZkError(err) | |||
| } | |||
| for _, c := range resp.Node.Nodes { | |||
| children = append(children, path.Base(c.Key)) | |||
| } | |||
| return | |||
| } | |||
| func (e *etcdImpl) ChildrenW(key string) (children []string, stat zk.Stat, watch <-chan zk.Event, err error) { | |||
| resp, stat, watch, err := e.watch(key, true) | |||
| if err != nil { | |||
| return nil, stat, nil, convertToZkError(err) | |||
| } | |||
| for _, c := range resp.Node.Nodes { | |||
| children = append(children, path.Base(c.Key)) | |||
| } | |||
| return children, stat, watch, nil | |||
| } | |||
| func (e *etcdImpl) Exists(key string) (exist bool, stat zk.Stat, err error) { | |||
| conn, err := e.pool.Get() | |||
| if err != nil { | |||
| return false, nil, err | |||
| } | |||
| defer e.pool.Put(conn) | |||
| c := conn.(*PooledEtcdClient).c | |||
| _, err = c.Get(key, true, false) | |||
| if err == nil { | |||
| return true, nil, nil | |||
| } | |||
| if ec, ok := err.(*etcd.EtcdError); ok { | |||
| if ec.ErrorCode == etcderr.EcodeKeyNotFound { | |||
| return false, nil, nil | |||
| } | |||
| } | |||
| return false, nil, convertToZkError(err) | |||
| } | |||
| func (e *etcdImpl) ExistsW(key string) (exist bool, stat zk.Stat, watch <-chan zk.Event, err error) { | |||
| _, stat, watch, err = e.watch(key, false) | |||
| if err != nil { | |||
| return false, nil, nil, convertToZkError(err) | |||
| } | |||
| return true, nil, watch, nil | |||
| } | |||
| const MAX_TTL = 365 * 24 * 60 * 60 | |||
| func (e *etcdImpl) doKeepAlive(key string, ttl uint64) error { | |||
| conn, err := e.pool.Get() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer e.pool.Put(conn) | |||
| c := conn.(*PooledEtcdClient).c | |||
| resp, err := c.Get(key, false, false) | |||
| if err != nil { | |||
| log.Error(err) | |||
| return err | |||
| } | |||
| if resp.Node.Dir { | |||
| return fmt.Errorf("can not set ttl to directory", key) | |||
| } | |||
| //log.Info("keep alive ", key) | |||
| resp, err = c.CompareAndSwap(key, resp.Node.Value, ttl, resp.Node.Value, resp.Node.ModifiedIndex) | |||
| if err == nil { | |||
| return nil | |||
| } | |||
| if ec, ok := err.(*etcd.EtcdError); ok && ec.ErrorCode == etcderr.EcodeTestFailed { | |||
| return nil | |||
| } | |||
| return err | |||
| } | |||
| //todo:add test for keepAlive | |||
| func (e *etcdImpl) keepAlive(key string, ttl uint64) { | |||
| go func() { | |||
| for { | |||
| time.Sleep(1 * time.Second) | |||
| err := e.doKeepAlive(key, ttl) | |||
| if err != nil { | |||
| log.Error(err) | |||
| return | |||
| } | |||
| } | |||
| }() | |||
| } | |||
| func (e *etcdImpl) Create(wholekey string, value []byte, flags int32, aclv []zk.ACL) (keyCreated string, err error) { | |||
| seq := (flags & zk.FlagSequence) != 0 | |||
| tmp := (flags & zk.FlagEphemeral) != 0 | |||
| ttl := uint64(MAX_TTL) | |||
| if tmp { | |||
| ttl = 5 | |||
| } | |||
| var resp *etcd.Response | |||
| conn, err := e.pool.Get() | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| defer e.pool.Put(conn) | |||
| c := conn.(*PooledEtcdClient).c | |||
| fn := c.Create | |||
| log.Info("create", wholekey) | |||
| if seq { | |||
| wholekey = path.Dir(wholekey) | |||
| fn = c.CreateInOrder | |||
| } else { | |||
| for _, v := range aclv { | |||
| if v.Perms == PERM_DIRECTORY { | |||
| log.Info("etcdImpl:create directory", wholekey) | |||
| fn = nil | |||
| resp, err = c.CreateDir(wholekey, uint64(ttl)) | |||
| if err != nil { | |||
| return "", convertToZkError(err) | |||
| } | |||
| } | |||
| } | |||
| } | |||
| if fn == nil { | |||
| if tmp { | |||
| e.keepAlive(wholekey, ttl) | |||
| } | |||
| return resp.Node.Key, nil | |||
| } | |||
| resp, err = fn(wholekey, string(value), uint64(ttl)) | |||
| if err != nil { | |||
| return "", convertToZkError(err) | |||
| } | |||
| if tmp { | |||
| e.keepAlive(resp.Node.Key, ttl) | |||
| } | |||
| return resp.Node.Key, nil | |||
| } | |||
| func (e *etcdImpl) Set(key string, value []byte, version int32) (stat zk.Stat, err error) { | |||
| if version == 0 { | |||
| return nil, errors.New("invalid version") | |||
| } | |||
| conn, err := e.pool.Get() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| defer e.pool.Put(conn) | |||
| c := conn.(*PooledEtcdClient).c | |||
| resp, err := c.Get(key, true, false) | |||
| if resp == nil { | |||
| return nil, convertToZkError(err) | |||
| } | |||
| _, err = c.Set(key, string(value), uint64(resp.Node.TTL)) | |||
| return nil, convertToZkError(err) | |||
| } | |||
| func (e *etcdImpl) Delete(key string, version int32) (err error) { | |||
| //todo: handle version | |||
| conn, err := e.pool.Get() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer e.pool.Put(conn) | |||
| c := conn.(*PooledEtcdClient).c | |||
| resp, err := c.Get(key, true, false) | |||
| if resp == nil { | |||
| return convertToZkError(err) | |||
| } | |||
| if resp.Node.Dir { | |||
| _, err = c.DeleteDir(key) | |||
| } else { | |||
| _, err = c.Delete(key, false) | |||
| } | |||
| return convertToZkError(err) | |||
| } | |||
| func (e *etcdImpl) GetACL(key string) ([]zk.ACL, zk.Stat, error) { | |||
| return nil, nil, nil | |||
| } | |||
| func (e *etcdImpl) SetACL(key string, aclv []zk.ACL, version int32) (zk.Stat, error) { | |||
| return nil, nil | |||
| } | |||
| func (e *etcdImpl) Close() { | |||
| //how to implement this | |||
| } | |||
| @@ -1,519 +0,0 @@ | |||
| // Copyright 2013, Google Inc. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // Package fakezk is a pretty complete mock implementation of a | |||
| // Zookeper connection (see go/zk/zk.Conn). All operations | |||
| // work as expected with the exceptions of zk.Conn.ACL and | |||
| // zk.Conn.SetACL. zk.Conn.SetACL will succeed, but it is a noop (and | |||
| // the ACLs won't be respected). zk.Conn.ACL will panic. It is OK to | |||
| // access the connection from multiple goroutines, but the locking is | |||
| // very naive (every operation locks the whole connection). | |||
| package zkhelper | |||
| import ( | |||
| "bytes" | |||
| "encoding/json" | |||
| "fmt" | |||
| "io/ioutil" | |||
| "strings" | |||
| "sync" | |||
| "time" | |||
| "github.com/ngaut/go-zookeeper/zk" | |||
| ) | |||
| type zconn struct { | |||
| mu sync.Mutex | |||
| root *stat | |||
| zxid int64 | |||
| existWatches map[string][]chan zk.Event | |||
| } | |||
| func (conn *zconn) getZxid() int64 { | |||
| conn.zxid++ | |||
| return conn.zxid | |||
| } | |||
| func (conn *zconn) Seq2Str(seq int64) string { | |||
| return fmt.Sprintf("%0.10d", seq) | |||
| } | |||
| // NewConn returns a fake zk.Conn implementation. Data is stored in | |||
| // memory, and there's a global connection lock for concurrent access. | |||
| func NewConn() Conn { | |||
| return &zconn{ | |||
| root: &stat{ | |||
| name: "/", | |||
| children: make(map[string]*stat), | |||
| }, | |||
| existWatches: make(map[string][]chan zk.Event)} | |||
| } | |||
| // NewConnFromFile returns a fake zk.Conn implementation, that is seeded | |||
| // with the json data extracted from the input file. | |||
| func NewConnFromFile(filename string) Conn { | |||
| result := &zconn{ | |||
| root: &stat{ | |||
| name: "/", | |||
| children: make(map[string]*stat), | |||
| }, | |||
| existWatches: make(map[string][]chan zk.Event)} | |||
| data, err := ioutil.ReadFile(filename) | |||
| if err != nil { | |||
| panic(fmt.Errorf("NewConnFromFile failed to read file %v: %v", filename, err)) | |||
| } | |||
| values := make(map[string]interface{}) | |||
| if err := json.Unmarshal(data, &values); err != nil { | |||
| panic(fmt.Errorf("NewConnFromFile failed to json.Unmarshal file %v: %v", filename, err)) | |||
| } | |||
| for k, v := range values { | |||
| jv, err := json.Marshal(v) | |||
| if err != nil { | |||
| panic(fmt.Errorf("NewConnFromFile failed to json.Marshal value %v: %v", k, err)) | |||
| } | |||
| // CreateRecursive will work for a leaf node where the parent | |||
| // doesn't exist, but not for a node in the middle of a tree | |||
| // that already exists. So have to use 'Set' as a backup. | |||
| if _, err := CreateRecursive(result, k, string(jv), 0, nil); err != nil { | |||
| if ZkErrorEqual(err, zk.ErrNodeExists) { | |||
| _, err = result.Set(k, jv, -1) | |||
| } | |||
| if err != nil { | |||
| panic(fmt.Errorf("NewConnFromFile failed to zk.CreateRecursive value %v: %v", k, err)) | |||
| } | |||
| } | |||
| } | |||
| return result | |||
| } | |||
| func (conn *zconn) GetACL(path string) ([]zk.ACL, zk.Stat, error) { | |||
| return nil, nil, nil | |||
| } | |||
| func (conn *zconn) Get(zkPath string) (data []byte, stat zk.Stat, err error) { | |||
| conn.mu.Lock() | |||
| defer conn.mu.Unlock() | |||
| node, _, rest, err := conn.getNode(zkPath, "get") | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| if len(rest) != 0 { | |||
| return nil, nil, zkError(zk.ErrNoNode, "get", zkPath) | |||
| } | |||
| return []byte(node.content), node, nil | |||
| } | |||
| func (conn *zconn) GetW(zkPath string) (data []byte, stat zk.Stat, watch <-chan zk.Event, err error) { | |||
| conn.mu.Lock() | |||
| defer conn.mu.Unlock() | |||
| node, _, rest, err := conn.getNode(zkPath, "getw") | |||
| if err != nil { | |||
| return nil, nil, nil, err | |||
| } | |||
| if len(rest) != 0 { | |||
| return nil, nil, nil, zkError(zk.ErrNoNode, "getw", zkPath) | |||
| } | |||
| c := make(chan zk.Event, 1) | |||
| node.changeWatches = append(node.changeWatches, c) | |||
| return []byte(node.content), node, c, nil | |||
| } | |||
| func (conn *zconn) Children(zkPath string) (children []string, stat zk.Stat, err error) { | |||
| conn.mu.Lock() | |||
| defer conn.mu.Unlock() | |||
| //println("Children:", conn.String()) | |||
| node, _, rest, err := conn.getNode(zkPath, "children") | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| if len(rest) != 0 { | |||
| return nil, nil, zkError(zk.ErrNoNode, "children", zkPath) | |||
| } | |||
| for name := range node.children { | |||
| children = append(children, name) | |||
| } | |||
| return children, node, nil | |||
| } | |||
| func (conn *zconn) ChildrenW(zkPath string) (children []string, stat zk.Stat, watch <-chan zk.Event, err error) { | |||
| conn.mu.Lock() | |||
| defer conn.mu.Unlock() | |||
| //println("ChildrenW:", conn.String()) | |||
| node, _, rest, err := conn.getNode(zkPath, "childrenw") | |||
| if err != nil { | |||
| return nil, nil, nil, err | |||
| } | |||
| if len(rest) != 0 { | |||
| return nil, nil, nil, zkError(zk.ErrNoNode, "childrenw", zkPath) | |||
| } | |||
| c := make(chan zk.Event, 1) | |||
| node.childrenWatches = append(node.childrenWatches, c) | |||
| for name := range node.children { | |||
| children = append(children, name) | |||
| } | |||
| return children, node, c, nil | |||
| } | |||
| func (conn *zconn) Exists(zkPath string) (exist bool, stat zk.Stat, err error) { | |||
| // FIXME(szopa): if the path is bad, Op will be "get." | |||
| exist = false | |||
| _, stat, err = conn.Get(zkPath) | |||
| if err != nil { | |||
| if ZkErrorEqual(err, zk.ErrNoNode) { | |||
| err = nil | |||
| } | |||
| } else { | |||
| exist = true | |||
| } | |||
| return exist, stat, err | |||
| } | |||
| func (conn *zconn) ExistsW(zkPath string) (exist bool, stat zk.Stat, watch <-chan zk.Event, err error) { | |||
| conn.mu.Lock() | |||
| defer conn.mu.Unlock() | |||
| exist = false | |||
| c := make(chan zk.Event, 1) | |||
| node, _, rest, err := conn.getNode(zkPath, "existsw") | |||
| if err != nil { | |||
| return exist, nil, nil, err | |||
| } | |||
| if len(rest) != 0 { | |||
| watches, ok := conn.existWatches[zkPath] | |||
| if !ok { | |||
| watches = make([]chan zk.Event, 0) | |||
| conn.existWatches[zkPath] = watches | |||
| } | |||
| conn.existWatches[zkPath] = append(watches, c) | |||
| return exist, nil, c, nil | |||
| } | |||
| exist = true | |||
| node.existWatches = append(node.existWatches, c) | |||
| return exist, node, c, nil | |||
| } | |||
| func (conn *zconn) Create(zkPath string, value []byte, flags int32, aclv []zk.ACL) (zkPathCreated string, err error) { | |||
| conn.mu.Lock() | |||
| defer conn.mu.Unlock() | |||
| node, _, rest, err := conn.getNode(zkPath, "create") | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| if len(rest) == 0 { | |||
| return "", zkError(zk.ErrNodeExists, "create", zkPath) | |||
| } | |||
| if len(rest) > 1 { | |||
| return "", zkError(zk.ErrNoNode, "create", zkPath) | |||
| } | |||
| zxid := conn.getZxid() | |||
| name := rest[0] | |||
| if (flags & zk.FlagSequence) != 0 { | |||
| sequence := node.nextSequence() | |||
| name += sequence | |||
| zkPath = zkPath + sequence | |||
| } | |||
| stat := &stat{ | |||
| name: name, | |||
| content: string(value), | |||
| children: make(map[string]*stat), | |||
| acl: aclv, | |||
| mtime: time.Now(), | |||
| ctime: time.Now(), | |||
| czxid: zxid, | |||
| mzxid: zxid, | |||
| existWatches: make([]chan zk.Event, 0), | |||
| } | |||
| node.children[name] = stat | |||
| event := zk.Event{ | |||
| Type: zk.EventNodeCreated, | |||
| Path: zkPath, | |||
| State: zk.StateConnected, | |||
| } | |||
| if watches, ok := conn.existWatches[zkPath]; ok { | |||
| delete(conn.existWatches, zkPath) | |||
| for _, watch := range watches { | |||
| watch <- event | |||
| } | |||
| } | |||
| childrenEvent := zk.Event{ | |||
| Type: zk.EventNodeChildrenChanged, | |||
| Path: zkPath, | |||
| State: zk.StateConnected, | |||
| } | |||
| for _, watch := range node.childrenWatches { | |||
| watch <- childrenEvent | |||
| close(watch) | |||
| } | |||
| node.childrenWatches = nil | |||
| node.cversion++ | |||
| return zkPath, nil | |||
| } | |||
| func (conn *zconn) Set(zkPath string, value []byte, version int32) (stat zk.Stat, err error) { | |||
| conn.mu.Lock() | |||
| defer conn.mu.Unlock() | |||
| node, _, rest, err := conn.getNode(zkPath, "set") | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if len(rest) != 0 { | |||
| return nil, zkError(zk.ErrNoNode, "set", zkPath) | |||
| } | |||
| if version != -1 && node.version != int(version) { | |||
| return nil, zkError(zk.ErrBadVersion, "set", zkPath) | |||
| } | |||
| node.content = string(value) | |||
| node.version++ | |||
| for _, watch := range node.changeWatches { | |||
| watch <- zk.Event{ | |||
| Type: zk.EventNodeDataChanged, | |||
| Path: zkPath, | |||
| State: zk.StateConnected, | |||
| } | |||
| } | |||
| node.changeWatches = nil | |||
| return node, nil | |||
| } | |||
| func (conn *zconn) Delete(zkPath string, version int32) (err error) { | |||
| conn.mu.Lock() | |||
| defer conn.mu.Unlock() | |||
| node, parent, rest, err := conn.getNode(zkPath, "delete") | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if len(rest) > 0 { | |||
| return zkError(zk.ErrNoNode, "delete", zkPath) | |||
| } | |||
| if len(node.children) > 0 { | |||
| return zkError(zk.ErrNotEmpty, "delete", zkPath) | |||
| } | |||
| delete(parent.children, node.name) | |||
| event := zk.Event{ | |||
| Type: zk.EventNodeDeleted, | |||
| Path: zkPath, | |||
| State: zk.StateConnected, | |||
| } | |||
| for _, watch := range node.existWatches { | |||
| watch <- event | |||
| } | |||
| for _, watch := range node.changeWatches { | |||
| watch <- event | |||
| } | |||
| node.existWatches = nil | |||
| node.changeWatches = nil | |||
| childrenEvent := zk.Event{ | |||
| Type: zk.EventNodeChildrenChanged, | |||
| Path: zkPath, | |||
| State: zk.StateConnected} | |||
| for _, watch := range parent.childrenWatches { | |||
| watch <- childrenEvent | |||
| } | |||
| return nil | |||
| } | |||
| func (conn *zconn) Close() { | |||
| conn.mu.Lock() | |||
| defer conn.mu.Unlock() | |||
| for _, watches := range conn.existWatches { | |||
| for _, c := range watches { | |||
| close(c) | |||
| } | |||
| } | |||
| conn.root.closeAllWatches() | |||
| } | |||
| /* | |||
| func (conn *zconn) RetryChange(path string, flags int, acl []zk.ACL, changeFunc zk.ChangeFunc) error { | |||
| for { | |||
| oldValue, oldStat, err := conn.Get(path) | |||
| if err != nil && !ZkErrorEqual(err, zk.ErrNoNode) { | |||
| return err | |||
| } | |||
| newValue, err := changeFunc(oldValue, oldStat) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if oldStat == nil { | |||
| _, err := conn.Create(path, newValue, flags, acl) | |||
| if err == nil || !ZkErrorEqual(err, zk.ZNODEEXISTS) { | |||
| return err | |||
| } | |||
| continue | |||
| } | |||
| if newValue == oldValue { | |||
| return nil // Nothing to do. | |||
| } | |||
| _, err = conn.Set(path, newValue, oldStat.Version()) | |||
| if err == nil || !ZkErrorEqual(err, zk.ZBADVERSION) && !ZkErrorEqual(err, zk.ErrNoNode) { | |||
| return err | |||
| } | |||
| } | |||
| } | |||
| */ | |||
| func (conn *zconn) SetACL(zkPath string, aclv []zk.ACL, version int32) (zk.Stat, error) { | |||
| return nil, nil | |||
| } | |||
| func (conn *zconn) getNode(zkPath string, op string) (node *stat, parent *stat, rest []string, err error) { | |||
| // FIXME(szopa): Make sure the path starts with /. | |||
| parts := strings.Split(zkPath, "/") | |||
| if parts[0] != "" { | |||
| //todo: fix this, error bad arguments | |||
| return nil, nil, nil, zkError(zk.ErrUnknown, op, zkPath) | |||
| } | |||
| elements := parts[1:] | |||
| parent = nil | |||
| current := conn.root | |||
| for i, el := range elements { | |||
| candidateParent := current | |||
| candidate, ok := current.children[el] | |||
| if !ok { | |||
| return current, parent, elements[i:], nil | |||
| } | |||
| current, parent = candidate, candidateParent | |||
| } | |||
| return current, parent, []string{}, nil | |||
| } | |||
| type ZkError struct { | |||
| Code error | |||
| Op string | |||
| Path string | |||
| } | |||
| func (ze *ZkError) Error() string { | |||
| return ze.Code.Error() | |||
| } | |||
| // zkError creates an appropriate error return from | |||
| // a ZooKeeper status | |||
| func zkError(code error, op, path string) error { | |||
| return &ZkError{ | |||
| Op: op, | |||
| Code: code, | |||
| Path: path, | |||
| } | |||
| } | |||
| type stat struct { | |||
| name string | |||
| content string | |||
| children map[string]*stat | |||
| acl []zk.ACL | |||
| mtime time.Time | |||
| ctime time.Time | |||
| czxid int64 | |||
| mzxid int64 | |||
| pzxid int64 | |||
| version int | |||
| cversion int | |||
| aversion int | |||
| sequence int | |||
| existWatches []chan zk.Event | |||
| changeWatches []chan zk.Event | |||
| childrenWatches []chan zk.Event | |||
| } | |||
| func (st stat) closeAllWatches() { | |||
| for _, c := range st.existWatches { | |||
| close(c) | |||
| } | |||
| for _, c := range st.changeWatches { | |||
| close(c) | |||
| } | |||
| for _, c := range st.childrenWatches { | |||
| close(c) | |||
| } | |||
| for _, child := range st.children { | |||
| child.closeAllWatches() | |||
| } | |||
| } | |||
| func (st stat) Czxid() int64 { | |||
| return st.czxid | |||
| } | |||
| func (st stat) Mzxid() int64 { | |||
| return st.mzxid | |||
| } | |||
| func (st stat) CTime() time.Time { | |||
| return st.ctime | |||
| } | |||
| func (st stat) MTime() time.Time { | |||
| return st.mtime | |||
| } | |||
| func (st stat) Version() int { | |||
| return st.version | |||
| } | |||
| func (st stat) CVersion() int { | |||
| return st.cversion | |||
| } | |||
| func (st stat) AVersion() int { | |||
| return st.aversion | |||
| } | |||
| func (st stat) EphemeralOwner() int64 { | |||
| return 0 | |||
| } | |||
| func (st stat) DataLength() int { | |||
| return len(st.content) | |||
| } | |||
| func (st stat) NumChildren() int { | |||
| return len(st.children) | |||
| } | |||
| func (st stat) Pzxid() int64 { | |||
| return st.pzxid | |||
| } | |||
| func (st *stat) nextSequence() string { | |||
| st.sequence++ | |||
| return fmt.Sprintf("%010d", st.sequence) | |||
| } | |||
| func (st stat) fprintRecursive(level int, buf *bytes.Buffer) { | |||
| start := strings.Repeat(" ", level) | |||
| fmt.Fprintf(buf, "%v-%v:\n", start, st.name) | |||
| if st.content != "" { | |||
| fmt.Fprintf(buf, "%v content: %q\n\n", start, st.content) | |||
| } | |||
| if len(st.children) > 0 { | |||
| for _, child := range st.children { | |||
| child.fprintRecursive(level+1, buf) | |||
| } | |||
| } | |||
| } | |||
| func (conn *zconn) String() string { | |||
| b := new(bytes.Buffer) | |||
| conn.root.fprintRecursive(0, b) | |||
| return b.String() | |||
| } | |||
| @@ -1,899 +0,0 @@ | |||
| // zk helper functions | |||
| // modified from Vitess project | |||
| package zkhelper | |||
| import ( | |||
| "encoding/json" | |||
| "errors" | |||
| "fmt" | |||
| "math/rand" | |||
| "os" | |||
| "path" | |||
| "sort" | |||
| "strings" | |||
| "sync" | |||
| "time" | |||
| "github.com/ngaut/go-zookeeper/zk" | |||
| "github.com/ngaut/log" | |||
| ) | |||
| var ( | |||
| // This error is returned by functions that wait for a result | |||
| // when they are interrupted. | |||
| ErrInterrupted = errors.New("zkutil: obtaining lock was interrupted") | |||
| // This error is returned by functions that wait for a result | |||
| // when the timeout value is reached. | |||
| ErrTimeout = errors.New("zkutil: obtaining lock timed out") | |||
| ) | |||
| const ( | |||
| // PERM_DIRECTORY are default permissions for a node. | |||
| PERM_DIRECTORY = zk.PermAdmin | zk.PermCreate | zk.PermDelete | zk.PermRead | zk.PermWrite | |||
| // PERM_FILE allows a zk node to emulate file behavior by disallowing child nodes. | |||
| PERM_FILE = zk.PermAdmin | zk.PermRead | zk.PermWrite | |||
| MagicPrefix = "zk" | |||
| ) | |||
| func init() { | |||
| rand.Seed(time.Now().UnixNano()) | |||
| } | |||
| type MyZkConn struct { | |||
| *zk.Conn | |||
| } | |||
| func (conn *MyZkConn) Seq2Str(seq int64) string { | |||
| return fmt.Sprintf("%0.10d", seq) | |||
| } | |||
| func ConnectToZk(zkAddr string) (Conn, error) { | |||
| zkConn, _, err := zk.Connect(strings.Split(zkAddr, ","), 3*time.Second) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return &MyZkConn{Conn: zkConn}, nil | |||
| } | |||
| func ConnectToZkWithTimeout(zkAddr string, recvTime time.Duration) (Conn, error) { | |||
| zkConn, _, err := zk.Connect(strings.Split(zkAddr, ","), recvTime) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return &MyZkConn{Conn: zkConn}, nil | |||
| } | |||
| func DefaultACLs() []zk.ACL { | |||
| return zk.WorldACL(zk.PermAll) | |||
| } | |||
| func DefaultDirACLs() []zk.ACL { | |||
| return zk.WorldACL(PERM_DIRECTORY) | |||
| } | |||
| func DefaultFileACLs() []zk.ACL { | |||
| return zk.WorldACL(PERM_FILE) | |||
| } | |||
| // IsDirectory returns if this node should be treated as a directory. | |||
| func IsDirectory(aclv []zk.ACL) bool { | |||
| for _, acl := range aclv { | |||
| if acl.Perms != PERM_DIRECTORY { | |||
| return false | |||
| } | |||
| } | |||
| return true | |||
| } | |||
| func ZkErrorEqual(a, b error) bool { | |||
| if a != nil && b != nil { | |||
| return a.Error() == b.Error() | |||
| } | |||
| return a == b | |||
| } | |||
| // Create a path and any pieces required, think mkdir -p. | |||
| // Intermediate znodes are always created empty. | |||
| func CreateRecursive(zconn Conn, zkPath, value string, flags int, aclv []zk.ACL) (pathCreated string, err error) { | |||
| parts := strings.Split(zkPath, "/") | |||
| if parts[1] != MagicPrefix { | |||
| return "", fmt.Errorf("zkutil: non /%v path: %v", MagicPrefix, zkPath) | |||
| } | |||
| pathCreated, err = zconn.Create(zkPath, []byte(value), int32(flags), aclv) | |||
| if ZkErrorEqual(err, zk.ErrNoNode) { | |||
| // Make sure that nodes are either "file" or "directory" to mirror file system | |||
| // semantics. | |||
| dirAclv := make([]zk.ACL, len(aclv)) | |||
| for i, acl := range aclv { | |||
| dirAclv[i] = acl | |||
| dirAclv[i].Perms = PERM_DIRECTORY | |||
| } | |||
| _, err = CreateRecursive(zconn, path.Dir(zkPath), "", flags, dirAclv) | |||
| if err != nil && !ZkErrorEqual(err, zk.ErrNodeExists) { | |||
| return "", err | |||
| } | |||
| pathCreated, err = zconn.Create(zkPath, []byte(value), int32(flags), aclv) | |||
| } | |||
| return | |||
| } | |||
| func CreateOrUpdate(zconn Conn, zkPath, value string, flags int, aclv []zk.ACL, recursive bool) (pathCreated string, err error) { | |||
| if recursive { | |||
| pathCreated, err = CreateRecursive(zconn, zkPath, value, 0, aclv) | |||
| } else { | |||
| pathCreated, err = zconn.Create(zkPath, []byte(value), 0, aclv) | |||
| } | |||
| if err != nil && ZkErrorEqual(err, zk.ErrNodeExists) { | |||
| pathCreated = "" | |||
| _, err = zconn.Set(zkPath, []byte(value), -1) | |||
| } | |||
| return | |||
| } | |||
| type pathItem struct { | |||
| path string | |||
| err error | |||
| } | |||
| func ChildrenRecursive(zconn Conn, zkPath string) ([]string, error) { | |||
| var err error | |||
| mutex := sync.Mutex{} | |||
| wg := sync.WaitGroup{} | |||
| pathList := make([]string, 0, 32) | |||
| children, _, err := zconn.Children(zkPath) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| for _, child := range children { | |||
| wg.Add(1) | |||
| go func(child string) { | |||
| childPath := path.Join(zkPath, child) | |||
| rChildren, zkErr := ChildrenRecursive(zconn, childPath) | |||
| if zkErr != nil { | |||
| // If other processes are deleting nodes, we need to ignore | |||
| // the missing nodes. | |||
| if !ZkErrorEqual(zkErr, zk.ErrNoNode) { | |||
| mutex.Lock() | |||
| err = zkErr | |||
| mutex.Unlock() | |||
| } | |||
| } else { | |||
| mutex.Lock() | |||
| pathList = append(pathList, child) | |||
| for _, rChild := range rChildren { | |||
| pathList = append(pathList, path.Join(child, rChild)) | |||
| } | |||
| mutex.Unlock() | |||
| } | |||
| wg.Done() | |||
| }(child) | |||
| } | |||
| wg.Wait() | |||
| mutex.Lock() | |||
| defer mutex.Unlock() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return pathList, nil | |||
| } | |||
| func HasWildcard(path string) bool { | |||
| for i := 0; i < len(path); i++ { | |||
| switch path[i] { | |||
| case '\\': | |||
| if i+1 >= len(path) { | |||
| return true | |||
| } else { | |||
| i++ | |||
| } | |||
| case '*', '?', '[': | |||
| return true | |||
| } | |||
| } | |||
| return false | |||
| } | |||
| func resolveRecursive(zconn Conn, parts []string, toplevel bool) ([]string, error) { | |||
| for i, part := range parts { | |||
| if HasWildcard(part) { | |||
| var children []string | |||
| zkParentPath := strings.Join(parts[:i], "/") | |||
| var err error | |||
| children, _, err = zconn.Children(zkParentPath) | |||
| if err != nil { | |||
| // we asked for something like | |||
| // /zk/cell/aaa/* and | |||
| // /zk/cell/aaa doesn't exist | |||
| // -> return empty list, no error | |||
| // (note we check both a regular zk | |||
| // error and the error the test | |||
| // produces) | |||
| if ZkErrorEqual(err, zk.ErrNoNode) { | |||
| return nil, nil | |||
| } | |||
| // otherwise we return the error | |||
| return nil, err | |||
| } | |||
| sort.Strings(children) | |||
| results := make([][]string, len(children)) | |||
| wg := &sync.WaitGroup{} | |||
| mu := &sync.Mutex{} | |||
| var firstError error | |||
| for j, child := range children { | |||
| matched, err := path.Match(part, child) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if matched { | |||
| // we have a match! | |||
| wg.Add(1) | |||
| newParts := make([]string, len(parts)) | |||
| copy(newParts, parts) | |||
| newParts[i] = child | |||
| go func(j int) { | |||
| defer wg.Done() | |||
| subResult, err := resolveRecursive(zconn, newParts, false) | |||
| if err != nil { | |||
| mu.Lock() | |||
| if firstError != nil { | |||
| log.Infof("Multiple error: %v", err) | |||
| } else { | |||
| firstError = err | |||
| } | |||
| mu.Unlock() | |||
| } else { | |||
| results[j] = subResult | |||
| } | |||
| }(j) | |||
| } | |||
| } | |||
| wg.Wait() | |||
| if firstError != nil { | |||
| return nil, firstError | |||
| } | |||
| result := make([]string, 0, 32) | |||
| for j := 0; j < len(children); j++ { | |||
| subResult := results[j] | |||
| if subResult != nil { | |||
| result = append(result, subResult...) | |||
| } | |||
| } | |||
| // we found a part that is a wildcard, we | |||
| // added the children already, we're done | |||
| return result, nil | |||
| } | |||
| } | |||
| // no part contains a wildcard, add the path if it exists, and done | |||
| path := strings.Join(parts, "/") | |||
| if toplevel { | |||
| // for whatever the user typed at the toplevel, we don't | |||
| // check it exists or not, we just return it | |||
| return []string{path}, nil | |||
| } | |||
| // this is an expanded path, we need to check if it exists | |||
| _, stat, err := zconn.Exists(path) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if stat != nil { | |||
| return []string{path}, nil | |||
| } | |||
| return nil, nil | |||
| } | |||
| // resolve paths like: | |||
| // /zk/nyc/vt/tablets/*/action | |||
| // /zk/global/vt/keyspaces/*/shards/*/action | |||
| // /zk/*/vt/tablets/*/action | |||
| // into real existing paths | |||
| // | |||
| // If you send paths that don't contain any wildcard and | |||
| // don't exist, this function will return an empty array. | |||
| func ResolveWildcards(zconn Conn, zkPaths []string) ([]string, error) { | |||
| // check all the paths start with /zk/ before doing anything | |||
| // time consuming | |||
| // relax this in case we are not talking to a metaconn and | |||
| // just want to talk to a specified instance. | |||
| // for _, zkPath := range zkPaths { | |||
| // if _, err := ZkCellFromZkPath(zkPath); err != nil { | |||
| // return nil, err | |||
| // } | |||
| // } | |||
| results := make([][]string, len(zkPaths)) | |||
| wg := &sync.WaitGroup{} | |||
| mu := &sync.Mutex{} | |||
| var firstError error | |||
| for i, zkPath := range zkPaths { | |||
| wg.Add(1) | |||
| parts := strings.Split(zkPath, "/") | |||
| go func(i int) { | |||
| defer wg.Done() | |||
| subResult, err := resolveRecursive(zconn, parts, true) | |||
| if err != nil { | |||
| mu.Lock() | |||
| if firstError != nil { | |||
| log.Infof("Multiple error: %v", err) | |||
| } else { | |||
| firstError = err | |||
| } | |||
| mu.Unlock() | |||
| } else { | |||
| results[i] = subResult | |||
| } | |||
| }(i) | |||
| } | |||
| wg.Wait() | |||
| if firstError != nil { | |||
| return nil, firstError | |||
| } | |||
| result := make([]string, 0, 32) | |||
| for i := 0; i < len(zkPaths); i++ { | |||
| subResult := results[i] | |||
| if subResult != nil { | |||
| result = append(result, subResult...) | |||
| } | |||
| } | |||
| return result, nil | |||
| } | |||
| func DeleteRecursive(zconn Conn, zkPath string, version int) error { | |||
| // version: -1 delete any version of the node at path - only applies to the top node | |||
| err := zconn.Delete(zkPath, int32(version)) | |||
| if err == nil { | |||
| return nil | |||
| } | |||
| if !ZkErrorEqual(err, zk.ErrNotEmpty) { | |||
| return err | |||
| } | |||
| // Remove the ability for other nodes to get created while we are trying to delete. | |||
| // Otherwise, you can enter a race condition, or get starved out from deleting. | |||
| _, err = zconn.SetACL(zkPath, zk.WorldACL(zk.PermAdmin|zk.PermDelete|zk.PermRead), int32(version)) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| children, _, err := zconn.Children(zkPath) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| for _, child := range children { | |||
| err := DeleteRecursive(zconn, path.Join(zkPath, child), -1) | |||
| if err != nil && !ZkErrorEqual(err, zk.ErrNoNode) { | |||
| return fmt.Errorf("zkutil: recursive delete failed: %v", err) | |||
| } | |||
| } | |||
| err = zconn.Delete(zkPath, int32(version)) | |||
| if err != nil && !ZkErrorEqual(err, zk.ErrNotEmpty) { | |||
| err = fmt.Errorf("zkutil: nodes getting recreated underneath delete (app race condition): %v", zkPath) | |||
| } | |||
| return err | |||
| } | |||
| // The lexically lowest node is the lock holder - verify that this | |||
| // path holds the lock. Call this queue-lock because the semantics are | |||
| // a hybrid. Normal zk locks make assumptions about sequential | |||
| // numbering that don't hold when the data in a lock is modified. | |||
| // if the provided 'interrupted' chan is closed, we'll just stop waiting | |||
| // and return an interruption error | |||
| func ObtainQueueLock(zconn Conn, zkPath string, wait time.Duration, interrupted chan struct{}) error { | |||
| queueNode := path.Dir(zkPath) | |||
| lockNode := path.Base(zkPath) | |||
| timer := time.NewTimer(wait) | |||
| trylock: | |||
| children, _, err := zconn.Children(queueNode) | |||
| if err != nil { | |||
| return fmt.Errorf("zkutil: trylock failed %v", err) | |||
| } | |||
| sort.Strings(children) | |||
| if len(children) > 0 { | |||
| if children[0] == lockNode { | |||
| return nil | |||
| } | |||
| if wait > 0 { | |||
| prevLock := "" | |||
| for i := 1; i < len(children); i++ { | |||
| if children[i] == lockNode { | |||
| prevLock = children[i-1] | |||
| break | |||
| } | |||
| } | |||
| if prevLock == "" { | |||
| return fmt.Errorf("zkutil: no previous queue node found: %v", zkPath) | |||
| } | |||
| zkPrevLock := path.Join(queueNode, prevLock) | |||
| _, stat, watch, err := zconn.ExistsW(zkPrevLock) | |||
| if err != nil { | |||
| return fmt.Errorf("zkutil: unable to watch queued node %v %v", zkPrevLock, err) | |||
| } | |||
| if stat == nil { | |||
| goto trylock | |||
| } | |||
| select { | |||
| case <-timer.C: | |||
| break | |||
| case <-interrupted: | |||
| return ErrInterrupted | |||
| case <-watch: | |||
| // The precise event doesn't matter - try to read again regardless. | |||
| goto trylock | |||
| } | |||
| } | |||
| return ErrTimeout | |||
| } | |||
| return fmt.Errorf("zkutil: empty queue node: %v", queueNode) | |||
| } | |||
| func ZkEventOk(e zk.Event) bool { | |||
| return e.State == zk.StateConnected | |||
| } | |||
| func NodeExists(zconn Conn, zkPath string) (bool, error) { | |||
| b, _, err := zconn.Exists(zkPath) | |||
| return b, err | |||
| } | |||
| // Close the release channel when you want to clean up nicely. | |||
| func CreatePidNode(zconn Conn, zkPath string, contents string, done chan struct{}) error { | |||
| // On the first try, assume the cluster is up and running, that will | |||
| // help hunt down any config issues present at startup | |||
| if _, err := zconn.Create(zkPath, []byte(contents), zk.FlagEphemeral, zk.WorldACL(PERM_FILE)); err != nil { | |||
| if ZkErrorEqual(err, zk.ErrNodeExists) { | |||
| err = zconn.Delete(zkPath, -1) | |||
| } | |||
| if err != nil { | |||
| return fmt.Errorf("zkutil: failed deleting pid node: %v: %v", zkPath, err) | |||
| } | |||
| _, err = zconn.Create(zkPath, []byte(contents), zk.FlagEphemeral, zk.WorldACL(PERM_FILE)) | |||
| if err != nil { | |||
| return fmt.Errorf("zkutil: failed creating pid node: %v: %v", zkPath, err) | |||
| } | |||
| } | |||
| go func() { | |||
| for { | |||
| _, _, watch, err := zconn.GetW(zkPath) | |||
| if err != nil { | |||
| if ZkErrorEqual(err, zk.ErrNoNode) { | |||
| _, err = zconn.Create(zkPath, []byte(contents), zk.FlagEphemeral, zk.WorldACL(zk.PermAll)) | |||
| if err != nil { | |||
| log.Warningf("failed recreating pid node: %v: %v", zkPath, err) | |||
| } else { | |||
| log.Infof("recreated pid node: %v", zkPath) | |||
| continue | |||
| } | |||
| } else { | |||
| log.Warningf("failed reading pid node: %v", err) | |||
| } | |||
| } else { | |||
| select { | |||
| case event := <-watch: | |||
| if ZkEventOk(event) && event.Type == zk.EventNodeDeleted { | |||
| // Most likely another process has started up. However, | |||
| // there is a chance that an ephemeral node is deleted by | |||
| // the session expiring, yet that same session gets a watch | |||
| // notification. This seems like buggy behavior, but rather | |||
| // than race too hard on the node, just wait a bit and see | |||
| // if the situation resolves itself. | |||
| log.Warningf("pid deleted: %v", zkPath) | |||
| } else { | |||
| log.Infof("pid node event: %v", event) | |||
| } | |||
| // break here and wait for a bit before attempting | |||
| case <-done: | |||
| log.Infof("pid watcher stopped on done: %v", zkPath) | |||
| return | |||
| } | |||
| } | |||
| select { | |||
| // No one likes a thundering herd, least of all zk. | |||
| case <-time.After(5*time.Second + time.Duration(rand.Int63n(55e9))): | |||
| case <-done: | |||
| log.Infof("pid watcher stopped on done: %v", zkPath) | |||
| return | |||
| } | |||
| } | |||
| }() | |||
| return nil | |||
| } | |||
| // ZLocker is an interface for a lock that can fail. | |||
| type ZLocker interface { | |||
| Lock(desc string) error | |||
| LockWithTimeout(wait time.Duration, desc string) error | |||
| Unlock() error | |||
| Interrupt() | |||
| } | |||
| // Experiment with a little bit of abstraction. | |||
| // FIMXE(msolo) This object may need a mutex to ensure it can be shared | |||
| // across goroutines. | |||
| type zMutex struct { | |||
| mu sync.Mutex | |||
| zconn Conn | |||
| path string // Path under which we try to create lock nodes. | |||
| contents string | |||
| interrupted chan struct{} | |||
| name string // The name of the specific lock node we created. | |||
| ephemeral bool | |||
| } | |||
// CreateMutex initializes an unacquired mutex. A mutex is released only
// by Unlock. You can clean up a mutex with delete, but you should be
// careful doing so.
func CreateMutex(zconn Conn, zkPath string) ZLocker {
	zm, err := CreateMutexWithContents(zconn, zkPath, map[string]interface{}{})
	if err != nil {
		panic(err) // should never happen: the empty contents map always marshals
	}
	return zm
}
| // CreateMutex initializes an unaquired mutex with special content for this mutex. | |||
| // A mutex is released only by Unlock. You can clean up a mutex with delete, but you should be | |||
| // careful doing so. | |||
| func CreateMutexWithContents(zconn Conn, zkPath string, contents map[string]interface{}) (ZLocker, error) { | |||
| hostname, err := os.Hostname() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| pid := os.Getpid() | |||
| contents["hostname"] = hostname | |||
| contents["pid"] = pid | |||
| data, err := json.Marshal(contents) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return &zMutex{zconn: zconn, path: zkPath, contents: string(data), interrupted: make(chan struct{})}, nil | |||
| } | |||
// Interrupt signals a pending lock attempt (or election loop) to give
// up; a waiting LockWithTimeout returns ErrInterrupted. The send is
// non-blocking: if nobody is currently receiving on the channel, the
// signal is dropped and a warning is logged.
func (zm *zMutex) Interrupt() {
	select {
	case zm.interrupted <- struct{}{}:
	default:
		log.Warningf("zmutex interrupt blocked")
	}
}
// Lock returns nil when the lock is acquired. It is equivalent to
// LockWithTimeout with an effectively infinite (one-year) wait.
func (zm *zMutex) Lock(desc string) error {
	return zm.LockWithTimeout(365*24*time.Hour, desc)
}
| // LockWithTimeout returns nil when the lock is acquired. A lock is | |||
| // held if the file exists and you are the creator. Setting the wait | |||
| // to zero makes this a nonblocking lock check. | |||
| // | |||
| // FIXME(msolo) Disallow non-super users from removing the lock? | |||
| func (zm *zMutex) LockWithTimeout(wait time.Duration, desc string) (err error) { | |||
| timer := time.NewTimer(wait) | |||
| defer func() { | |||
| if panicErr := recover(); panicErr != nil || err != nil { | |||
| zm.deleteLock() | |||
| } | |||
| }() | |||
| // Ensure the rendezvous node is here. | |||
| // FIXME(msolo) Assuming locks are contended, it will be cheaper to assume this just | |||
| // exists. | |||
| _, err = CreateRecursive(zm.zconn, zm.path, "", 0, zk.WorldACL(PERM_DIRECTORY)) | |||
| if err != nil && !ZkErrorEqual(err, zk.ErrNodeExists) { | |||
| return err | |||
| } | |||
| lockPrefix := path.Join(zm.path, "lock-") | |||
| zflags := zk.FlagSequence | |||
| if zm.ephemeral { | |||
| zflags = zflags | zk.FlagEphemeral | |||
| } | |||
| // update node content | |||
| var lockContent map[string]interface{} | |||
| err = json.Unmarshal([]byte(zm.contents), &lockContent) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| lockContent["desc"] = desc | |||
| newContent, err := json.Marshal(lockContent) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| createlock: | |||
| lockCreated, err := zm.zconn.Create(lockPrefix, newContent, int32(zflags), zk.WorldACL(PERM_FILE)) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| name := path.Base(lockCreated) | |||
| zm.mu.Lock() | |||
| zm.name = name | |||
| zm.mu.Unlock() | |||
| trylock: | |||
| children, _, err := zm.zconn.Children(zm.path) | |||
| if err != nil { | |||
| return fmt.Errorf("zkutil: trylock failed %v", err) | |||
| } | |||
| sort.Strings(children) | |||
| if len(children) == 0 { | |||
| return fmt.Errorf("zkutil: empty lock: %v", zm.path) | |||
| } | |||
| if children[0] == name { | |||
| // We are the lock owner. | |||
| return nil | |||
| } | |||
| // This is the degenerate case of a nonblocking lock check. It's not optimal, but | |||
| // also probably not worth optimizing. | |||
| if wait == 0 { | |||
| return ErrTimeout | |||
| } | |||
| prevLock := "" | |||
| for i := 1; i < len(children); i++ { | |||
| if children[i] == name { | |||
| prevLock = children[i-1] | |||
| break | |||
| } | |||
| } | |||
| if prevLock == "" { | |||
| // This is an interesting case. The node disappeared | |||
| // underneath us, probably due to a session loss. We can | |||
| // recreate the lock node (with a new sequence number) and | |||
| // keep trying. | |||
| log.Warningf("zkutil: no lock node found: %v/%v", zm.path, zm.name) | |||
| goto createlock | |||
| } | |||
| zkPrevLock := path.Join(zm.path, prevLock) | |||
| exist, stat, watch, err := zm.zconn.ExistsW(zkPrevLock) | |||
| if err != nil { | |||
| // FIXME(msolo) Should this be a retry? | |||
| return fmt.Errorf("zkutil: unable to watch previous lock node %v %v", zkPrevLock, err) | |||
| } | |||
| if stat == nil || !exist { | |||
| goto trylock | |||
| } | |||
| select { | |||
| case <-timer.C: | |||
| return ErrTimeout | |||
| case <-zm.interrupted: | |||
| return ErrInterrupted | |||
| case event := <-watch: | |||
| log.Infof("zkutil: lock event: %v", event) | |||
| // The precise event doesn't matter - try to read again regardless. | |||
| goto trylock | |||
| } | |||
| panic("unexpected") | |||
| } | |||
// Unlock returns nil if the lock was successfully
// released. Otherwise, it is most likely a zk related error.
// It simply deletes the lock node we created.
func (zm *zMutex) Unlock() error {
	return zm.deleteLock()
}
// deleteLock removes this mutex's lock node. A missing node
// (zk.ErrNoNode) is not treated as an error, so the call is idempotent.
func (zm *zMutex) deleteLock() error {
	// name is shared with the locking goroutine; hold mu while reading it.
	zm.mu.Lock()
	zpath := path.Join(zm.path, zm.name)
	zm.mu.Unlock()
	// Version -1 deletes regardless of the node's current version.
	err := zm.zconn.Delete(zpath, -1)
	if err != nil && !ZkErrorEqual(err, zk.ErrNoNode) {
		return err
	}
	return nil
}
// ZElector stores basic state for running an election.
type ZElector struct {
	*zMutex        // Candidate lock; made ephemeral so a lost session frees the slot.
	path   string  // Root path of the election ("candidates" and "leader" live below it).
	leader string  // NOTE(review): not assigned in the code visible here — confirm who sets it.
}

// isLeader reports whether the lock node we created is the recorded leader.
func (ze *ZElector) isLeader() bool {
	return ze.leader == ze.name
}
// electionEvent pairs an event code with an optional error.
// NOTE(review): not referenced by the code visible in this file.
type electionEvent struct {
	Event int
	Err   error
}
// backoffDelay produces exponentially growing delays bounded by a
// configured minimum and maximum.
type backoffDelay struct {
	min   time.Duration // Delay handed out first and after Reset.
	max   time.Duration // Upper bound on any delay.
	delay time.Duration // Next delay to hand out.
}

// newBackoffDelay returns a backoffDelay starting at min and capped at max.
func newBackoffDelay(min, max time.Duration) *backoffDelay {
	return &backoffDelay{min: min, max: max, delay: min}
}

// NextDelay returns the current delay and doubles the next one,
// clamping it to the configured maximum.
func (bd *backoffDelay) NextDelay() time.Duration {
	current := bd.delay
	next := current * 2
	if next > bd.max {
		next = bd.max
	}
	bd.delay = next
	return current
}

// Reset restores the delay to the configured minimum.
func (bd *backoffDelay) Reset() {
	bd.delay = bd.min
}
// ElectorTask is the interface for a task that runs essentially
// forever or until something bad happens. If a task must be stopped,
// it should be handled promptly - no second notification will be
// sent.
type ElectorTask interface {
	// Run executes the task, returning when it completes or fails.
	Run() error
	// Stop asks the task to shut down promptly.
	Stop()
	// Return true if interrupted, false if it died of natural causes.
	// An interrupted task indicates that the election should stop.
	Interrupted() bool
}
// CreateElection returns an initialized elector. An election is
// really a cycle of events. You are flip-flopping between leader and
// candidate. It's better to think of this as a stream of events that
// one needs to react to.
func CreateElection(zconn Conn, zkPath string) ZElector {
	zm, err := CreateElectionWithContents(zconn, zkPath, map[string]interface{}{})
	if err != nil {
		// should never happen: the empty contents map always marshals
		panic(err)
	}
	return zm
}
// CreateElectionWithContents returns an initialized elector with special
// contents. An election is really a cycle of events. You are
// flip-flopping between leader and candidate. It's better to think of
// this as a stream of events that one needs to react to.
func CreateElectionWithContents(zconn Conn, zkPath string, contents map[string]interface{}) (ZElector, error) {
	// Candidates queue up under <zkPath>/candidates using the mutex machinery.
	l, err := CreateMutexWithContents(zconn, path.Join(zkPath, "candidates"), contents)
	if err != nil {
		return ZElector{}, err
	}
	zm := l.(*zMutex)
	// Ephemeral lock nodes ensure a dead candidate's slot is freed
	// when its session expires.
	zm.ephemeral = true
	return ZElector{zMutex: zm, path: zkPath}, nil
}
// RunTask returns nil when the underlying task ends or the error it
// generated.
//
// The elector loops forever: acquire the candidates lock, publish our
// contents into the leader node, run the task, and react to task exit,
// interruption, or ZooKeeper watch events.
func (ze *ZElector) RunTask(task ElectorTask) error {
	delay := newBackoffDelay(100*time.Millisecond, 1*time.Minute)
	leaderPath := path.Join(ze.path, "leader")
	// Make sure the leader node exists before campaigning, retrying
	// with exponential backoff on transient errors.
	for {
		_, err := CreateRecursive(ze.zconn, leaderPath, "", 0, zk.WorldACL(PERM_FILE))
		if err == nil || ZkErrorEqual(err, zk.ErrNodeExists) {
			break
		}
		log.Warningf("election leader create failed: %v", err)
		time.Sleep(delay.NextDelay())
	}
	for {
		err := ze.Lock("RunTask")
		if err != nil {
			log.Warningf("election lock failed: %v", err)
			if err == ErrInterrupted {
				return ErrInterrupted
			}
			continue
		}
		// Confirm your win and deliver acceptance speech. This notifies
		// listeners who will have been watching the leader node for
		// changes.
		_, err = ze.zconn.Set(leaderPath, []byte(ze.contents), -1)
		if err != nil {
			log.Warningf("election promotion failed: %v", err)
			continue
		}
		log.Infof("election promote leader %v", leaderPath)
		// Run the task in its own goroutine so we can simultaneously
		// watch for interrupts and leader-node changes.
		taskErrChan := make(chan error)
		go func() {
			taskErrChan <- task.Run()
		}()
	watchLeader:
		// Watch the leader so we can get notified if something goes wrong.
		data, _, watch, err := ze.zconn.GetW(leaderPath)
		if err != nil {
			log.Warningf("election unable to watch leader node %v %v", leaderPath, err)
			// FIXME(msolo) Add delay
			goto watchLeader
		}
		if string(data) != ze.contents {
			log.Warningf("election unable to promote leader")
			task.Stop()
			// We won the election, but we didn't become the leader. How is that possible?
			// (see Bush v. Gore for some inspiration)
			// It means:
			//   1. Someone isn't playing by the election rules (a bad actor).
			//      Hard to detect - let's assume we don't have this problem. :)
			//   2. We lost our connection somehow and the ephemeral lock was cleared,
			//      allowing someone else to win the election.
			continue
		}
		// This is where we start our target process and watch for its failure.
	waitForEvent:
		select {
		case <-ze.interrupted:
			log.Warning("election interrupted - stop child process")
			task.Stop()
			// Once the process dies from the signal, this will all tear down.
			goto waitForEvent
		case taskErr := <-taskErrChan:
			// If our code fails, unlock to trigger an election.
			log.Infof("election child process ended: %v", taskErr)
			ze.Unlock()
			if task.Interrupted() {
				log.Warningf("election child process interrupted - stepping down")
				return ErrInterrupted
			}
			continue
		case zevent := <-watch:
			// We had a zk connection hiccup. We have a few choices,
			// but it depends on the constraints and the events.
			//
			// If we get SESSION_EXPIRED our connection loss triggered an
			// election that we won't have won and the thus the lock was
			// automatically freed. We have no choice but to start over.
			if zevent.State == zk.StateExpired {
				log.Warningf("election leader watch expired")
				task.Stop()
				continue
			}
			// Otherwise, we had an intermittent issue or something touched
			// the node. Either we lost our position or someone broke
			// protocol and touched the leader node. We just reconnect and
			// revalidate. In the meantime, assume we are still the leader
			// until we determine otherwise.
			//
			// On a reconnect we will be able to see the leader
			// information. If we still hold the position, great. If not, we
			// kill the associated process.
			//
			// On a leader node change, we need to perform the same
			// validation. It's possible an election completes without the
			// old leader realizing he is out of touch.
			log.Warningf("election leader watch event %v", zevent)
			goto watchLeader
		}
	}
	panic("unreachable")
}
| @@ -1,4 +0,0 @@ | |||
| Petar Maymounkov <petar@5ttt.org> | |||
| Vadim Vygonets <vadik@vygo.net> | |||
| Ian Smith <iansmith@acm.org> | |||
| Martin Bruse | |||
| @@ -1,27 +0,0 @@ | |||
| Copyright (c) 2010, Petar Maymounkov | |||
| All rights reserved. | |||
| Redistribution and use in source and binary forms, with or without modification, | |||
| are permitted provided that the following conditions are met: | |||
| (*) Redistributions of source code must retain the above copyright notice, this list | |||
| of conditions and the following disclaimer. | |||
| (*) Redistributions in binary form must reproduce the above copyright notice, this | |||
| list of conditions and the following disclaimer in the documentation and/or | |||
| other materials provided with the distribution. | |||
| (*) Neither the name of Petar Maymounkov nor the names of its contributors may be | |||
| used to endorse or promote products derived from this software without specific | |||
| prior written permission. | |||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND | |||
| ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | |||
| WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | |||
| DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR | |||
| ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | |||
| (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |||
| LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | |||
| ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | |||
| SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| @@ -1,39 +0,0 @@ | |||
| // Copyright 2010 Petar Maymounkov. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package llrb | |||
| import "math" | |||
// avgVar maintains the average and variance of a stream of numbers
// in a space-efficient manner.
type avgVar struct {
	count      int64   // Number of samples seen so far.
	sum, sumsq float64 // Running sum and sum of squares of the samples.
}

// Init resets the accumulator to the empty state.
func (av *avgVar) Init() {
	*av = avgVar{}
}

// Add folds one sample into the running statistics.
func (av *avgVar) Add(sample float64) {
	av.count++
	av.sum += sample
	av.sumsq += sample * sample
}

// GetCount reports how many samples have been added.
func (av *avgVar) GetCount() int64 { return av.count }

// GetAvg returns the mean of the samples (NaN when no samples were added).
func (av *avgVar) GetAvg() float64 { return av.sum / float64(av.count) }

// GetTotal returns the sum of all samples.
func (av *avgVar) GetTotal() float64 { return av.sum }

// GetVar returns the population variance, computed as E[x²] − E[x]².
func (av *avgVar) GetVar() float64 {
	mean := av.GetAvg()
	return av.sumsq/float64(av.count) - mean*mean
}

// GetStdDev returns the square root of the variance.
func (av *avgVar) GetStdDev() float64 { return math.Sqrt(av.GetVar()) }
| @@ -1,93 +0,0 @@ | |||
| package llrb | |||
// ItemIterator is the callback used by the traversal methods; returning
// false stops the iteration early.
type ItemIterator func(i Item) bool

//func (t *Tree) Ascend(iterator ItemIterator) {
//	t.AscendGreaterOrEqual(Inf(-1), iterator)
//}
// AscendRange calls iterator, in ascending order, for every element x
// with greaterOrEqual <= x < lessThan, stopping early if the iterator
// returns false.
func (t *LLRB) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
	t.ascendRange(t.root, greaterOrEqual, lessThan, iterator)
}

// ascendRange does an in-order traversal of the subtree at h, pruning
// subtrees that cannot intersect [inf, sup). Returns false once the
// iterator asks to stop.
func (t *LLRB) ascendRange(h *Node, inf, sup Item, iterator ItemIterator) bool {
	if h == nil {
		return true
	}
	if !less(h.Item, sup) {
		// h.Item >= sup: only the left subtree can hold in-range keys.
		return t.ascendRange(h.Left, inf, sup, iterator)
	}
	if less(h.Item, inf) {
		// h.Item < inf: only the right subtree can hold in-range keys.
		return t.ascendRange(h.Right, inf, sup, iterator)
	}
	// h.Item is in range: left subtree, then h, then right subtree.
	if !t.ascendRange(h.Left, inf, sup, iterator) {
		return false
	}
	if !iterator(h.Item) {
		return false
	}
	return t.ascendRange(h.Right, inf, sup, iterator)
}
// AscendGreaterOrEqual will call iterator once for each element greater or equal to
// pivot in ascending order. It will stop whenever the iterator returns false.
func (t *LLRB) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
	t.ascendGreaterOrEqual(t.root, pivot, iterator)
}

// ascendGreaterOrEqual is an in-order traversal that skips left
// subtrees (and the node itself) once the node's key is below the pivot.
func (t *LLRB) ascendGreaterOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
	if h == nil {
		return true
	}
	if !less(h.Item, pivot) {
		// h.Item >= pivot: the left subtree may still hold in-range keys.
		if !t.ascendGreaterOrEqual(h.Left, pivot, iterator) {
			return false
		}
		if !iterator(h.Item) {
			return false
		}
	}
	// Right-subtree keys exceed h.Item, so they must be checked either way.
	return t.ascendGreaterOrEqual(h.Right, pivot, iterator)
}
| func (t *LLRB) AscendLessThan(pivot Item, iterator ItemIterator) { | |||
| t.ascendLessThan(t.root, pivot, iterator) | |||
| } | |||
| func (t *LLRB) ascendLessThan(h *Node, pivot Item, iterator ItemIterator) bool { | |||
| if h == nil { | |||
| return true | |||
| } | |||
| if !t.ascendLessThan(h.Left, pivot, iterator) { | |||
| return false | |||
| } | |||
| if !iterator(h.Item) { | |||
| return false | |||
| } | |||
| if less(h.Item, pivot) { | |||
| return t.ascendLessThan(h.Left, pivot, iterator) | |||
| } | |||
| return true | |||
| } | |||
// DescendLessOrEqual will call iterator once for each element less than the
// pivot in descending order. It will stop whenever the iterator returns false.
// NOTE(review): despite the "less than" wording, the condition below also
// includes elements equal to the pivot (h.Item <= pivot), matching the name.
func (t *LLRB) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
	t.descendLessOrEqual(t.root, pivot, iterator)
}

// descendLessOrEqual is a reverse-in-order traversal of elements <= pivot.
func (t *LLRB) descendLessOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
	if h == nil {
		return true
	}
	// less(h,p) || !less(p,h)  <=>  h.Item <= pivot.
	if less(h.Item, pivot) || !less(pivot, h.Item) {
		// Larger in-range keys live to the right; visit them first for
		// descending order, then the node itself.
		if !t.descendLessOrEqual(h.Right, pivot, iterator) {
			return false
		}
		if !iterator(h.Item) {
			return false
		}
	}
	return t.descendLessOrEqual(h.Left, pivot, iterator)
}
| @@ -1,46 +0,0 @@ | |||
| // Copyright 2010 Petar Maymounkov. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package llrb | |||
// GetHeight() returns an item in the tree with key @key, and its height in the tree
func (t *LLRB) GetHeight(key Item) (result Item, depth int) {
	return t.getHeight(t.root, key)
}

// getHeight searches for item below h. depth counts the edges followed
// from h down to the matching node (or to the nil where the search
// ended); result is nil when the item is absent.
func (t *LLRB) getHeight(h *Node, item Item) (Item, int) {
	if h == nil {
		return nil, 0
	}
	if less(item, h.Item) {
		result, depth := t.getHeight(h.Left, item)
		return result, depth + 1
	}
	if less(h.Item, item) {
		result, depth := t.getHeight(h.Right, item)
		return result, depth + 1
	}
	return h.Item, 0
}
// HeightStats() returns the average and standard deviation of the height
// of elements in the tree
func (t *LLRB) HeightStats() (avg, stddev float64) {
	av := &avgVar{}
	heightStats(t.root, 0, av)
	return av.GetAvg(), av.GetStdDev()
}

// heightStats folds the depth d of every node in the subtree rooted at
// h into the accumulator av.
func heightStats(h *Node, d int, av *avgVar) {
	if h == nil {
		return
	}
	av.Add(float64(d))
	if h.Left != nil {
		heightStats(h.Left, d+1, av)
	}
	if h.Right != nil {
		heightStats(h.Right, d+1, av)
	}
}
| @@ -1,456 +0,0 @@ | |||
| // Copyright 2010 Petar Maymounkov. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // A Left-Leaning Red-Black (LLRB) implementation of 2-3 balanced binary search trees, | |||
| // based on the following work: | |||
| // | |||
| // http://www.cs.princeton.edu/~rs/talks/LLRB/08Penn.pdf | |||
| // http://www.cs.princeton.edu/~rs/talks/LLRB/LLRB.pdf | |||
| // http://www.cs.princeton.edu/~rs/talks/LLRB/Java/RedBlackBST.java | |||
| // | |||
| // 2-3 trees (and the run-time equivalent 2-3-4 trees) are the de facto standard BST | |||
| // algoritms found in implementations of Python, Java, and other libraries. The LLRB | |||
| // implementation of 2-3 trees is a recent improvement on the traditional implementation, | |||
| // observed and documented by Robert Sedgewick. | |||
| // | |||
| package llrb | |||
// LLRB is a Left-Leaning Red-Black (LLRB) implementation of 2-3 trees
type LLRB struct {
	count int   // Number of nodes currently in the tree.
	root  *Node // Root of the tree; nil when the tree is empty.
}

// Node is a single tree node. The color bit lives on the node but
// describes the link coming in from the parent.
type Node struct {
	Item
	Left, Right *Node // Pointers to left and right child nodes
	Black       bool  // If set, the color of the link (incoming from the parent) is black
	// In the LLRB, new nodes are always red, hence the zero-value for node
}

// Item is the element interface: a single strict-weak-ordering comparison.
type Item interface {
	Less(than Item) bool
}
| // | |||
| func less(x, y Item) bool { | |||
| if x == pinf { | |||
| return false | |||
| } | |||
| if x == ninf { | |||
| return true | |||
| } | |||
| return x.Less(y) | |||
| } | |||
| // Inf returns an Item that is "bigger than" any other item, if sign is positive. | |||
| // Otherwise it returns an Item that is "smaller than" any other item. | |||
| func Inf(sign int) Item { | |||
| if sign == 0 { | |||
| panic("sign") | |||
| } | |||
| if sign > 0 { | |||
| return pinf | |||
| } | |||
| return ninf | |||
| } | |||
var (
	// Singleton sentinel values returned by Inf; less() recognizes them
	// by comparing with ==.
	ninf = nInf{}
	pinf = pInf{}
)

// nInf is "smaller than" every item.
type nInf struct{}

func (nInf) Less(Item) bool {
	return true
}

// pInf is "bigger than" every item.
type pInf struct{}

func (pInf) Less(Item) bool {
	return false
}
// New() allocates a new tree
func New() *LLRB {
	return &LLRB{}
}

// SetRoot sets the root node of the tree.
// It is intended to be used by functions that deserialize the tree.
// NOTE(review): count is not updated here; presumably the deserializer
// maintains it separately — confirm before relying on Len afterwards.
func (t *LLRB) SetRoot(r *Node) {
	t.root = r
}

// Root returns the root node of the tree.
// It is intended to be used by functions that serialize the tree.
func (t *LLRB) Root() *Node {
	return t.root
}

// Len returns the number of nodes in the tree.
func (t *LLRB) Len() int { return t.count }
// Has returns true if the tree contains an element whose order is the same as that of key.
// It is a convenience wrapper around Get.
func (t *LLRB) Has(key Item) bool {
	return t.Get(key) != nil
}
| // Get retrieves an element from the tree whose order is the same as that of key. | |||
| func (t *LLRB) Get(key Item) Item { | |||
| h := t.root | |||
| for h != nil { | |||
| switch { | |||
| case less(key, h.Item): | |||
| h = h.Left | |||
| case less(h.Item, key): | |||
| h = h.Right | |||
| default: | |||
| return h.Item | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| // Min returns the minimum element in the tree. | |||
| func (t *LLRB) Min() Item { | |||
| h := t.root | |||
| if h == nil { | |||
| return nil | |||
| } | |||
| for h.Left != nil { | |||
| h = h.Left | |||
| } | |||
| return h.Item | |||
| } | |||
| // Max returns the maximum element in the tree. | |||
| func (t *LLRB) Max() Item { | |||
| h := t.root | |||
| if h == nil { | |||
| return nil | |||
| } | |||
| for h.Right != nil { | |||
| h = h.Right | |||
| } | |||
| return h.Item | |||
| } | |||
| func (t *LLRB) ReplaceOrInsertBulk(items ...Item) { | |||
| for _, i := range items { | |||
| t.ReplaceOrInsert(i) | |||
| } | |||
| } | |||
| func (t *LLRB) InsertNoReplaceBulk(items ...Item) { | |||
| for _, i := range items { | |||
| t.InsertNoReplace(i) | |||
| } | |||
| } | |||
// ReplaceOrInsert inserts item into the tree. If an existing
// element has the same order, it is removed from the tree and returned.
func (t *LLRB) ReplaceOrInsert(item Item) Item {
	if item == nil {
		panic("inserting nil item")
	}
	var replaced Item
	t.root, replaced = t.replaceOrInsert(t.root, item)
	// The root link is always colored black.
	t.root.Black = true
	// Only count a new element; a replacement keeps the size unchanged.
	if replaced == nil {
		t.count++
	}
	return replaced
}

// replaceOrInsert recursively inserts item below h, rebalancing with
// the 2-3 rotations on the way back up. It returns the new subtree
// root and the replaced item, if any.
func (t *LLRB) replaceOrInsert(h *Node, item Item) (*Node, Item) {
	if h == nil {
		return newNode(item), nil
	}
	h = walkDownRot23(h)
	var replaced Item
	if less(item, h.Item) { // BUG
		h.Left, replaced = t.replaceOrInsert(h.Left, item)
	} else if less(h.Item, item) {
		h.Right, replaced = t.replaceOrInsert(h.Right, item)
	} else {
		// Equal order: swap in the new item, hand back the old one.
		replaced, h.Item = h.Item, item
	}
	h = walkUpRot23(h)
	return h, replaced
}
// InsertNoReplace inserts item into the tree. If an existing
// element has the same order, both elements remain in the tree.
func (t *LLRB) InsertNoReplace(item Item) {
	if item == nil {
		panic("inserting nil item")
	}
	t.root = t.insertNoReplace(t.root, item)
	// The root link is always colored black.
	t.root.Black = true
	t.count++
}

// insertNoReplace recursively inserts item below h (equal-order items
// go right), rebalancing with the 2-3 rotations on the way back up.
func (t *LLRB) insertNoReplace(h *Node, item Item) *Node {
	if h == nil {
		return newNode(item)
	}
	h = walkDownRot23(h)
	if less(item, h.Item) {
		h.Left = t.insertNoReplace(h.Left, item)
	} else {
		h.Right = t.insertNoReplace(h.Right, item)
	}
	return walkUpRot23(h)
}
// Rotation driver routines for 2-3 algorithm

// walkDownRot23 is a no-op on the way down in the 2-3 variant.
func walkDownRot23(h *Node) *Node { return h }

// walkUpRot23 restores the LLRB invariants on the way back up:
// right-leaning red links are rotated left, two reds in a row are
// rotated right, and a node with two red children is color-flipped.
// The order of the three checks matters.
func walkUpRot23(h *Node) *Node {
	if isRed(h.Right) && !isRed(h.Left) {
		h = rotateLeft(h)
	}
	if isRed(h.Left) && isRed(h.Left.Left) {
		h = rotateRight(h)
	}
	if isRed(h.Left) && isRed(h.Right) {
		flip(h)
	}
	return h
}

// Rotation driver routines for 2-3-4 algorithm

// walkDownRot234 splits nodes with two red children on the way down.
// NOTE(review): the 2-3-4 variants are not called by the 2-3 code
// visible in this file.
func walkDownRot234(h *Node) *Node {
	if isRed(h.Left) && isRed(h.Right) {
		flip(h)
	}
	return h
}

// walkUpRot234 fixes right-leaning and doubled red links on the way up.
func walkUpRot234(h *Node) *Node {
	if isRed(h.Right) && !isRed(h.Left) {
		h = rotateLeft(h)
	}
	if isRed(h.Left) && isRed(h.Left.Left) {
		h = rotateRight(h)
	}
	return h
}
// DeleteMin deletes the minimum element in the tree and returns the
// deleted item or nil otherwise.
func (t *LLRB) DeleteMin() Item {
	var deleted Item
	t.root, deleted = deleteMin(t.root)
	if t.root != nil {
		// The root link is always colored black.
		t.root.Black = true
	}
	if deleted != nil {
		t.count--
	}
	return deleted
}

// deleteMin code for LLRB 2-3 trees
// Removes the leftmost node, pushing red links down via moveRedLeft so
// the descent never deletes out of a black 2-node.
func deleteMin(h *Node) (*Node, Item) {
	if h == nil {
		return nil, nil
	}
	if h.Left == nil {
		// Leftmost node reached: remove it.
		return nil, h.Item
	}
	if !isRed(h.Left) && !isRed(h.Left.Left) {
		h = moveRedLeft(h)
	}
	var deleted Item
	h.Left, deleted = deleteMin(h.Left)
	return fixUp(h), deleted
}
// DeleteMax deletes the maximum element in the tree and returns
// the deleted item or nil otherwise
func (t *LLRB) DeleteMax() Item {
	var deleted Item
	t.root, deleted = deleteMax(t.root)
	if t.root != nil {
		// The root link is always colored black.
		t.root.Black = true
	}
	if deleted != nil {
		t.count--
	}
	return deleted
}

// deleteMax removes the rightmost node, leaning red links to the right
// and borrowing with moveRedRight so the descent stays safe.
func deleteMax(h *Node) (*Node, Item) {
	if h == nil {
		return nil, nil
	}
	if isRed(h.Left) {
		h = rotateRight(h)
	}
	if h.Right == nil {
		// Rightmost node reached: remove it.
		return nil, h.Item
	}
	if !isRed(h.Right) && !isRed(h.Right.Left) {
		h = moveRedRight(h)
	}
	var deleted Item
	h.Right, deleted = deleteMax(h.Right)
	return fixUp(h), deleted
}
// Delete deletes an item from the tree whose key equals key.
// The deleted item is returned, otherwise nil is returned.
func (t *LLRB) Delete(key Item) Item {
	var deleted Item
	t.root, deleted = t.delete(t.root, key)
	if t.root != nil {
		// The root link is always colored black.
		t.root.Black = true
	}
	if deleted != nil {
		t.count--
	}
	return deleted
}

// delete recursively removes the item (by order) from the subtree at h,
// pushing red links toward the search path so the removal never happens
// inside a black 2-node. Returns the new subtree root and the deleted
// item, if any.
func (t *LLRB) delete(h *Node, item Item) (*Node, Item) {
	var deleted Item
	if h == nil {
		return nil, nil
	}
	if less(item, h.Item) {
		if h.Left == nil { // item not present. Nothing to delete
			return h, nil
		}
		if !isRed(h.Left) && !isRed(h.Left.Left) {
			h = moveRedLeft(h)
		}
		h.Left, deleted = t.delete(h.Left, item)
	} else {
		if isRed(h.Left) {
			h = rotateRight(h)
		}
		// If @item equals @h.Item and no right children at @h
		if !less(h.Item, item) && h.Right == nil {
			return nil, h.Item
		}
		// PETAR: Added 'h.Right != nil' below
		if h.Right != nil && !isRed(h.Right) && !isRed(h.Right.Left) {
			h = moveRedRight(h)
		}
		// If @item equals @h.Item, and (from above) 'h.Right != nil'
		if !less(h.Item, item) {
			// Replace h's item with its in-order successor, the minimum
			// of the right subtree, then remove that minimum.
			var subDeleted Item
			h.Right, subDeleted = deleteMin(h.Right)
			if subDeleted == nil {
				panic("logic")
			}
			deleted, h.Item = h.Item, subDeleted
		} else { // Else, @item is bigger than @h.Item
			h.Right, deleted = t.delete(h.Right, item)
		}
	}
	return fixUp(h), deleted
}
// Internal node manipulation routines

// newNode allocates a new node holding item; Black defaults to false,
// so new nodes are red.
func newNode(item Item) *Node { return &Node{Item: item} }

// isRed reports whether the link into h is red; nil links count as black.
func isRed(h *Node) bool {
	if h == nil {
		return false
	}
	return !h.Black
}

// rotateLeft lifts h's red right child into h's place, preserving the
// color of the incoming link. Requires h.Right to be red.
func rotateLeft(h *Node) *Node {
	x := h.Right
	if x.Black {
		panic("rotating a black link")
	}
	h.Right = x.Left
	x.Left = h
	x.Black = h.Black
	h.Black = false
	return x
}

// rotateRight is the mirror of rotateLeft. Requires h.Left to be red.
func rotateRight(h *Node) *Node {
	x := h.Left
	if x.Black {
		panic("rotating a black link")
	}
	h.Left = x.Right
	x.Right = h
	x.Black = h.Black
	h.Black = false
	return x
}

// flip inverts the color of h and both of its children.
// REQUIRE: Left and Right children must be present
func flip(h *Node) {
	h.Black = !h.Black
	h.Left.Black = !h.Left.Black
	h.Right.Black = !h.Right.Black
}

// moveRedLeft pushes a red link down to h's left side, borrowing from
// the right sibling when it has a red left child.
// REQUIRE: Left and Right children must be present
func moveRedLeft(h *Node) *Node {
	flip(h)
	if isRed(h.Right.Left) {
		h.Right = rotateRight(h.Right)
		h = rotateLeft(h)
		flip(h)
	}
	return h
}

// moveRedRight pushes a red link down to h's right side, borrowing from
// the left sibling when it has a red left child.
// REQUIRE: Left and Right children must be present
func moveRedRight(h *Node) *Node {
	flip(h)
	if isRed(h.Left.Left) {
		h = rotateRight(h)
		flip(h)
	}
	return h
}

// fixUp re-establishes the LLRB invariants after a deletion, using the
// same three repairs as walkUpRot23.
func fixUp(h *Node) *Node {
	if isRed(h.Right) {
		h = rotateLeft(h)
	}
	if isRed(h.Left) && isRed(h.Left.Left) {
		h = rotateRight(h)
	}
	if isRed(h.Left) && isRed(h.Right) {
		flip(h)
	}
	return h
}
| @@ -1,17 +0,0 @@ | |||
| // Copyright 2010 Petar Maymounkov. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package llrb | |||
// Int is a convenience Item implementation for ints.
// Less panics if than is not an Int.
type Int int

func (x Int) Less(than Item) bool {
	return x < than.(Int)
}

// String is a convenience Item implementation for strings.
// Less panics if than is not a String.
type String string

func (x String) Less(than Item) bool {
	return x < than.(String)
}
| @@ -1,50 +0,0 @@ | |||
| The MIT License (MIT) | |||
| Copyright (c) 2015 dongxu | |||
| Permission is hereby granted, free of charge, to any person obtaining a copy | |||
| of this software and associated documentation files (the "Software"), to deal | |||
| in the Software without restriction, including without limitation the rights | |||
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |||
| copies of the Software, and to permit persons to whom the Software is | |||
| furnished to do so, subject to the following conditions: | |||
| The above copyright notice and this permission notice shall be included in all | |||
| copies or substantial portions of the Software. | |||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |||
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |||
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |||
| SOFTWARE. | |||
| Copyright (c) 2014 Bryan Peterson. All rights reserved. | |||
| Redistribution and use in source and binary forms, with or without | |||
| modification, are permitted provided that the following conditions are | |||
| met: | |||
| * Redistributions of source code must retain the above copyright | |||
| notice, this list of conditions and the following disclaimer. | |||
| * Redistributions in binary form must reproduce the above | |||
| copyright notice, this list of conditions and the following disclaimer | |||
| in the documentation and/or other materials provided with the | |||
| distribution. | |||
| * Neither the name of Google Inc. nor the names of its | |||
| contributors may be used to endorse or promote products derived from | |||
| this software without specific prior written permission. | |||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| @@ -1,101 +0,0 @@ | |||
| package hbase | |||
| import ( | |||
| pb "github.com/golang/protobuf/proto" | |||
| "github.com/juju/errors" | |||
| "github.com/ngaut/log" | |||
| "github.com/pingcap/go-hbase/proto" | |||
| ) | |||
// action is any request object that can serialize itself to a protobuf
// message (Get, Put, Delete, CoprocessorServiceCall, ...).
type action interface {
	ToProto() pb.Message
}
| func (c *client) innerCall(table, row []byte, action action, useCache bool) (*call, error) { | |||
| region, err := c.LocateRegion(table, row, useCache) | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| conn, err := c.getClientConn(region.Server) | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| regionSpecifier := &proto.RegionSpecifier{ | |||
| Type: proto.RegionSpecifier_REGION_NAME.Enum(), | |||
| Value: []byte(region.Name), | |||
| } | |||
| var cl *call | |||
| switch a := action.(type) { | |||
| case *Get: | |||
| cl = newCall(&proto.GetRequest{ | |||
| Region: regionSpecifier, | |||
| Get: a.ToProto().(*proto.Get), | |||
| }) | |||
| case *Put, *Delete: | |||
| cl = newCall(&proto.MutateRequest{ | |||
| Region: regionSpecifier, | |||
| Mutation: a.ToProto().(*proto.MutationProto), | |||
| }) | |||
| case *CoprocessorServiceCall: | |||
| cl = newCall(&proto.CoprocessorServiceRequest{ | |||
| Region: regionSpecifier, | |||
| Call: a.ToProto().(*proto.CoprocessorServiceCall), | |||
| }) | |||
| default: | |||
| return nil, errors.Errorf("Unknown action - %T - %v", action, action) | |||
| } | |||
| err = conn.call(cl) | |||
| if err != nil { | |||
| // If failed, remove bad server conn cache. | |||
| cachedKey := cachedConnKey(region.Server, ClientService) | |||
| delete(c.cachedConns, cachedKey) | |||
| return nil, errors.Trace(err) | |||
| } | |||
| return cl, nil | |||
| } | |||
// innerDo sends a single request for the given action and blocks until
// its response (possibly an *exception wrapper) arrives on the call's
// response channel.
func (c *client) innerDo(table, row []byte, action action, useCache bool) (pb.Message, error) {
	// Try to create and send a new request call.
	cl, err := c.innerCall(table, row, action, useCache)
	if err != nil {
		log.Warnf("inner call failed - %v", errors.ErrorStack(err))
		return nil, errors.Trace(err)
	}
	// Wait and receive the result.
	return <-cl.responseCh, nil
}
// do runs action with up to c.maxRetries attempts. When the server
// answers with an *exception, the table's region cache is dropped and
// subsequent attempts bypass the cache, so a moved region gets
// re-located; each retry waits via retrySleep.
func (c *client) do(table, row []byte, action action, useCache bool) (pb.Message, error) {
	var (
		result pb.Message
		err    error
	)
LOOP:
	for i := 0; i < c.maxRetries; i++ {
		result, err = c.innerDo(table, row, action, useCache)
		if err == nil {
			switch r := result.(type) {
			case *exception:
				err = errors.New(r.msg)
				// If get an exception response, clean old region cache.
				c.CleanRegionCache(table)
			default:
				break LOOP
			}
		}
		useCache = false
		log.Warnf("Retrying action for the %d time(s), error - %v", i+1, errors.ErrorStack(err))
		retrySleep(i + 1)
	}
	return result, errors.Trace(err)
}
| @@ -1,340 +0,0 @@ | |||
| package hbase | |||
| import ( | |||
| "sort" | |||
| "strconv" | |||
| "strings" | |||
| "time" | |||
| "github.com/juju/errors" | |||
| "github.com/ngaut/log" | |||
| "github.com/pingcap/go-hbase/proto" | |||
| ) | |||
| const defaultNS = "default" | |||
// TableName identifies an HBase table as a namespace plus a qualifier.
type TableName struct {
	namespace string
	name      string
}
// newTableNameWithDefaultNS wraps tblName in the "default" namespace.
func newTableNameWithDefaultNS(tblName string) TableName {
	return TableName{
		namespace: defaultNS,
		name:      tblName,
	}
}
// TableDescriptor describes a table to be created: its name,
// table-level attributes, and column family descriptors.
type TableDescriptor struct {
	name  TableName
	attrs map[string][]byte
	cfs   []*ColumnFamilyDescriptor
}
// NewTableDesciptor creates a descriptor for tblName in the default
// namespace, with the IS_META attribute preset to "false".
// NOTE(review): the exported name is misspelled ("Desciptor") but must
// be kept for backward compatibility with existing callers.
func NewTableDesciptor(tblName string) *TableDescriptor {
	ret := &TableDescriptor{
		name:  newTableNameWithDefaultNS(tblName),
		attrs: map[string][]byte{},
	}
	ret.AddAddr("IS_META", "false")
	return ret
}
// AddAddr sets a table-level attribute.
// NOTE(review): the name looks like a typo of "AddAttr" (compare
// ColumnFamilyDescriptor.AddAttr) but is exported, so it is kept.
func (c *TableDescriptor) AddAddr(attrName string, val string) {
	c.attrs[attrName] = []byte(val)
}
| func (t *TableDescriptor) AddColumnDesc(cf *ColumnFamilyDescriptor) { | |||
| for _, c := range t.cfs { | |||
| if c.name == cf.name { | |||
| return | |||
| } | |||
| } | |||
| t.cfs = append(t.cfs, cf) | |||
| } | |||
// ColumnFamilyDescriptor describes one column family: its name and its
// schema attributes (VERSIONS, TTL, COMPRESSION, ...).
type ColumnFamilyDescriptor struct {
	name  string
	attrs map[string][]byte
}
// AddAttr sets (or overwrites) one column-family attribute.
func (c *ColumnFamilyDescriptor) AddAttr(attrName string, val string) {
	c.attrs[attrName] = []byte(val)
}
// Themis will use VERSIONS=1 for some hook.
// NewColumnFamilyDescriptor returns a descriptor for the named column
// family with the default attribute set and VERSIONS fixed to 1.
func NewColumnFamilyDescriptor(name string) *ColumnFamilyDescriptor {
	return newColumnFamilyDescriptor(name, 1)
}
| func newColumnFamilyDescriptor(name string, versionsNum int) *ColumnFamilyDescriptor { | |||
| versions := strconv.Itoa(versionsNum) | |||
| ret := &ColumnFamilyDescriptor{ | |||
| name: name, | |||
| attrs: make(map[string][]byte), | |||
| } | |||
| // add default attrs | |||
| ret.AddAttr("DATA_BLOCK_ENCODING", "NONE") | |||
| ret.AddAttr("BLOOMFILTER", "ROW") | |||
| ret.AddAttr("REPLICATION_SCOPE", "0") | |||
| ret.AddAttr("COMPRESSION", "NONE") | |||
| ret.AddAttr("VERSIONS", versions) | |||
| ret.AddAttr("TTL", "2147483647") // 1 << 31 | |||
| ret.AddAttr("MIN_VERSIONS", "0") | |||
| ret.AddAttr("KEEP_DELETED_CELLS", "false") | |||
| ret.AddAttr("BLOCKSIZE", "65536") | |||
| ret.AddAttr("IN_MEMORY", "false") | |||
| ret.AddAttr("BLOCKCACHE", "true") | |||
| return ret | |||
| } | |||
| func getPauseTime(retry int) int64 { | |||
| if retry >= len(retryPauseTime) { | |||
| retry = len(retryPauseTime) - 1 | |||
| } | |||
| if retry < 0 { | |||
| retry = 0 | |||
| } | |||
| return retryPauseTime[retry] * defaultRetryWaitMs | |||
| } | |||
// CreateTable issues a CreateTable admin RPC built from t and the given
// pre-split keys, then polls hbase:meta until all len(splits)+1 regions
// are assigned, or the retry budget runs out.
func (c *client) CreateTable(t *TableDescriptor, splits [][]byte) error {
	req := &proto.CreateTableRequest{}
	schema := &proto.TableSchema{}
	// Sort the split keys before handing them to the server.
	sort.Sort(BytesSlice(splits))
	schema.TableName = &proto.TableName{
		Qualifier: []byte(t.name.name),
		Namespace: []byte(t.name.namespace),
	}
	for k, v := range t.attrs {
		schema.Attributes = append(schema.Attributes, &proto.BytesBytesPair{
			First:  []byte(k),
			Second: []byte(v),
		})
	}
	for _, c := range t.cfs {
		cf := &proto.ColumnFamilySchema{
			Name: []byte(c.name),
		}
		for k, v := range c.attrs {
			cf.Attributes = append(cf.Attributes, &proto.BytesBytesPair{
				First:  []byte(k),
				Second: []byte(v),
			})
		}
		schema.ColumnFamilies = append(schema.ColumnFamilies, cf)
	}
	req.TableSchema = schema
	req.SplitKeys = splits
	ch, err := c.adminAction(req)
	if err != nil {
		return errors.Trace(err)
	}
	resp := <-ch
	switch r := resp.(type) {
	case *exception:
		return errors.New(r.msg)
	}
	// wait and check
	for retry := 0; retry < defaultMaxActionRetries*retryLongerMultiplier; retry++ {
		regCnt := 0
		// n split keys produce n+1 regions.
		numRegs := len(splits) + 1
		err = c.metaScan(t.name.name, func(r *RegionInfo) (bool, error) {
			// Count only live, assigned regions belonging to this exact table.
			if !(r.Offline || r.Split) && len(r.Server) > 0 && r.TableName == t.name.name {
				regCnt++
			}
			return true, nil
		})
		if err != nil {
			return errors.Trace(err)
		}
		if regCnt == numRegs {
			return nil
		}
		log.Warnf("Retrying create table for the %d time(s)", retry+1)
		time.Sleep(time.Duration(getPauseTime(retry)) * time.Millisecond)
	}
	return errors.New("create table timeout")
}
// DisableTable disables tblName (default namespace) via the master,
// returning the server's exception message as an error if any.
func (c *client) DisableTable(tblName string) error {
	req := &proto.DisableTableRequest{
		TableName: &proto.TableName{
			Qualifier: []byte(tblName),
			Namespace: []byte(defaultNS),
		},
	}
	ch, err := c.adminAction(req)
	if err != nil {
		return errors.Trace(err)
	}
	resp := <-ch
	switch r := resp.(type) {
	case *exception:
		return errors.New(r.msg)
	}
	return nil
}
// EnableTable enables tblName (default namespace) via the master,
// returning the server's exception message as an error if any.
func (c *client) EnableTable(tblName string) error {
	req := &proto.EnableTableRequest{
		TableName: &proto.TableName{
			Qualifier: []byte(tblName),
			Namespace: []byte(defaultNS),
		},
	}
	ch, err := c.adminAction(req)
	if err != nil {
		return errors.Trace(err)
	}
	resp := <-ch
	switch r := resp.(type) {
	case *exception:
		return errors.New(r.msg)
	}
	return nil
}
// DropTable deletes tblName (default namespace) via the master,
// returning the server's exception message as an error if any.
// NOTE(review): HBase requires the table to be disabled first — this
// method does not disable it for you.
func (c *client) DropTable(tblName string) error {
	req := &proto.DeleteTableRequest{
		TableName: &proto.TableName{
			Qualifier: []byte(tblName),
			Namespace: []byte(defaultNS),
		},
	}
	ch, err := c.adminAction(req)
	if err != nil {
		return errors.Trace(err)
	}
	resp := <-ch
	switch r := resp.(type) {
	case *exception:
		return errors.New(r.msg)
	}
	return nil
}
// metaScan scans hbase:meta rows whose key starts with tbl and invokes
// fn for each parsed region. fn returns (more, err): returning
// more=false or a non-nil error stops the scan early.
func (c *client) metaScan(tbl string, fn func(r *RegionInfo) (bool, error)) error {
	scan := NewScan(metaTableName, 0, c)
	defer scan.Close()
	// Meta row keys are "<table>,<startKey>,<ts>...", so [tbl, nextKey(tbl))
	// covers exactly this table's rows.
	scan.StartRow = []byte(tbl)
	scan.StopRow = nextKey([]byte(tbl))
	for {
		r := scan.Next()
		if r == nil || scan.Closed() {
			break
		}
		region, err := c.parseRegion(r)
		if err != nil {
			return errors.Trace(err)
		}
		if more, err := fn(region); !more || err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
| func (c *client) TableExists(tbl string) (bool, error) { | |||
| found := false | |||
| err := c.metaScan(tbl, func(region *RegionInfo) (bool, error) { | |||
| if region.TableName == tbl { | |||
| found = true | |||
| return false, nil | |||
| } | |||
| return true, nil | |||
| }) | |||
| if err != nil { | |||
| return false, errors.Trace(err) | |||
| } | |||
| return found, nil | |||
| } | |||
// Split splits region.
// tblOrRegion table name or region(<tbl>,<endKey>,<timestamp>.<md5>).
// splitPoint which is a key, leave "" if want to split each region automatically.
func (c *client) Split(tblOrRegion, splitPoint string) error {
	// Extract table name from supposing regionName.
	tbls := strings.SplitN(tblOrRegion, ",", 2)
	tbl := tbls[0]
	found := false
	var foundRegion *RegionInfo
	// First try to interpret the argument as a full region name.
	err := c.metaScan(tbl, func(region *RegionInfo) (bool, error) {
		if region != nil && region.Name == tblOrRegion {
			found = true
			foundRegion = region
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return errors.Trace(err)
	}
	// This is a region name, split it directly.
	if found {
		return c.split(foundRegion, []byte(splitPoint))
	}
	// This is a table name.
	tbl = tblOrRegion
	regions, err := c.GetRegions([]byte(tbl), false)
	if err != nil {
		return errors.Trace(err)
	}
	// Split each region.
	for _, region := range regions {
		err := c.split(region, []byte(splitPoint))
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// split sends a SplitRegionRequest for region to its hosting server.
// When splitPoint is non-empty, regions whose key range does not
// contain it are skipped. The table's region cache is invalidated
// because a split changes the region layout.
func (c *client) split(region *RegionInfo, splitPoint []byte) error {
	// Not in this region, skip it.
	if len(splitPoint) > 0 && !findKey(region, splitPoint) {
		return nil
	}
	c.CleanRegionCache([]byte(region.TableName))
	rs := NewRegionSpecifier(region.Name)
	req := &proto.SplitRegionRequest{
		Region: rs,
	}
	if len(splitPoint) > 0 {
		req.SplitPoint = splitPoint
	}
	// Empty response.
	_, err := c.regionAction(region.Server, req)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}
| @@ -1,100 +0,0 @@ | |||
| package hbase | |||
| import ( | |||
| "strings" | |||
| pb "github.com/golang/protobuf/proto" | |||
| "github.com/pingcap/go-hbase/proto" | |||
| ) | |||
// call is one in-flight RPC: the request, a response buffer typed for
// the expected reply, and the channel the decoded response (or an
// *exception) is delivered on.
type call struct {
	id             uint32
	methodName     string
	request        pb.Message
	responseBuffer pb.Message
	responseCh     chan pb.Message
}
// exception wraps a remote/transport error string so it can travel
// through the pb.Message response channel like a normal response.
type exception struct {
	msg string
}
| func isNotInRegionError(err error) bool { | |||
| return strings.Contains(err.Error(), "org.apache.hadoop.hbase.NotServingRegionException") | |||
| } | |||
| func isUnknownScannerError(err error) bool { | |||
| return strings.Contains(err.Error(), "org.apache.hadoop.hbase.UnknownScannerException") | |||
| } | |||
// Reset, String and ProtoMessage make *exception satisfy pb.Message so
// an error can be sent on call.responseCh like any response.
func (m *exception) Reset()         { *m = exception{} }
func (m *exception) String() string { return m.msg }
func (m *exception) ProtoMessage()  {}
// newCall wraps request into a call whose response buffer and RPC
// method name match the request's concrete type.
// NOTE(review): a request type missing from this switch produces an
// empty methodName and a nil responseBuffer, which complete() would
// then unmarshal into — callers must only pass the types listed here.
func newCall(request pb.Message) *call {
	var responseBuffer pb.Message
	var methodName string
	switch request.(type) {
	case *proto.GetRequest:
		responseBuffer = &proto.GetResponse{}
		methodName = "Get"
	case *proto.MutateRequest:
		responseBuffer = &proto.MutateResponse{}
		methodName = "Mutate"
	case *proto.ScanRequest:
		responseBuffer = &proto.ScanResponse{}
		methodName = "Scan"
	case *proto.GetTableDescriptorsRequest:
		responseBuffer = &proto.GetTableDescriptorsResponse{}
		methodName = "GetTableDescriptors"
	case *proto.CoprocessorServiceRequest:
		responseBuffer = &proto.CoprocessorServiceResponse{}
		methodName = "ExecService"
	case *proto.CreateTableRequest:
		responseBuffer = &proto.CreateTableResponse{}
		methodName = "CreateTable"
	case *proto.DisableTableRequest:
		responseBuffer = &proto.DisableTableResponse{}
		methodName = "DisableTable"
	case *proto.EnableTableRequest:
		responseBuffer = &proto.EnableTableResponse{}
		methodName = "EnableTable"
	case *proto.DeleteTableRequest:
		responseBuffer = &proto.DeleteTableResponse{}
		methodName = "DeleteTable"
	case *proto.MultiRequest:
		responseBuffer = &proto.MultiResponse{}
		methodName = "Multi"
	case *proto.SplitRegionRequest:
		responseBuffer = &proto.SplitRegionResponse{}
		methodName = "SplitRegion"
	}
	return &call{
		methodName: methodName,
		request:    request,
		// Buffered so complete() never blocks if the caller is slow.
		responseBuffer: responseBuffer,
		responseCh:     make(chan pb.Message, 1),
	}
}
// complete delivers the RPC outcome on responseCh — an *exception when
// err is non-nil or decoding fails, otherwise the unmarshalled
// responseBuffer — and closes the channel so exactly one message is
// ever sent per call.
func (c *call) complete(err error, response []byte) {
	defer close(c.responseCh)
	if err != nil {
		c.responseCh <- &exception{
			msg: err.Error(),
		}
		return
	}
	err = pb.Unmarshal(response, c.responseBuffer)
	if err != nil {
		c.responseCh <- &exception{
			msg: err.Error(),
		}
		return
	}
	c.responseCh <- c.responseBuffer
}
| @@ -1,454 +0,0 @@ | |||
| package hbase | |||
| import ( | |||
| "bytes" | |||
| "crypto/md5" | |||
| "encoding/binary" | |||
| "encoding/hex" | |||
| "fmt" | |||
| "sync" | |||
| "time" | |||
| pb "github.com/golang/protobuf/proto" | |||
| "github.com/juju/errors" | |||
| "github.com/ngaut/go-zookeeper/zk" | |||
| "github.com/ngaut/log" | |||
| "github.com/pingcap/go-hbase/proto" | |||
| ) | |||
const (
	// Well-known znodes under the HBase ZooKeeper root.
	zkRootRegionPath = "/meta-region-server"
	zkMasterAddrPath = "/master"
	// Meta znode payload layout: one magic byte then a 4-byte id length.
	magicHeadByte       = 0xff
	magicHeadSize       = 1
	idLengthSize        = 4
	md5HexSize          = 32
	servernameSeparator = ","
	// Timeout and retry tuning knobs.
	rpcTimeout              = 30000
	pingTimeout             = 30000
	callTimeout             = 5000
	defaultMaxActionRetries = 3
	// Some operations can take a long time such as disable of big table.
	// numRetries is for 'normal' stuff... Multiply by this factor when
	// want to wait a long time.
	retryLongerMultiplier    = 31
	socketDefaultRetryWaitMs = 200
	defaultRetryWaitMs       = 100
	// always >= any unix timestamp(hbase version)
	beyondMaxTimestamp = "99999999999999"
)
// Wire and meta-table constants shared across the client.
var (
	hbaseHeaderBytes = []byte("HBas") // RPC connection preamble
	metaTableName    = []byte("hbase:meta")
	metaRegionName   = []byte("hbase:meta,,1")
)

// retryPauseTime is the backoff multiplier table indexed by retry
// count; getPauseTime multiplies an entry by defaultRetryWaitMs.
var retryPauseTime = []int64{1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200}
// RegionInfo is the client-side view of one region parsed from an
// hbase:meta row: its hosting server, key range, identifiers and state
// flags.
type RegionInfo struct {
	Server         string
	StartKey       []byte
	EndKey         []byte
	Name           string
	Ts             string
	TableNamespace string
	TableName      string
	Offline        bool
	Split          bool
}
// tableInfo pairs a table name with its column families.
// NOTE(review): not referenced anywhere in this file — possibly dead.
type tableInfo struct {
	tableName string
	families  []string
}
// HBaseClient is the exported client surface: row operations, DDL,
// coprocessor calls, region lookup/splitting, and cache/lifecycle
// management.
type HBaseClient interface {
	Get(tbl string, g *Get) (*ResultRow, error)
	Put(tbl string, p *Put) (bool, error)
	Delete(tbl string, d *Delete) (bool, error)
	TableExists(tbl string) (bool, error)
	DropTable(t string) error
	DisableTable(t string) error
	EnableTable(t string) error
	CreateTable(t *TableDescriptor, splits [][]byte) error
	ServiceCall(table string, call *CoprocessorServiceCall) (*proto.CoprocessorServiceResponse, error)
	LocateRegion(table, row []byte, useCache bool) (*RegionInfo, error)
	GetRegions(table []byte, useCache bool) ([]*RegionInfo, error)
	Split(tblOrRegion, splitPoint string) error
	CleanRegionCache(table []byte)
	CleanAllRegionCache()
	Close() error
}
// hbase client implementation; the blank assignment is a compile-time
// check that *client satisfies HBaseClient.
var _ HBaseClient = (*client)(nil)

// client discovers the meta-region server and master via ZooKeeper and
// caches per-server connections and per-table region info under mu.
type client struct {
	mu               sync.RWMutex // guards the caches below (see getConn, getCachedLocation)
	zkClient         *zk.Conn
	zkHosts          []string
	zkRoot           string
	prefetched       map[string]bool
	cachedConns      map[string]*connection
	cachedRegionInfo map[string]map[string]*RegionInfo
	maxRetries       int
	rootServerName   *proto.ServerName
	masterServerName *proto.ServerName
}
// serverNameToAddr formats a ServerName as "host:port".
func serverNameToAddr(server *proto.ServerName) string {
	return fmt.Sprintf("%s:%d", server.GetHostName(), server.GetPort())
}
// cachedConnKey builds the cachedConns map key "addr|serviceType".
func cachedConnKey(addr string, srvType ServiceType) string {
	return fmt.Sprintf("%s|%d", addr, srvType)
}
// NewClient connects to the given ZooKeeper ensemble (rooted at zkRoot)
// and returns a ready HBaseClient, or an error if discovery of the
// meta-region server or master fails.
func NewClient(zkHosts []string, zkRoot string) (HBaseClient, error) {
	cl := &client{
		zkHosts:          zkHosts,
		zkRoot:           zkRoot,
		cachedConns:      make(map[string]*connection),
		cachedRegionInfo: make(map[string]map[string]*RegionInfo),
		prefetched:       make(map[string]bool),
		maxRetries:       defaultMaxActionRetries,
	}
	err := cl.init()
	if err != nil {
		return nil, errors.Trace(err)
	}
	return cl, nil
}
| func (c *client) decodeMeta(data []byte) (*proto.ServerName, error) { | |||
| if data[0] != magicHeadByte { | |||
| return nil, errors.New("unknown packet") | |||
| } | |||
| var n int32 | |||
| err := binary.Read(bytes.NewBuffer(data[1:]), binary.BigEndian, &n) | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| dataOffset := magicHeadSize + idLengthSize + int(n) | |||
| data = data[(dataOffset + 4):] | |||
| var mrs proto.MetaRegionServer | |||
| err = pb.Unmarshal(data, &mrs) | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| return mrs.GetServer(), nil | |||
| } | |||
// init and get root region server addr and master addr
// init connects to ZooKeeper, decodes the meta-region server and master
// addresses from their znodes, and pre-caches a client connection to
// the meta-region server.
func (c *client) init() error {
	zkclient, _, err := zk.Connect(c.zkHosts, time.Second*30)
	if err != nil {
		return errors.Trace(err)
	}
	c.zkClient = zkclient
	res, _, _, err := c.zkClient.GetW(c.zkRoot + zkRootRegionPath)
	if err != nil {
		return errors.Trace(err)
	}
	c.rootServerName, err = c.decodeMeta(res)
	if err != nil {
		return errors.Trace(err)
	}
	log.Debug("connect root region server...", c.rootServerName)
	serverAddr := serverNameToAddr(c.rootServerName)
	conn, err := newConnection(serverAddr, ClientService)
	if err != nil {
		return errors.Trace(err)
	}
	// Set buffered regionserver conn.
	cachedKey := cachedConnKey(serverAddr, ClientService)
	c.cachedConns[cachedKey] = conn
	res, _, _, err = c.zkClient.GetW(c.zkRoot + zkMasterAddrPath)
	if err != nil {
		return errors.Trace(err)
	}
	c.masterServerName, err = c.decodeMeta(res)
	if err != nil {
		return errors.Trace(err)
	}
	return nil
}
| // get connection | |||
| func (c *client) getConn(addr string, srvType ServiceType) (*connection, error) { | |||
| connKey := cachedConnKey(addr, srvType) | |||
| c.mu.RLock() | |||
| conn, ok := c.cachedConns[connKey] | |||
| c.mu.RUnlock() | |||
| if ok { | |||
| return conn, nil | |||
| } | |||
| var err error | |||
| conn, err = newConnection(addr, srvType) | |||
| if err != nil { | |||
| return nil, errors.Errorf("create new connection failed - %v", errors.ErrorStack(err)) | |||
| } | |||
| c.mu.Lock() | |||
| c.cachedConns[connKey] = conn | |||
| c.mu.Unlock() | |||
| return conn, nil | |||
| } | |||
// getAdminConn returns a region-server admin connection for addr.
func (c *client) getAdminConn(addr string) (*connection, error) {
	return c.getConn(addr, AdminService)
}
// getClientConn returns a region-server data connection for addr.
func (c *client) getClientConn(addr string) (*connection, error) {
	return c.getConn(addr, ClientService)
}
// getMasterConn returns a connection to the HBase master discovered at
// init time.
func (c *client) getMasterConn() (*connection, error) {
	return c.getConn(serverNameToAddr(c.masterServerName), MasterService)
}
// doAction wraps req in a call, sends it on conn, and returns the
// channel on which the response will be delivered.
func (c *client) doAction(conn *connection, req pb.Message) (chan pb.Message, error) {
	cl := newCall(req)
	err := conn.call(cl)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return cl.responseCh, nil
}
// adminAction sends req to the HBase master.
func (c *client) adminAction(req pb.Message) (chan pb.Message, error) {
	conn, err := c.getMasterConn()
	if err != nil {
		return nil, errors.Trace(err)
	}
	return c.doAction(conn, req)
}
// regionAction sends req to the admin service of the region server at
// addr.
func (c *client) regionAction(addr string, req pb.Message) (chan pb.Message, error) {
	conn, err := c.getAdminConn(addr)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return c.doAction(conn, req)
}
// http://stackoverflow.com/questions/27602013/correct-way-to-get-region-name-by-using-hbase-api
// createRegionName builds "<table>,<startKey>,<id>". With newFormat,
// the MD5 of that string is appended as ".<md5hex>." — the modern HBase
// region-name encoding.
func (c *client) createRegionName(table, startKey []byte, id string, newFormat bool) []byte {
	// An empty start key is encoded as a single zero byte.
	if len(startKey) == 0 {
		startKey = make([]byte, 1)
	}
	b := bytes.Join([][]byte{table, startKey, []byte(id)}, []byte{','})
	if newFormat {
		m := md5.Sum(b)
		mhex := []byte(hex.EncodeToString(m[:]))
		b = append(bytes.Join([][]byte{b, mhex}, []byte{'.'}), '.')
	}
	return b
}
| func (c *client) parseRegion(rr *ResultRow) (*RegionInfo, error) { | |||
| regionInfoCol, ok := rr.Columns["info:regioninfo"] | |||
| if !ok { | |||
| return nil, errors.Errorf("Unable to parse region location (no regioninfo column): %#v", rr) | |||
| } | |||
| offset := bytes.Index(regionInfoCol.Value, []byte("PBUF")) + 4 | |||
| regionInfoBytes := regionInfoCol.Value[offset:] | |||
| var info proto.RegionInfo | |||
| err := pb.Unmarshal(regionInfoBytes, &info) | |||
| if err != nil { | |||
| return nil, errors.Errorf("Unable to parse region location: %#v", err) | |||
| } | |||
| ri := &RegionInfo{ | |||
| StartKey: info.GetStartKey(), | |||
| EndKey: info.GetEndKey(), | |||
| Name: bytes.NewBuffer(rr.Row).String(), | |||
| TableNamespace: string(info.GetTableName().GetNamespace()), | |||
| TableName: string(info.GetTableName().GetQualifier()), | |||
| Offline: info.GetOffline(), | |||
| Split: info.GetSplit(), | |||
| } | |||
| if v, ok := rr.Columns["info:server"]; ok { | |||
| ri.Server = string(v.Value) | |||
| } | |||
| return ri, nil | |||
| } | |||
// getMetaRegion synthesizes the RegionInfo for hbase:meta, which lives
// on the root region server discovered during init.
func (c *client) getMetaRegion() *RegionInfo {
	return &RegionInfo{
		StartKey: []byte{},
		EndKey:   []byte{},
		Name:     string(metaRegionName),
		Server:   serverNameToAddr(c.rootServerName),
	}
}
| func (c *client) getCachedLocation(table, row []byte) *RegionInfo { | |||
| c.mu.RLock() | |||
| defer c.mu.RUnlock() | |||
| tableStr := string(table) | |||
| if regions, ok := c.cachedRegionInfo[tableStr]; ok { | |||
| for _, region := range regions { | |||
| if (len(region.EndKey) == 0 || | |||
| bytes.Compare(row, region.EndKey) < 0) && | |||
| (len(region.StartKey) == 0 || | |||
| bytes.Compare(row, region.StartKey) >= 0) { | |||
| return region | |||
| } | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| func (c *client) updateRegionCache(table []byte, region *RegionInfo) { | |||
| c.mu.Lock() | |||
| defer c.mu.Unlock() | |||
| tableStr := string(table) | |||
| if _, ok := c.cachedRegionInfo[tableStr]; !ok { | |||
| c.cachedRegionInfo[tableStr] = make(map[string]*RegionInfo) | |||
| } | |||
| c.cachedRegionInfo[tableStr][region.Name] = region | |||
| } | |||
// CleanRegionCache drops every cached region of table; the next lookup
// re-reads hbase:meta.
func (c *client) CleanRegionCache(table []byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.cachedRegionInfo, string(table))
}
// CleanAllRegionCache drops the region cache for every table.
func (c *client) CleanAllRegionCache() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.cachedRegionInfo = map[string]map[string]*RegionInfo{}
}
// LocateRegion finds the region of table that contains row. hbase:meta
// itself is resolved directly; otherwise the region cache is consulted
// (when useCache) and, on a miss, a ClosestRowBefore Get against
// hbase:meta resolves the region and refreshes the cache.
func (c *client) LocateRegion(table, row []byte, useCache bool) (*RegionInfo, error) {
	// If user wants to locate metaregion, just return it.
	if bytes.Equal(table, metaTableName) {
		return c.getMetaRegion(), nil
	}
	// Try to get location from cache.
	if useCache {
		if r := c.getCachedLocation(table, row); r != nil {
			return r, nil
		}
	}
	// If cache missed or not using cache, try to get and update region info.
	metaRegion := c.getMetaRegion()
	conn, err := c.getClientConn(metaRegion.Server)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Probe key sorts at/after any real meta row for (table, row), so
	// ClosestRowBefore lands on the owning region's meta entry.
	regionRow := c.createRegionName(table, row, beyondMaxTimestamp, true)
	call := newCall(&proto.GetRequest{
		Region: &proto.RegionSpecifier{
			Type:  proto.RegionSpecifier_REGION_NAME.Enum(),
			Value: metaRegionName,
		},
		Get: &proto.Get{
			Row: regionRow,
			Column: []*proto.Column{&proto.Column{
				Family: []byte("info"),
			}},
			ClosestRowBefore: pb.Bool(true),
		},
	})
	err = conn.call(call)
	if err != nil {
		return nil, errors.Trace(err)
	}
	response := <-call.responseCh
	switch r := response.(type) {
	case *proto.GetResponse:
		res := r.GetResult()
		if res == nil {
			return nil, errors.Errorf("Empty region: [table=%s] [row=%q] [region_row=%q]", table, row, regionRow)
		}
		rr := NewResultRow(res)
		region, err := c.parseRegion(rr)
		if err != nil {
			return nil, errors.Trace(err)
		}
		c.updateRegionCache(table, region)
		return region, nil
	case *exception:
		return nil, errors.New(r.msg)
	default:
		log.Warnf("Unknown response - %T - %v", r, r)
	}
	return nil, errors.Errorf("Couldn't find the region: [table=%s] [row=%q] [region_row=%q]", table, row, regionRow)
}
| func (c *client) GetRegions(table []byte, useCache bool) ([]*RegionInfo, error) { | |||
| var regions []*RegionInfo | |||
| startKey := []byte("") | |||
| // Get first region. | |||
| region, err := c.LocateRegion(table, []byte(startKey), useCache) | |||
| if err != nil { | |||
| return nil, errors.Errorf("couldn't find any region: [table=%s] [useCache=%t]", table, useCache) | |||
| } | |||
| regions = append(regions, region) | |||
| startKey = region.EndKey | |||
| for len(startKey) > 0 { | |||
| region, err = c.LocateRegion(table, []byte(startKey), useCache) | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| regions = append(regions, region) | |||
| startKey = region.EndKey | |||
| } | |||
| return regions, nil | |||
| } | |||
| func (c *client) Close() error { | |||
| if c.zkClient != nil { | |||
| c.zkClient.Close() | |||
| } | |||
| for _, conn := range c.cachedConns { | |||
| err := conn.close() | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| @@ -1,67 +0,0 @@ | |||
| package hbase | |||
| import ( | |||
| "github.com/juju/errors" | |||
| "github.com/pingcap/go-hbase/proto" | |||
| ) | |||
| func (c *client) Delete(table string, del *Delete) (bool, error) { | |||
| response, err := c.do([]byte(table), del.GetRow(), del, true) | |||
| if err != nil { | |||
| return false, errors.Trace(err) | |||
| } | |||
| switch r := response.(type) { | |||
| case *proto.MutateResponse: | |||
| return r.GetProcessed(), nil | |||
| } | |||
| return false, errors.Errorf("Invalid response seen [response: %#v]", response) | |||
| } | |||
| func (c *client) Get(table string, get *Get) (*ResultRow, error) { | |||
| response, err := c.do([]byte(table), get.GetRow(), get, true) | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| switch r := response.(type) { | |||
| case *proto.GetResponse: | |||
| res := r.GetResult() | |||
| if res == nil { | |||
| return nil, errors.Errorf("Empty response: [table=%s] [row=%q]", table, get.GetRow()) | |||
| } | |||
| return NewResultRow(res), nil | |||
| case *exception: | |||
| return nil, errors.New(r.msg) | |||
| } | |||
| return nil, errors.Errorf("Invalid response seen [response: %#v]", response) | |||
| } | |||
| func (c *client) Put(table string, put *Put) (bool, error) { | |||
| response, err := c.do([]byte(table), put.GetRow(), put, true) | |||
| if err != nil { | |||
| return false, errors.Trace(err) | |||
| } | |||
| switch r := response.(type) { | |||
| case *proto.MutateResponse: | |||
| return r.GetProcessed(), nil | |||
| } | |||
| return false, errors.Errorf("Invalid response seen [response: %#v]", response) | |||
| } | |||
| func (c *client) ServiceCall(table string, call *CoprocessorServiceCall) (*proto.CoprocessorServiceResponse, error) { | |||
| response, err := c.do([]byte(table), call.Row, call, true) | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| switch r := response.(type) { | |||
| case *proto.CoprocessorServiceResponse: | |||
| return r, nil | |||
| case *exception: | |||
| return nil, errors.New(r.msg) | |||
| } | |||
| return nil, errors.Errorf("Invalid response seen [response: %#v]", response) | |||
| } | |||
| @@ -1,177 +0,0 @@ | |||
| package hbase | |||
| import ( | |||
| "bytes" | |||
| "fmt" | |||
| "io" | |||
| "github.com/juju/errors" | |||
| "github.com/pingcap/go-hbase/iohelper" | |||
| ) | |||
// Column identifies an HBase column as a column family plus a qualifier.
type Column struct {
	Family []byte
	Qual   []byte
}
| func NewColumn(family, qual []byte) *Column { | |||
| return &Column{ | |||
| Family: family, | |||
| Qual: qual, | |||
| } | |||
| } | |||
| func encode(parts ...[]byte) ([]byte, error) { | |||
| buf := &bytes.Buffer{} | |||
| for _, p := range parts { | |||
| err := iohelper.WriteVarBytes(buf, p) | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| } | |||
| return buf.Bytes(), nil | |||
| } | |||
| func decode(encoded []byte) ([][]byte, error) { | |||
| var ret [][]byte | |||
| buf := bytes.NewBuffer(encoded) | |||
| for { | |||
| b, err := iohelper.ReadVarBytes(buf) | |||
| if len(b) == 0 || (err != nil && ErrorEqual(err, io.EOF)) { | |||
| break | |||
| } | |||
| ret = append(ret, b) | |||
| } | |||
| return ret, nil | |||
| } | |||
| func (c *Column) Write(w io.Writer) error { | |||
| err := iohelper.WriteVarBytes(w, c.Family) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| err = iohelper.WriteVarBytes(w, c.Qual) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| return nil | |||
| } | |||
// String returns the var-bytes encoding of the column as a string; on
// encoding failure it returns a human-readable error marker instead.
func (c *Column) String() string {
	b, err := encode(c.Family, c.Qual)
	if err != nil {
		return fmt.Sprintf("invalid column - %v", err)
	}
	return string(b)
}
| func (c *Column) ParseFromString(s string) error { | |||
| pairs, err := decode([]byte(s)) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| c.Family = pairs[0] | |||
| c.Qual = pairs[1] | |||
| return nil | |||
| } | |||
// ColumnCoordinate fully addresses a single cell: table, row, and the
// embedded Column (family + qualifier).
type ColumnCoordinate struct {
	Table []byte
	Row   []byte
	Column
}
| func NewColumnCoordinate(table, row, family, qual []byte) *ColumnCoordinate { | |||
| return &ColumnCoordinate{ | |||
| Table: table, | |||
| Row: row, | |||
| Column: Column{ | |||
| Family: family, | |||
| Qual: qual, | |||
| }, | |||
| } | |||
| } | |||
| func (c *ColumnCoordinate) Write(w io.Writer) error { | |||
| err := iohelper.WriteVarBytes(w, c.Table) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| err = iohelper.WriteVarBytes(w, c.Row) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| err = c.Column.Write(w) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| return nil | |||
| } | |||
| func (c *ColumnCoordinate) Equal(a *ColumnCoordinate) bool { | |||
| return bytes.Compare(c.Table, a.Table) == 0 && | |||
| bytes.Compare(c.Row, a.Row) == 0 && | |||
| bytes.Compare(c.Family, a.Family) == 0 && | |||
| bytes.Compare(c.Qual, a.Qual) == 0 | |||
| } | |||
// String returns the var-bytes encoding of the coordinate as a string; on
// encoding failure it returns a human-readable error marker instead.
func (c *ColumnCoordinate) String() string {
	b, err := encode(c.Table, c.Row, c.Family, c.Qual)
	if err != nil {
		return fmt.Sprintf("invalid column coordinate - %v", err)
	}
	return string(b)
}
| func (c *ColumnCoordinate) ParseFromString(s string) error { | |||
| pairs, err := decode([]byte(s)) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| c.Table = pairs[0] | |||
| c.Row = pairs[1] | |||
| c.Family = pairs[2] | |||
| c.Qual = pairs[3] | |||
| return nil | |||
| } | |||
| func (c *ColumnCoordinate) ParseField(b iohelper.ByteMultiReader) error { | |||
| table, err := iohelper.ReadVarBytes(b) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| c.Table = table | |||
| row, err := iohelper.ReadVarBytes(b) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| c.Row = row | |||
| family, err := iohelper.ReadVarBytes(b) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| c.Family = family | |||
| qual, err := iohelper.ReadVarBytes(b) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| c.Qual = qual | |||
| return nil | |||
| } | |||
| func (c *ColumnCoordinate) GetColumn() *Column { | |||
| return &Column{ | |||
| Family: c.Family, | |||
| Qual: c.Qual, | |||
| } | |||
| } | |||
| @@ -1,291 +0,0 @@ | |||
| package hbase | |||
| import ( | |||
| "bufio" | |||
| "bytes" | |||
| "io" | |||
| "net" | |||
| "strings" | |||
| "sync" | |||
| pb "github.com/golang/protobuf/proto" | |||
| "github.com/juju/errors" | |||
| "github.com/ngaut/log" | |||
| "github.com/pingcap/go-hbase/iohelper" | |||
| "github.com/pingcap/go-hbase/proto" | |||
| ) | |||
// ServiceType identifies which HBase RPC service a connection speaks.
type ServiceType byte

// Service identifiers. They are now explicitly typed as ServiceType (the
// old consts were untyped ints); values start at 1 so the zero value is
// never a valid service.
const (
	MasterMonitorService ServiceType = iota + 1
	MasterService
	MasterAdminService
	AdminService
	ClientService
	RegionServerStatusService
)
// ServiceString maps a ServiceType to the protobuf service name sent in
// the connection header.
var ServiceString = map[ServiceType]string{
	MasterMonitorService:      "MasterMonitorService",
	MasterService:             "MasterService",
	MasterAdminService:        "MasterAdminService",
	AdminService:              "AdminService",
	ClientService:             "ClientService",
	RegionServerStatusService: "RegionServerStatusService",
}
// idGenerator hands out monotonically increasing RPC call ids.
// mu guards n; because mu is a pointer, the zero value is not usable —
// construct with newIdGenerator.
type idGenerator struct {
	n  int
	mu *sync.RWMutex
}
| func newIdGenerator() *idGenerator { | |||
| return &idGenerator{ | |||
| n: 0, | |||
| mu: &sync.RWMutex{}, | |||
| } | |||
| } | |||
| func (a *idGenerator) get() int { | |||
| a.mu.RLock() | |||
| v := a.n | |||
| a.mu.RUnlock() | |||
| return v | |||
| } | |||
| func (a *idGenerator) incrAndGet() int { | |||
| a.mu.Lock() | |||
| a.n++ | |||
| v := a.n | |||
| a.mu.Unlock() | |||
| return v | |||
| } | |||
// connection is one TCP connection to an HBase server speaking a single
// RPC service. Outgoing frames are queued on in and written by the
// dispatch goroutine; ongoingCalls maps call id -> pending call and is
// guarded by mu.
type connection struct {
	mu           sync.Mutex
	addr         string
	conn         net.Conn
	bw           *bufio.Writer
	idGen        *idGenerator
	serviceType  ServiceType
	in           chan *iohelper.PbBuffer
	ongoingCalls map[int]*call
}
// processMessage splits one message frame into its raw protobuf payloads
// (each payload is length-delimited inside the frame).
func processMessage(msg []byte) ([][]byte, error) {
	buf := pb.NewBuffer(msg)
	payloads := make([][]byte, 0)
	for {
		hbytes, err := buf.DecodeRawBytes(true)
		if err != nil {
			// DecodeRawBytes reports "unexpected EOF" once the buffer is
			// exhausted; that is the normal loop terminator, so it is not
			// treated as an error. Anything else is a real decode failure.
			if strings.Contains(err.Error(), "unexpected EOF") {
				break
			}
			log.Errorf("Decode raw bytes error - %v", errors.ErrorStack(err))
			return nil, errors.Trace(err)
		}
		payloads = append(payloads, hbytes)
	}
	return payloads, nil
}
| func readPayloads(r io.Reader) ([][]byte, error) { | |||
| nBytesExpecting, err := iohelper.ReadInt32(r) | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| if nBytesExpecting > 0 { | |||
| buf, err := iohelper.ReadN(r, nBytesExpecting) | |||
| // Question: why should we return error only when we get an io.EOF error? | |||
| if err != nil && ErrorEqual(err, io.EOF) { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| payloads, err := processMessage(buf) | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| if len(payloads) > 0 { | |||
| return payloads, nil | |||
| } | |||
| } | |||
| return nil, errors.New("unexpected payload") | |||
| } | |||
| func newConnection(addr string, srvType ServiceType) (*connection, error) { | |||
| conn, err := net.Dial("tcp", addr) | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| if _, ok := ServiceString[srvType]; !ok { | |||
| return nil, errors.Errorf("unexpected service type [serviceType=%d]", srvType) | |||
| } | |||
| c := &connection{ | |||
| addr: addr, | |||
| bw: bufio.NewWriter(conn), | |||
| conn: conn, | |||
| in: make(chan *iohelper.PbBuffer, 20), | |||
| serviceType: srvType, | |||
| idGen: newIdGenerator(), | |||
| ongoingCalls: map[int]*call{}, | |||
| } | |||
| err = c.init() | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| return c, nil | |||
| } | |||
// init performs the wire handshake (magic header + connection header) and
// starts the reader (processMessages) and writer (dispatch) goroutines.
// NOTE(review): neither goroutine has an explicit stop signal; they run
// until a read/write error — confirm lifetime expectations at call sites.
func (c *connection) init() error {
	err := c.writeHead()
	if err != nil {
		return errors.Trace(err)
	}
	err = c.writeConnectionHeader()
	if err != nil {
		return errors.Trace(err)
	}
	go func() {
		err := c.processMessages()
		if err != nil {
			log.Warnf("process messages failed - %v", errors.ErrorStack(err))
			return
		}
	}()
	go c.dispatch()
	return nil
}
// processMessages is the reader loop: it decodes each response frame,
// looks up the pending call by the call id in the response header, and
// completes that call. It returns (ending the goroutine) on the first
// read or decode error.
func (c *connection) processMessages() error {
	for {
		msgs, err := readPayloads(c.conn)
		if err != nil {
			return errors.Trace(err)
		}
		// Payload 0 is always the ResponseHeader.
		var rh proto.ResponseHeader
		err = pb.Unmarshal(msgs[0], &rh)
		if err != nil {
			return errors.Trace(err)
		}
		callId := rh.GetCallId()
		// Look up and remove the pending call under the lock so the writer
		// side cannot race with completion.
		c.mu.Lock()
		call, ok := c.ongoingCalls[int(callId)]
		if !ok {
			c.mu.Unlock()
			return errors.Errorf("Invalid call id: %d", callId)
		}
		delete(c.ongoingCalls, int(callId))
		c.mu.Unlock()
		exception := rh.GetException()
		if exception != nil {
			call.complete(errors.Errorf("Exception returned: %s\n%s", exception.GetExceptionClassName(), exception.GetStackTrace()), nil)
		} else if len(msgs) == 2 {
			call.complete(nil, msgs[1])
		}
		// NOTE(review): if there is no exception and len(msgs) != 2, the
		// call was removed but never completed — confirm callers cannot
		// block forever on this path.
	}
}
| func (c *connection) writeHead() error { | |||
| buf := bytes.NewBuffer(nil) | |||
| buf.Write(hbaseHeaderBytes) | |||
| buf.WriteByte(0) | |||
| buf.WriteByte(80) | |||
| _, err := c.conn.Write(buf.Bytes()) | |||
| return errors.Trace(err) | |||
| } | |||
| func (c *connection) writeConnectionHeader() error { | |||
| buf := iohelper.NewPbBuffer() | |||
| service := pb.String(ServiceString[c.serviceType]) | |||
| err := buf.WritePBMessage(&proto.ConnectionHeader{ | |||
| UserInfo: &proto.UserInformation{ | |||
| EffectiveUser: pb.String("pingcap"), | |||
| }, | |||
| ServiceName: service, | |||
| }) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| err = buf.PrependSize() | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| _, err = c.conn.Write(buf.Bytes()) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| return nil | |||
| } | |||
| func (c *connection) dispatch() { | |||
| for { | |||
| select { | |||
| case buf := <-c.in: | |||
| // TODO: add error check. | |||
| c.bw.Write(buf.Bytes()) | |||
| if len(c.in) == 0 { | |||
| c.bw.Flush() | |||
| } | |||
| } | |||
| } | |||
| } | |||
// call assigns the next call id, frames the request header and request
// body, registers the pending call, and queues the frame for dispatch.
func (c *connection) call(request *call) error {
	id := c.idGen.incrAndGet()
	rh := &proto.RequestHeader{
		CallId:       pb.Uint32(uint32(id)),
		MethodName:   pb.String(request.methodName),
		RequestParam: pb.Bool(true),
	}
	request.id = uint32(id)
	bfrh := iohelper.NewPbBuffer()
	err := bfrh.WritePBMessage(rh)
	if err != nil {
		return errors.Trace(err)
	}
	bfr := iohelper.NewPbBuffer()
	err = bfr.WritePBMessage(request.request)
	if err != nil {
		return errors.Trace(err)
	}
	// Buf =>
	// | total size | pb1 size | pb1 | pb2 size | pb2 | ...
	buf := iohelper.NewPbBuffer()
	buf.WriteDelimitedBuffers(bfrh, bfr)
	// Registering and enqueueing under one lock keeps the reader goroutine
	// from seeing a response for a call not yet in ongoingCalls.
	c.mu.Lock()
	c.ongoingCalls[id] = request
	c.in <- buf
	c.mu.Unlock()
	return nil
}
// close tears down the underlying TCP connection; the reader goroutine
// then exits on its next read error.
func (c *connection) close() error {
	return c.conn.Close()
}
| @@ -1,113 +0,0 @@ | |||
| package hbase | |||
| import ( | |||
| pb "github.com/golang/protobuf/proto" | |||
| "github.com/pingcap/go-hbase/proto" | |||
| "fmt" | |||
| "math" | |||
| "strings" | |||
| ) | |||
// Delete describes an HBase delete mutation: the target row, the set of
// families touched, the per-family qualifier sets, and optional
// per-column timestamps keyed by "family:qualifier".
type Delete struct {
	Row         []byte
	Families    set
	FamilyQuals map[string]set
	Ts          map[string]uint64
}
| func NewDelete(row []byte) *Delete { | |||
| return &Delete{ | |||
| Row: row, | |||
| Families: newSet(), | |||
| FamilyQuals: make(map[string]set), | |||
| Ts: make(map[string]uint64), | |||
| } | |||
| } | |||
| func (d *Delete) AddString(famqual string) error { | |||
| parts := strings.Split(famqual, ":") | |||
| if len(parts) > 2 { | |||
| return fmt.Errorf("Too many colons were found in the family:qualifier string. '%s'", famqual) | |||
| } else if len(parts) == 2 { | |||
| d.AddStringColumn(parts[0], parts[1]) | |||
| } else { | |||
| d.AddStringFamily(famqual) | |||
| } | |||
| return nil | |||
| } | |||
// GetRow returns the row this delete targets.
func (d *Delete) GetRow() []byte {
	return d.Row
}
| func (d *Delete) AddColumn(family, qual []byte) *Delete { | |||
| d.AddFamily(family) | |||
| d.FamilyQuals[string(family)].add(string(qual)) | |||
| return d | |||
| } | |||
// AddStringColumn is the string-typed convenience wrapper for AddColumn.
func (d *Delete) AddStringColumn(family, qual string) *Delete {
	return d.AddColumn([]byte(family), []byte(qual))
}
| func (d *Delete) AddFamily(family []byte) *Delete { | |||
| d.Families.add(string(family)) | |||
| if _, ok := d.FamilyQuals[string(family)]; !ok { | |||
| d.FamilyQuals[string(family)] = newSet() | |||
| } | |||
| return d | |||
| } | |||
// AddStringFamily is the string-typed convenience wrapper for AddFamily.
func (d *Delete) AddStringFamily(family string) *Delete {
	return d.AddFamily([]byte(family))
}
| func (d *Delete) AddColumnWithTimestamp(family, qual []byte, ts uint64) *Delete { | |||
| d.AddColumn(family, qual) | |||
| k := string(family) + ":" + string(qual) | |||
| d.Ts[k] = ts | |||
| return d | |||
| } | |||
// ToProto converts the Delete to its protobuf MutationProto:
//   - a family with no qualifiers becomes one DELETE_FAMILY entry;
//   - each qualifier becomes DELETE_MULTIPLE_VERSIONS at MaxInt64, unless
//     a timestamp was recorded for it, in which case it becomes
//     DELETE_ONE_VERSION at that timestamp.
func (d *Delete) ToProto() pb.Message {
	del := &proto.MutationProto{
		Row:        d.Row,
		MutateType: proto.MutationProto_DELETE.Enum(),
	}
	for family := range d.Families {
		cv := &proto.MutationProto_ColumnValue{
			Family:         []byte(family),
			QualifierValue: make([]*proto.MutationProto_ColumnValue_QualifierValue, 0),
		}
		if len(d.FamilyQuals[family]) == 0 {
			// No qualifiers recorded: delete the whole family.
			cv.QualifierValue = append(cv.QualifierValue, &proto.MutationProto_ColumnValue_QualifierValue{
				Qualifier:  nil,
				Timestamp:  pb.Uint64(uint64(math.MaxInt64)),
				DeleteType: proto.MutationProto_DELETE_FAMILY.Enum(),
			})
		}
		for qual := range d.FamilyQuals[family] {
			v := &proto.MutationProto_ColumnValue_QualifierValue{
				Qualifier:  []byte(qual),
				Timestamp:  pb.Uint64(uint64(math.MaxInt64)),
				DeleteType: proto.MutationProto_DELETE_MULTIPLE_VERSIONS.Enum(),
			}
			// A recorded timestamp narrows the delete to one version.
			tsKey := string(family) + ":" + string(qual)
			if ts, ok := d.Ts[tsKey]; ok {
				v.Timestamp = pb.Uint64(ts)
				v.DeleteType = proto.MutationProto_DELETE_ONE_VERSION.Enum()
			}
			cv.QualifierValue = append(cv.QualifierValue, v)
		}
		del.ColumnValue = append(del.ColumnValue, cv)
	}
	return del
}
| @@ -1,105 +0,0 @@ | |||
| package hbase | |||
| import ( | |||
| "strings" | |||
| pb "github.com/golang/protobuf/proto" | |||
| "github.com/juju/errors" | |||
| "github.com/pingcap/go-hbase/proto" | |||
| ) | |||
// Get describes an HBase read: the target row, requested families and
// per-family qualifiers, how many versions to fetch, and an optional
// timestamp range (sent only when both bounds are set; see ToProto).
type Get struct {
	Row         []byte
	Families    set
	FamilyQuals map[string]set
	Versions    int32
	TsRangeFrom uint64
	TsRangeTo   uint64
}
| func NewGet(row []byte) *Get { | |||
| return &Get{ | |||
| Row: append([]byte(nil), row...), | |||
| Families: newSet(), | |||
| FamilyQuals: make(map[string]set), | |||
| Versions: 1, | |||
| } | |||
| } | |||
// GetRow returns the row this get targets.
func (g *Get) GetRow() []byte {
	return g.Row
}
| func (g *Get) AddString(famqual string) error { | |||
| parts := strings.Split(famqual, ":") | |||
| if len(parts) > 2 { | |||
| return errors.Errorf("Too many colons were found in the family:qualifier string. '%s'", famqual) | |||
| } else if len(parts) == 2 { | |||
| g.AddStringColumn(parts[0], parts[1]) | |||
| } else { | |||
| g.AddStringFamily(famqual) | |||
| } | |||
| return nil | |||
| } | |||
| func (g *Get) AddColumn(family, qual []byte) *Get { | |||
| g.AddFamily(family) | |||
| g.FamilyQuals[string(family)].add(string(qual)) | |||
| return g | |||
| } | |||
// AddStringColumn is the string-typed convenience wrapper for AddColumn.
func (g *Get) AddStringColumn(family, qual string) *Get {
	return g.AddColumn([]byte(family), []byte(qual))
}
| func (g *Get) AddFamily(family []byte) *Get { | |||
| g.Families.add(string(family)) | |||
| if _, ok := g.FamilyQuals[string(family)]; !ok { | |||
| g.FamilyQuals[string(family)] = newSet() | |||
| } | |||
| return g | |||
| } | |||
// AddStringFamily is the string-typed convenience wrapper for AddFamily.
func (g *Get) AddStringFamily(family string) *Get {
	return g.AddFamily([]byte(family))
}

// AddTimeRange restricts the get to the given timestamp range (bound
// semantics are interpreted server-side). The range is only sent when
// both values are non-zero and from <= to; see ToProto.
func (g *Get) AddTimeRange(from uint64, to uint64) *Get {
	g.TsRangeFrom = from
	g.TsRangeTo = to
	return g
}

// SetMaxVersion sets how many cell versions to fetch (default 1).
func (g *Get) SetMaxVersion(maxVersion int32) *Get {
	g.Versions = maxVersion
	return g
}
// ToProto converts the Get to its protobuf request message.
func (g *Get) ToProto() pb.Message {
	get := &proto.Get{
		Row: g.Row,
	}
	// Only attach a TimeRange when both bounds are set and ordered.
	if g.TsRangeFrom != 0 && g.TsRangeTo != 0 && g.TsRangeFrom <= g.TsRangeTo {
		get.TimeRange = &proto.TimeRange{
			From: pb.Uint64(g.TsRangeFrom),
			To:   pb.Uint64(g.TsRangeTo),
		}
	}
	// One proto.Column per family; an empty Qualifier list means the
	// whole family is requested.
	for v := range g.Families {
		col := &proto.Column{
			Family: []byte(v),
		}
		var quals [][]byte
		for qual := range g.FamilyQuals[v] {
			quals = append(quals, []byte(qual))
		}
		col.Qualifier = quals
		get.Column = append(get.Column, col)
	}
	get.MaxVersions = pb.Uint32(uint32(g.Versions))
	return get
}
| @@ -1,8 +0,0 @@ | |||
| package iohelper | |||
| import "io" | |||
// ByteMultiReader combines byte-at-a-time and bulk reads, as required by
// the var-bytes decoding helpers in this package.
type ByteMultiReader interface {
	io.ByteReader
	io.Reader
}
| @@ -1,111 +0,0 @@ | |||
| package iohelper | |||
| import ( | |||
| "encoding/binary" | |||
| pb "github.com/golang/protobuf/proto" | |||
| "github.com/juju/errors" | |||
| ) | |||
// PbBuffer is a growable byte buffer used to assemble protobuf frames.
type PbBuffer struct {
	b []byte
}
| func NewPbBuffer() *PbBuffer { | |||
| b := []byte{} | |||
| return &PbBuffer{ | |||
| b: b, | |||
| } | |||
| } | |||
// Bytes returns the accumulated bytes without copying.
func (b *PbBuffer) Bytes() []byte {
	return b.b
}

// Write appends d to the buffer; it never fails.
func (b *PbBuffer) Write(d []byte) (int, error) {
	b.b = append(b.b, d...)
	return len(d), nil
}
| func (b *PbBuffer) WriteByte(d byte) error { | |||
| return binary.Write(b, binary.BigEndian, d) | |||
| } | |||
| func (b *PbBuffer) WriteString(d string) error { | |||
| return binary.Write(b, binary.BigEndian, d) | |||
| } | |||
// WriteInt32 appends d in big-endian byte order.
func (b *PbBuffer) WriteInt32(d int32) error {
	return binary.Write(b, binary.BigEndian, d)
}

// WriteInt64 appends d in big-endian byte order.
func (b *PbBuffer) WriteInt64(d int64) error {
	return binary.Write(b, binary.BigEndian, d)
}

// WriteFloat32 appends the IEEE 754 representation of d, big-endian.
func (b *PbBuffer) WriteFloat32(d float32) error {
	return binary.Write(b, binary.BigEndian, d)
}

// WriteFloat64 appends the IEEE 754 representation of d, big-endian.
func (b *PbBuffer) WriteFloat64(d float64) error {
	return binary.Write(b, binary.BigEndian, d)
}
| func (b *PbBuffer) WritePBMessage(d pb.Message) error { | |||
| buf, err := pb.Marshal(d) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| _, err = b.Write(buf) | |||
| return errors.Trace(err) | |||
| } | |||
| func (b *PbBuffer) WriteDelimitedBuffers(bufs ...*PbBuffer) error { | |||
| totalLength := 0 | |||
| lens := make([][]byte, len(bufs)) | |||
| for i, v := range bufs { | |||
| n := len(v.Bytes()) | |||
| lenb := pb.EncodeVarint(uint64(n)) | |||
| totalLength += len(lenb) + n | |||
| lens[i] = lenb | |||
| } | |||
| err := b.WriteInt32(int32(totalLength)) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| for i, v := range bufs { | |||
| _, err = b.Write(lens[i]) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| _, err = b.Write(v.Bytes()) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| func (b *PbBuffer) PrependSize() error { | |||
| size := int32(len(b.b)) | |||
| newBuf := NewPbBuffer() | |||
| err := newBuf.WriteInt32(size) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| _, err = newBuf.Write(b.b) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| *b = *newBuf | |||
| return nil | |||
| } | |||
| @@ -1,177 +0,0 @@ | |||
| package iohelper | |||
| import ( | |||
| "bytes" | |||
| "encoding/binary" | |||
| "io" | |||
| "github.com/juju/errors" | |||
| ) | |||
| var ( | |||
| cachedItob [][]byte | |||
| ) | |||
| func init() { | |||
| cachedItob = make([][]byte, 1024) | |||
| for i := 0; i < len(cachedItob); i++ { | |||
| var b bytes.Buffer | |||
| writeVLong(&b, int64(i)) | |||
| cachedItob[i] = b.Bytes() | |||
| } | |||
| } | |||
| func itob(i int) ([]byte, error) { | |||
| if i >= 0 && i < len(cachedItob) { | |||
| return cachedItob[i], nil | |||
| } | |||
| var b bytes.Buffer | |||
| err := binary.Write(&b, binary.BigEndian, i) | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| return b.Bytes(), nil | |||
| } | |||
// decodeVIntSize returns the total byte length of a Hadoop-style vlong
// whose first byte is value: 1 for the single-byte range, otherwise the
// count encoded in the signed marker byte.
// Fix: the marker must be reinterpreted as int8 first. The old code
// compared the unsigned byte (always >= 0) against negative thresholds,
// so every multi-byte vlong was mis-sized as 1.
func decodeVIntSize(value byte) int32 {
	v := int32(int8(value))
	if v >= -112 {
		return 1
	}
	if v < -120 {
		return -119 - v
	}
	return -111 - v
}
// isNegativeVInt reports whether the first byte of a vlong marks a
// negative value: either a negative-range multi-byte marker (< -120) or a
// negative single-byte value (in [-112, 0)).
// Fix: as with decodeVIntSize, the byte must be reinterpreted as int8 —
// the old unsigned comparison made this function return false for every
// input.
func isNegativeVInt(value byte) bool {
	v := int32(int8(value))
	return v < -120 || (v >= -112 && v < 0)
}
| func readVLong(r io.Reader) (int64, error) { | |||
| var firstByte byte | |||
| err := binary.Read(r, binary.BigEndian, &firstByte) | |||
| if err != nil { | |||
| return 0, errors.Trace(err) | |||
| } | |||
| l := decodeVIntSize(firstByte) | |||
| if l == 1 { | |||
| return int64(firstByte), nil | |||
| } | |||
| var ( | |||
| i int64 | |||
| idx int32 | |||
| ) | |||
| for idx = 0; idx < l-1; idx++ { | |||
| var b byte | |||
| err = binary.Read(r, binary.BigEndian, &b) | |||
| if err != nil { | |||
| return 0, errors.Trace(err) | |||
| } | |||
| i <<= 8 | |||
| i |= int64(b & 255) | |||
| } | |||
| if isNegativeVInt(firstByte) { | |||
| return ^i, nil | |||
| } | |||
| return i, nil | |||
| } | |||
// writeVLong writes i in the Hadoop WritableUtils variable-length format:
// values in [-112, 127] are emitted as one signed byte; otherwise a
// marker byte encodes the sign and byte count, followed by the magnitude
// in big-endian order (one's-complemented when negative).
func writeVLong(w io.Writer, i int64) error {
	var err error
	if i >= -112 && i <= 127 {
		err = binary.Write(w, binary.BigEndian, byte(i))
		if err != nil {
			return errors.Trace(err)
		}
	} else {
		// Marker base: -112 for positive, -120 for negative (i is
		// one's-complemented so the magnitude below is non-negative).
		var l int32 = -112
		if i < 0 {
			i = ^i
			l = -120
		}
		var tmp int64
		// Decrement the marker once per significant byte of the magnitude.
		for tmp = i; tmp != 0; l-- {
			tmp >>= 8
		}
		err = binary.Write(w, binary.BigEndian, byte(l))
		if err != nil {
			return errors.Trace(err)
		}
		// Recover the byte count back out of the marker.
		if l < -120 {
			l = -(l + 120)
		} else {
			l = -(l + 112)
		}
		// Emit the magnitude, most significant byte first.
		for idx := l; idx != 0; idx-- {
			var mask int64
			shiftbits := uint((idx - 1) * 8)
			mask = int64(255) << shiftbits
			err = binary.Write(w, binary.BigEndian, byte((i&mask)>>shiftbits))
			if err != nil {
				return errors.Trace(err)
			}
		}
	}
	return nil
}
| func ReadVarBytes(r ByteMultiReader) ([]byte, error) { | |||
| sz, err := readVLong(r) | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| b := make([]byte, sz) | |||
| _, err = r.Read(b) | |||
| if err != nil { | |||
| return nil, errors.Trace(err) | |||
| } | |||
| return b, nil | |||
| } | |||
| func WriteVarBytes(w io.Writer, b []byte) error { | |||
| lenb, err := itob(len(b)) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| _, err = w.Write(lenb) | |||
| if err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| _, err = w.Write(b) | |||
| return errors.Trace(err) | |||
| } | |||
// ReadInt32 reads one big-endian int32 from r.
func ReadInt32(r io.Reader) (int32, error) {
	var n int32
	err := binary.Read(r, binary.BigEndian, &n)
	return n, errors.Trace(err)
}

// ReadN reads exactly n bytes from r (io.ReadFull semantics: a short
// read is reported as an error).
func ReadN(r io.Reader, n int32) ([]byte, error) {
	b := make([]byte, n)
	_, err := io.ReadFull(r, b)
	return b, errors.Trace(err)
}

// ReadUint64 reads one big-endian uint64 from r.
func ReadUint64(r io.Reader) (uint64, error) {
	var n uint64
	err := binary.Read(r, binary.BigEndian, &n)
	return n, errors.Trace(err)
}
| @@ -1,451 +0,0 @@ | |||
| // Code generated by protoc-gen-go. | |||
| // source: AccessControl.proto | |||
| // DO NOT EDIT! | |||
| /* | |||
| Package proto is a generated protocol buffer package. | |||
| It is generated from these files: | |||
| AccessControl.proto | |||
| Admin.proto | |||
| Aggregate.proto | |||
| Authentication.proto | |||
| Cell.proto | |||
| Client.proto | |||
| ClusterId.proto | |||
| ClusterStatus.proto | |||
| Comparator.proto | |||
| Encryption.proto | |||
| ErrorHandling.proto | |||
| FS.proto | |||
| Filter.proto | |||
| HBase.proto | |||
| HFile.proto | |||
| LoadBalancer.proto | |||
| MapReduce.proto | |||
| Master.proto | |||
| MultiRowMutation.proto | |||
| RPC.proto | |||
| RegionServerStatus.proto | |||
| RowProcessor.proto | |||
| SecureBulkLoad.proto | |||
| Snapshot.proto | |||
| Themis.proto | |||
| Tracing.proto | |||
| VisibilityLabels.proto | |||
| WAL.proto | |||
| ZooKeeper.proto | |||
| It has these top-level messages: | |||
| Permission | |||
| TablePermission | |||
| NamespacePermission | |||
| GlobalPermission | |||
| UserPermission | |||
| UsersAndPermissions | |||
| GrantRequest | |||
| GrantResponse | |||
| RevokeRequest | |||
| RevokeResponse | |||
| GetUserPermissionsRequest | |||
| GetUserPermissionsResponse | |||
| CheckPermissionsRequest | |||
| CheckPermissionsResponse | |||
| */ | |||
| package proto | |||
| import proto1 "github.com/golang/protobuf/proto" | |||
| import math "math" | |||
| // Reference imports to suppress errors if they are not otherwise used. | |||
| var _ = proto1.Marshal | |||
| var _ = math.Inf | |||
// NOTE: generated by protoc-gen-go from AccessControl.proto — change the
// .proto and regenerate rather than editing this by hand.
type Permission_Action int32

const (
	Permission_READ   Permission_Action = 0
	Permission_WRITE  Permission_Action = 1
	Permission_EXEC   Permission_Action = 2
	Permission_CREATE Permission_Action = 3
	Permission_ADMIN  Permission_Action = 4
)

var Permission_Action_name = map[int32]string{
	0: "READ",
	1: "WRITE",
	2: "EXEC",
	3: "CREATE",
	4: "ADMIN",
}
var Permission_Action_value = map[string]int32{
	"READ":   0,
	"WRITE":  1,
	"EXEC":   2,
	"CREATE": 3,
	"ADMIN":  4,
}

func (x Permission_Action) Enum() *Permission_Action {
	p := new(Permission_Action)
	*p = x
	return p
}
func (x Permission_Action) String() string {
	return proto1.EnumName(Permission_Action_name, int32(x))
}
func (x *Permission_Action) UnmarshalJSON(data []byte) error {
	value, err := proto1.UnmarshalJSONEnum(Permission_Action_value, data, "Permission_Action")
	if err != nil {
		return err
	}
	*x = Permission_Action(value)
	return nil
}
// NOTE: generated by protoc-gen-go from AccessControl.proto — change the
// .proto and regenerate rather than editing this by hand.
type Permission_Type int32

const (
	Permission_Global    Permission_Type = 1
	Permission_Namespace Permission_Type = 2
	Permission_Table     Permission_Type = 3
)

var Permission_Type_name = map[int32]string{
	1: "Global",
	2: "Namespace",
	3: "Table",
}
var Permission_Type_value = map[string]int32{
	"Global":    1,
	"Namespace": 2,
	"Table":     3,
}

func (x Permission_Type) Enum() *Permission_Type {
	p := new(Permission_Type)
	*p = x
	return p
}
func (x Permission_Type) String() string {
	return proto1.EnumName(Permission_Type_name, int32(x))
}
func (x *Permission_Type) UnmarshalJSON(data []byte) error {
	value, err := proto1.UnmarshalJSONEnum(Permission_Type_value, data, "Permission_Type")
	if err != nil {
		return err
	}
	*x = Permission_Type(value)
	return nil
}
// NOTE: generated by protoc-gen-go from AccessControl.proto — change the
// .proto and regenerate rather than editing this by hand.
type Permission struct {
	Type                *Permission_Type     `protobuf:"varint,1,req,name=type,enum=proto.Permission_Type" json:"type,omitempty"`
	GlobalPermission    *GlobalPermission    `protobuf:"bytes,2,opt,name=global_permission" json:"global_permission,omitempty"`
	NamespacePermission *NamespacePermission `protobuf:"bytes,3,opt,name=namespace_permission" json:"namespace_permission,omitempty"`
	TablePermission     *TablePermission     `protobuf:"bytes,4,opt,name=table_permission" json:"table_permission,omitempty"`
	XXX_unrecognized    []byte               `json:"-"`
}

func (m *Permission) Reset()         { *m = Permission{} }
func (m *Permission) String() string { return proto1.CompactTextString(m) }
func (*Permission) ProtoMessage()    {}

func (m *Permission) GetType() Permission_Type {
	if m != nil && m.Type != nil {
		return *m.Type
	}
	return Permission_Global
}
func (m *Permission) GetGlobalPermission() *GlobalPermission {
	if m != nil {
		return m.GlobalPermission
	}
	return nil
}
func (m *Permission) GetNamespacePermission() *NamespacePermission {
	if m != nil {
		return m.NamespacePermission
	}
	return nil
}
func (m *Permission) GetTablePermission() *TablePermission {
	if m != nil {
		return m.TablePermission
	}
	return nil
}
// NOTE: generated by protoc-gen-go from AccessControl.proto — change the
// .proto and regenerate rather than editing this by hand.
type TablePermission struct {
	TableName        *TableName         `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"`
	Family           []byte             `protobuf:"bytes,2,opt,name=family" json:"family,omitempty"`
	Qualifier        []byte             `protobuf:"bytes,3,opt,name=qualifier" json:"qualifier,omitempty"`
	Action           []Permission_Action `protobuf:"varint,4,rep,name=action,enum=proto.Permission_Action" json:"action,omitempty"`
	XXX_unrecognized []byte             `json:"-"`
}

func (m *TablePermission) Reset()         { *m = TablePermission{} }
func (m *TablePermission) String() string { return proto1.CompactTextString(m) }
func (*TablePermission) ProtoMessage()    {}

func (m *TablePermission) GetTableName() *TableName {
	if m != nil {
		return m.TableName
	}
	return nil
}
func (m *TablePermission) GetFamily() []byte {
	if m != nil {
		return m.Family
	}
	return nil
}
func (m *TablePermission) GetQualifier() []byte {
	if m != nil {
		return m.Qualifier
	}
	return nil
}
func (m *TablePermission) GetAction() []Permission_Action {
	if m != nil {
		return m.Action
	}
	return nil
}
| type NamespacePermission struct { | |||
| NamespaceName []byte `protobuf:"bytes,1,opt,name=namespace_name" json:"namespace_name,omitempty"` | |||
| Action []Permission_Action `protobuf:"varint,2,rep,name=action,enum=proto.Permission_Action" json:"action,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *NamespacePermission) Reset() { *m = NamespacePermission{} } | |||
| func (m *NamespacePermission) String() string { return proto1.CompactTextString(m) } | |||
| func (*NamespacePermission) ProtoMessage() {} | |||
| func (m *NamespacePermission) GetNamespaceName() []byte { | |||
| if m != nil { | |||
| return m.NamespaceName | |||
| } | |||
| return nil | |||
| } | |||
| func (m *NamespacePermission) GetAction() []Permission_Action { | |||
| if m != nil { | |||
| return m.Action | |||
| } | |||
| return nil | |||
| } | |||
| type GlobalPermission struct { | |||
| Action []Permission_Action `protobuf:"varint,1,rep,name=action,enum=proto.Permission_Action" json:"action,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GlobalPermission) Reset() { *m = GlobalPermission{} } | |||
| func (m *GlobalPermission) String() string { return proto1.CompactTextString(m) } | |||
| func (*GlobalPermission) ProtoMessage() {} | |||
| func (m *GlobalPermission) GetAction() []Permission_Action { | |||
| if m != nil { | |||
| return m.Action | |||
| } | |||
| return nil | |||
| } | |||
| type UserPermission struct { | |||
| User []byte `protobuf:"bytes,1,req,name=user" json:"user,omitempty"` | |||
| Permission *Permission `protobuf:"bytes,3,req,name=permission" json:"permission,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *UserPermission) Reset() { *m = UserPermission{} } | |||
| func (m *UserPermission) String() string { return proto1.CompactTextString(m) } | |||
| func (*UserPermission) ProtoMessage() {} | |||
| func (m *UserPermission) GetUser() []byte { | |||
| if m != nil { | |||
| return m.User | |||
| } | |||
| return nil | |||
| } | |||
| func (m *UserPermission) GetPermission() *Permission { | |||
| if m != nil { | |||
| return m.Permission | |||
| } | |||
| return nil | |||
| } | |||
| // * | |||
| // Content of the /hbase/acl/<table or namespace> znode. | |||
| type UsersAndPermissions struct { | |||
| UserPermissions []*UsersAndPermissions_UserPermissions `protobuf:"bytes,1,rep,name=user_permissions" json:"user_permissions,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *UsersAndPermissions) Reset() { *m = UsersAndPermissions{} } | |||
| func (m *UsersAndPermissions) String() string { return proto1.CompactTextString(m) } | |||
| func (*UsersAndPermissions) ProtoMessage() {} | |||
| func (m *UsersAndPermissions) GetUserPermissions() []*UsersAndPermissions_UserPermissions { | |||
| if m != nil { | |||
| return m.UserPermissions | |||
| } | |||
| return nil | |||
| } | |||
| type UsersAndPermissions_UserPermissions struct { | |||
| User []byte `protobuf:"bytes,1,req,name=user" json:"user,omitempty"` | |||
| Permissions []*Permission `protobuf:"bytes,2,rep,name=permissions" json:"permissions,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *UsersAndPermissions_UserPermissions) Reset() { *m = UsersAndPermissions_UserPermissions{} } | |||
| func (m *UsersAndPermissions_UserPermissions) String() string { return proto1.CompactTextString(m) } | |||
| func (*UsersAndPermissions_UserPermissions) ProtoMessage() {} | |||
| func (m *UsersAndPermissions_UserPermissions) GetUser() []byte { | |||
| if m != nil { | |||
| return m.User | |||
| } | |||
| return nil | |||
| } | |||
| func (m *UsersAndPermissions_UserPermissions) GetPermissions() []*Permission { | |||
| if m != nil { | |||
| return m.Permissions | |||
| } | |||
| return nil | |||
| } | |||
| type GrantRequest struct { | |||
| UserPermission *UserPermission `protobuf:"bytes,1,req,name=user_permission" json:"user_permission,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GrantRequest) Reset() { *m = GrantRequest{} } | |||
| func (m *GrantRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*GrantRequest) ProtoMessage() {} | |||
| func (m *GrantRequest) GetUserPermission() *UserPermission { | |||
| if m != nil { | |||
| return m.UserPermission | |||
| } | |||
| return nil | |||
| } | |||
| type GrantResponse struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GrantResponse) Reset() { *m = GrantResponse{} } | |||
| func (m *GrantResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*GrantResponse) ProtoMessage() {} | |||
| type RevokeRequest struct { | |||
| UserPermission *UserPermission `protobuf:"bytes,1,req,name=user_permission" json:"user_permission,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *RevokeRequest) Reset() { *m = RevokeRequest{} } | |||
| func (m *RevokeRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*RevokeRequest) ProtoMessage() {} | |||
| func (m *RevokeRequest) GetUserPermission() *UserPermission { | |||
| if m != nil { | |||
| return m.UserPermission | |||
| } | |||
| return nil | |||
| } | |||
| type RevokeResponse struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *RevokeResponse) Reset() { *m = RevokeResponse{} } | |||
| func (m *RevokeResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*RevokeResponse) ProtoMessage() {} | |||
| type GetUserPermissionsRequest struct { | |||
| Type *Permission_Type `protobuf:"varint,1,opt,name=type,enum=proto.Permission_Type" json:"type,omitempty"` | |||
| TableName *TableName `protobuf:"bytes,2,opt,name=table_name" json:"table_name,omitempty"` | |||
| NamespaceName []byte `protobuf:"bytes,3,opt,name=namespace_name" json:"namespace_name,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GetUserPermissionsRequest) Reset() { *m = GetUserPermissionsRequest{} } | |||
| func (m *GetUserPermissionsRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*GetUserPermissionsRequest) ProtoMessage() {} | |||
| func (m *GetUserPermissionsRequest) GetType() Permission_Type { | |||
| if m != nil && m.Type != nil { | |||
| return *m.Type | |||
| } | |||
| return Permission_Global | |||
| } | |||
| func (m *GetUserPermissionsRequest) GetTableName() *TableName { | |||
| if m != nil { | |||
| return m.TableName | |||
| } | |||
| return nil | |||
| } | |||
| func (m *GetUserPermissionsRequest) GetNamespaceName() []byte { | |||
| if m != nil { | |||
| return m.NamespaceName | |||
| } | |||
| return nil | |||
| } | |||
| type GetUserPermissionsResponse struct { | |||
| UserPermission []*UserPermission `protobuf:"bytes,1,rep,name=user_permission" json:"user_permission,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GetUserPermissionsResponse) Reset() { *m = GetUserPermissionsResponse{} } | |||
| func (m *GetUserPermissionsResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*GetUserPermissionsResponse) ProtoMessage() {} | |||
| func (m *GetUserPermissionsResponse) GetUserPermission() []*UserPermission { | |||
| if m != nil { | |||
| return m.UserPermission | |||
| } | |||
| return nil | |||
| } | |||
| type CheckPermissionsRequest struct { | |||
| Permission []*Permission `protobuf:"bytes,1,rep,name=permission" json:"permission,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *CheckPermissionsRequest) Reset() { *m = CheckPermissionsRequest{} } | |||
| func (m *CheckPermissionsRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*CheckPermissionsRequest) ProtoMessage() {} | |||
| func (m *CheckPermissionsRequest) GetPermission() []*Permission { | |||
| if m != nil { | |||
| return m.Permission | |||
| } | |||
| return nil | |||
| } | |||
| type CheckPermissionsResponse struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *CheckPermissionsResponse) Reset() { *m = CheckPermissionsResponse{} } | |||
| func (m *CheckPermissionsResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*CheckPermissionsResponse) ProtoMessage() {} | |||
| func init() { | |||
| proto1.RegisterEnum("proto.Permission_Action", Permission_Action_name, Permission_Action_value) | |||
| proto1.RegisterEnum("proto.Permission_Type", Permission_Type_name, Permission_Type_value) | |||
| } | |||
| @@ -1,769 +0,0 @@ | |||
| // Code generated by protoc-gen-go. | |||
| // source: Admin.proto | |||
| // DO NOT EDIT! | |||
| package proto | |||
| import proto1 "github.com/golang/protobuf/proto" | |||
| import math "math" | |||
| // Reference imports to suppress errors if they are not otherwise used. | |||
| var _ = proto1.Marshal | |||
| var _ = math.Inf | |||
| type GetRegionInfoResponse_CompactionState int32 | |||
| const ( | |||
| GetRegionInfoResponse_NONE GetRegionInfoResponse_CompactionState = 0 | |||
| GetRegionInfoResponse_MINOR GetRegionInfoResponse_CompactionState = 1 | |||
| GetRegionInfoResponse_MAJOR GetRegionInfoResponse_CompactionState = 2 | |||
| GetRegionInfoResponse_MAJOR_AND_MINOR GetRegionInfoResponse_CompactionState = 3 | |||
| ) | |||
| var GetRegionInfoResponse_CompactionState_name = map[int32]string{ | |||
| 0: "NONE", | |||
| 1: "MINOR", | |||
| 2: "MAJOR", | |||
| 3: "MAJOR_AND_MINOR", | |||
| } | |||
| var GetRegionInfoResponse_CompactionState_value = map[string]int32{ | |||
| "NONE": 0, | |||
| "MINOR": 1, | |||
| "MAJOR": 2, | |||
| "MAJOR_AND_MINOR": 3, | |||
| } | |||
| func (x GetRegionInfoResponse_CompactionState) Enum() *GetRegionInfoResponse_CompactionState { | |||
| p := new(GetRegionInfoResponse_CompactionState) | |||
| *p = x | |||
| return p | |||
| } | |||
| func (x GetRegionInfoResponse_CompactionState) String() string { | |||
| return proto1.EnumName(GetRegionInfoResponse_CompactionState_name, int32(x)) | |||
| } | |||
| func (x *GetRegionInfoResponse_CompactionState) UnmarshalJSON(data []byte) error { | |||
| value, err := proto1.UnmarshalJSONEnum(GetRegionInfoResponse_CompactionState_value, data, "GetRegionInfoResponse_CompactionState") | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *x = GetRegionInfoResponse_CompactionState(value) | |||
| return nil | |||
| } | |||
| type OpenRegionResponse_RegionOpeningState int32 | |||
| const ( | |||
| OpenRegionResponse_OPENED OpenRegionResponse_RegionOpeningState = 0 | |||
| OpenRegionResponse_ALREADY_OPENED OpenRegionResponse_RegionOpeningState = 1 | |||
| OpenRegionResponse_FAILED_OPENING OpenRegionResponse_RegionOpeningState = 2 | |||
| ) | |||
| var OpenRegionResponse_RegionOpeningState_name = map[int32]string{ | |||
| 0: "OPENED", | |||
| 1: "ALREADY_OPENED", | |||
| 2: "FAILED_OPENING", | |||
| } | |||
| var OpenRegionResponse_RegionOpeningState_value = map[string]int32{ | |||
| "OPENED": 0, | |||
| "ALREADY_OPENED": 1, | |||
| "FAILED_OPENING": 2, | |||
| } | |||
| func (x OpenRegionResponse_RegionOpeningState) Enum() *OpenRegionResponse_RegionOpeningState { | |||
| p := new(OpenRegionResponse_RegionOpeningState) | |||
| *p = x | |||
| return p | |||
| } | |||
| func (x OpenRegionResponse_RegionOpeningState) String() string { | |||
| return proto1.EnumName(OpenRegionResponse_RegionOpeningState_name, int32(x)) | |||
| } | |||
| func (x *OpenRegionResponse_RegionOpeningState) UnmarshalJSON(data []byte) error { | |||
| value, err := proto1.UnmarshalJSONEnum(OpenRegionResponse_RegionOpeningState_value, data, "OpenRegionResponse_RegionOpeningState") | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *x = OpenRegionResponse_RegionOpeningState(value) | |||
| return nil | |||
| } | |||
| type GetRegionInfoRequest struct { | |||
| Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` | |||
| CompactionState *bool `protobuf:"varint,2,opt,name=compaction_state" json:"compaction_state,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GetRegionInfoRequest) Reset() { *m = GetRegionInfoRequest{} } | |||
| func (m *GetRegionInfoRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*GetRegionInfoRequest) ProtoMessage() {} | |||
| func (m *GetRegionInfoRequest) GetRegion() *RegionSpecifier { | |||
| if m != nil { | |||
| return m.Region | |||
| } | |||
| return nil | |||
| } | |||
| func (m *GetRegionInfoRequest) GetCompactionState() bool { | |||
| if m != nil && m.CompactionState != nil { | |||
| return *m.CompactionState | |||
| } | |||
| return false | |||
| } | |||
| type GetRegionInfoResponse struct { | |||
| RegionInfo *RegionInfo `protobuf:"bytes,1,req,name=region_info" json:"region_info,omitempty"` | |||
| CompactionState *GetRegionInfoResponse_CompactionState `protobuf:"varint,2,opt,name=compaction_state,enum=proto.GetRegionInfoResponse_CompactionState" json:"compaction_state,omitempty"` | |||
| IsRecovering *bool `protobuf:"varint,3,opt,name=isRecovering" json:"isRecovering,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GetRegionInfoResponse) Reset() { *m = GetRegionInfoResponse{} } | |||
| func (m *GetRegionInfoResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*GetRegionInfoResponse) ProtoMessage() {} | |||
| func (m *GetRegionInfoResponse) GetRegionInfo() *RegionInfo { | |||
| if m != nil { | |||
| return m.RegionInfo | |||
| } | |||
| return nil | |||
| } | |||
| func (m *GetRegionInfoResponse) GetCompactionState() GetRegionInfoResponse_CompactionState { | |||
| if m != nil && m.CompactionState != nil { | |||
| return *m.CompactionState | |||
| } | |||
| return GetRegionInfoResponse_NONE | |||
| } | |||
| func (m *GetRegionInfoResponse) GetIsRecovering() bool { | |||
| if m != nil && m.IsRecovering != nil { | |||
| return *m.IsRecovering | |||
| } | |||
| return false | |||
| } | |||
| // * | |||
| // Get a list of store files for a set of column families in a particular region. | |||
| // If no column family is specified, get the store files for all column families. | |||
| type GetStoreFileRequest struct { | |||
| Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` | |||
| Family [][]byte `protobuf:"bytes,2,rep,name=family" json:"family,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GetStoreFileRequest) Reset() { *m = GetStoreFileRequest{} } | |||
| func (m *GetStoreFileRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*GetStoreFileRequest) ProtoMessage() {} | |||
| func (m *GetStoreFileRequest) GetRegion() *RegionSpecifier { | |||
| if m != nil { | |||
| return m.Region | |||
| } | |||
| return nil | |||
| } | |||
| func (m *GetStoreFileRequest) GetFamily() [][]byte { | |||
| if m != nil { | |||
| return m.Family | |||
| } | |||
| return nil | |||
| } | |||
| type GetStoreFileResponse struct { | |||
| StoreFile []string `protobuf:"bytes,1,rep,name=store_file" json:"store_file,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GetStoreFileResponse) Reset() { *m = GetStoreFileResponse{} } | |||
| func (m *GetStoreFileResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*GetStoreFileResponse) ProtoMessage() {} | |||
| func (m *GetStoreFileResponse) GetStoreFile() []string { | |||
| if m != nil { | |||
| return m.StoreFile | |||
| } | |||
| return nil | |||
| } | |||
| type GetOnlineRegionRequest struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GetOnlineRegionRequest) Reset() { *m = GetOnlineRegionRequest{} } | |||
| func (m *GetOnlineRegionRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*GetOnlineRegionRequest) ProtoMessage() {} | |||
| type GetOnlineRegionResponse struct { | |||
| RegionInfo []*RegionInfo `protobuf:"bytes,1,rep,name=region_info" json:"region_info,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GetOnlineRegionResponse) Reset() { *m = GetOnlineRegionResponse{} } | |||
| func (m *GetOnlineRegionResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*GetOnlineRegionResponse) ProtoMessage() {} | |||
| func (m *GetOnlineRegionResponse) GetRegionInfo() []*RegionInfo { | |||
| if m != nil { | |||
| return m.RegionInfo | |||
| } | |||
| return nil | |||
| } | |||
| type OpenRegionRequest struct { | |||
| OpenInfo []*OpenRegionRequest_RegionOpenInfo `protobuf:"bytes,1,rep,name=open_info" json:"open_info,omitempty"` | |||
| // the intended server for this RPC. | |||
| ServerStartCode *uint64 `protobuf:"varint,2,opt,name=serverStartCode" json:"serverStartCode,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *OpenRegionRequest) Reset() { *m = OpenRegionRequest{} } | |||
| func (m *OpenRegionRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*OpenRegionRequest) ProtoMessage() {} | |||
| func (m *OpenRegionRequest) GetOpenInfo() []*OpenRegionRequest_RegionOpenInfo { | |||
| if m != nil { | |||
| return m.OpenInfo | |||
| } | |||
| return nil | |||
| } | |||
| func (m *OpenRegionRequest) GetServerStartCode() uint64 { | |||
| if m != nil && m.ServerStartCode != nil { | |||
| return *m.ServerStartCode | |||
| } | |||
| return 0 | |||
| } | |||
| type OpenRegionRequest_RegionOpenInfo struct { | |||
| Region *RegionInfo `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` | |||
| VersionOfOfflineNode *uint32 `protobuf:"varint,2,opt,name=version_of_offline_node" json:"version_of_offline_node,omitempty"` | |||
| FavoredNodes []*ServerName `protobuf:"bytes,3,rep,name=favored_nodes" json:"favored_nodes,omitempty"` | |||
| // open region for distributedLogReplay | |||
| OpenForDistributedLogReplay *bool `protobuf:"varint,4,opt,name=openForDistributedLogReplay" json:"openForDistributedLogReplay,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *OpenRegionRequest_RegionOpenInfo) Reset() { *m = OpenRegionRequest_RegionOpenInfo{} } | |||
| func (m *OpenRegionRequest_RegionOpenInfo) String() string { return proto1.CompactTextString(m) } | |||
| func (*OpenRegionRequest_RegionOpenInfo) ProtoMessage() {} | |||
| func (m *OpenRegionRequest_RegionOpenInfo) GetRegion() *RegionInfo { | |||
| if m != nil { | |||
| return m.Region | |||
| } | |||
| return nil | |||
| } | |||
| func (m *OpenRegionRequest_RegionOpenInfo) GetVersionOfOfflineNode() uint32 { | |||
| if m != nil && m.VersionOfOfflineNode != nil { | |||
| return *m.VersionOfOfflineNode | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *OpenRegionRequest_RegionOpenInfo) GetFavoredNodes() []*ServerName { | |||
| if m != nil { | |||
| return m.FavoredNodes | |||
| } | |||
| return nil | |||
| } | |||
| func (m *OpenRegionRequest_RegionOpenInfo) GetOpenForDistributedLogReplay() bool { | |||
| if m != nil && m.OpenForDistributedLogReplay != nil { | |||
| return *m.OpenForDistributedLogReplay | |||
| } | |||
| return false | |||
| } | |||
| type OpenRegionResponse struct { | |||
| OpeningState []OpenRegionResponse_RegionOpeningState `protobuf:"varint,1,rep,name=opening_state,enum=proto.OpenRegionResponse_RegionOpeningState" json:"opening_state,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *OpenRegionResponse) Reset() { *m = OpenRegionResponse{} } | |||
| func (m *OpenRegionResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*OpenRegionResponse) ProtoMessage() {} | |||
| func (m *OpenRegionResponse) GetOpeningState() []OpenRegionResponse_RegionOpeningState { | |||
| if m != nil { | |||
| return m.OpeningState | |||
| } | |||
| return nil | |||
| } | |||
| // * | |||
| // Closes the specified region and will use or not use ZK during the close | |||
| // according to the specified flag. | |||
| type CloseRegionRequest struct { | |||
| Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` | |||
| VersionOfClosingNode *uint32 `protobuf:"varint,2,opt,name=version_of_closing_node" json:"version_of_closing_node,omitempty"` | |||
| TransitionIn_ZK *bool `protobuf:"varint,3,opt,name=transition_in_ZK,def=1" json:"transition_in_ZK,omitempty"` | |||
| DestinationServer *ServerName `protobuf:"bytes,4,opt,name=destination_server" json:"destination_server,omitempty"` | |||
| // the intended server for this RPC. | |||
| ServerStartCode *uint64 `protobuf:"varint,5,opt,name=serverStartCode" json:"serverStartCode,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *CloseRegionRequest) Reset() { *m = CloseRegionRequest{} } | |||
| func (m *CloseRegionRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*CloseRegionRequest) ProtoMessage() {} | |||
| const Default_CloseRegionRequest_TransitionIn_ZK bool = true | |||
| func (m *CloseRegionRequest) GetRegion() *RegionSpecifier { | |||
| if m != nil { | |||
| return m.Region | |||
| } | |||
| return nil | |||
| } | |||
| func (m *CloseRegionRequest) GetVersionOfClosingNode() uint32 { | |||
| if m != nil && m.VersionOfClosingNode != nil { | |||
| return *m.VersionOfClosingNode | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *CloseRegionRequest) GetTransitionIn_ZK() bool { | |||
| if m != nil && m.TransitionIn_ZK != nil { | |||
| return *m.TransitionIn_ZK | |||
| } | |||
| return Default_CloseRegionRequest_TransitionIn_ZK | |||
| } | |||
| func (m *CloseRegionRequest) GetDestinationServer() *ServerName { | |||
| if m != nil { | |||
| return m.DestinationServer | |||
| } | |||
| return nil | |||
| } | |||
| func (m *CloseRegionRequest) GetServerStartCode() uint64 { | |||
| if m != nil && m.ServerStartCode != nil { | |||
| return *m.ServerStartCode | |||
| } | |||
| return 0 | |||
| } | |||
| type CloseRegionResponse struct { | |||
| Closed *bool `protobuf:"varint,1,req,name=closed" json:"closed,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *CloseRegionResponse) Reset() { *m = CloseRegionResponse{} } | |||
| func (m *CloseRegionResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*CloseRegionResponse) ProtoMessage() {} | |||
| func (m *CloseRegionResponse) GetClosed() bool { | |||
| if m != nil && m.Closed != nil { | |||
| return *m.Closed | |||
| } | |||
| return false | |||
| } | |||
| // * | |||
| // Flushes the MemStore of the specified region. | |||
| // <p> | |||
| // This method is synchronous. | |||
| type FlushRegionRequest struct { | |||
| Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` | |||
| IfOlderThanTs *uint64 `protobuf:"varint,2,opt,name=if_older_than_ts" json:"if_older_than_ts,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *FlushRegionRequest) Reset() { *m = FlushRegionRequest{} } | |||
| func (m *FlushRegionRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*FlushRegionRequest) ProtoMessage() {} | |||
| func (m *FlushRegionRequest) GetRegion() *RegionSpecifier { | |||
| if m != nil { | |||
| return m.Region | |||
| } | |||
| return nil | |||
| } | |||
| func (m *FlushRegionRequest) GetIfOlderThanTs() uint64 { | |||
| if m != nil && m.IfOlderThanTs != nil { | |||
| return *m.IfOlderThanTs | |||
| } | |||
| return 0 | |||
| } | |||
| type FlushRegionResponse struct { | |||
| LastFlushTime *uint64 `protobuf:"varint,1,req,name=last_flush_time" json:"last_flush_time,omitempty"` | |||
| Flushed *bool `protobuf:"varint,2,opt,name=flushed" json:"flushed,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *FlushRegionResponse) Reset() { *m = FlushRegionResponse{} } | |||
| func (m *FlushRegionResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*FlushRegionResponse) ProtoMessage() {} | |||
| func (m *FlushRegionResponse) GetLastFlushTime() uint64 { | |||
| if m != nil && m.LastFlushTime != nil { | |||
| return *m.LastFlushTime | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *FlushRegionResponse) GetFlushed() bool { | |||
| if m != nil && m.Flushed != nil { | |||
| return *m.Flushed | |||
| } | |||
| return false | |||
| } | |||
| // * | |||
| // Splits the specified region. | |||
| // <p> | |||
| // This method currently flushes the region and then forces a compaction which | |||
| // will then trigger a split. The flush is done synchronously but the | |||
| // compaction is asynchronous. | |||
| type SplitRegionRequest struct { | |||
| Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` | |||
| SplitPoint []byte `protobuf:"bytes,2,opt,name=split_point" json:"split_point,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *SplitRegionRequest) Reset() { *m = SplitRegionRequest{} } | |||
| func (m *SplitRegionRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*SplitRegionRequest) ProtoMessage() {} | |||
| func (m *SplitRegionRequest) GetRegion() *RegionSpecifier { | |||
| if m != nil { | |||
| return m.Region | |||
| } | |||
| return nil | |||
| } | |||
| func (m *SplitRegionRequest) GetSplitPoint() []byte { | |||
| if m != nil { | |||
| return m.SplitPoint | |||
| } | |||
| return nil | |||
| } | |||
| type SplitRegionResponse struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *SplitRegionResponse) Reset() { *m = SplitRegionResponse{} } | |||
| func (m *SplitRegionResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*SplitRegionResponse) ProtoMessage() {} | |||
| // * | |||
| // Compacts the specified region. Performs a major compaction if specified. | |||
| // <p> | |||
| // This method is asynchronous. | |||
| type CompactRegionRequest struct { | |||
| Region *RegionSpecifier `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` | |||
| Major *bool `protobuf:"varint,2,opt,name=major" json:"major,omitempty"` | |||
| Family []byte `protobuf:"bytes,3,opt,name=family" json:"family,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *CompactRegionRequest) Reset() { *m = CompactRegionRequest{} } | |||
| func (m *CompactRegionRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*CompactRegionRequest) ProtoMessage() {} | |||
| func (m *CompactRegionRequest) GetRegion() *RegionSpecifier { | |||
| if m != nil { | |||
| return m.Region | |||
| } | |||
| return nil | |||
| } | |||
| func (m *CompactRegionRequest) GetMajor() bool { | |||
| if m != nil && m.Major != nil { | |||
| return *m.Major | |||
| } | |||
| return false | |||
| } | |||
| func (m *CompactRegionRequest) GetFamily() []byte { | |||
| if m != nil { | |||
| return m.Family | |||
| } | |||
| return nil | |||
| } | |||
| type CompactRegionResponse struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *CompactRegionResponse) Reset() { *m = CompactRegionResponse{} } | |||
| func (m *CompactRegionResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*CompactRegionResponse) ProtoMessage() {} | |||
| type UpdateFavoredNodesRequest struct { | |||
| UpdateInfo []*UpdateFavoredNodesRequest_RegionUpdateInfo `protobuf:"bytes,1,rep,name=update_info" json:"update_info,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *UpdateFavoredNodesRequest) Reset() { *m = UpdateFavoredNodesRequest{} } | |||
| func (m *UpdateFavoredNodesRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*UpdateFavoredNodesRequest) ProtoMessage() {} | |||
| func (m *UpdateFavoredNodesRequest) GetUpdateInfo() []*UpdateFavoredNodesRequest_RegionUpdateInfo { | |||
| if m != nil { | |||
| return m.UpdateInfo | |||
| } | |||
| return nil | |||
| } | |||
| type UpdateFavoredNodesRequest_RegionUpdateInfo struct { | |||
| Region *RegionInfo `protobuf:"bytes,1,req,name=region" json:"region,omitempty"` | |||
| FavoredNodes []*ServerName `protobuf:"bytes,2,rep,name=favored_nodes" json:"favored_nodes,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *UpdateFavoredNodesRequest_RegionUpdateInfo) Reset() { | |||
| *m = UpdateFavoredNodesRequest_RegionUpdateInfo{} | |||
| } | |||
| func (m *UpdateFavoredNodesRequest_RegionUpdateInfo) String() string { | |||
| return proto1.CompactTextString(m) | |||
| } | |||
| func (*UpdateFavoredNodesRequest_RegionUpdateInfo) ProtoMessage() {} | |||
| func (m *UpdateFavoredNodesRequest_RegionUpdateInfo) GetRegion() *RegionInfo { | |||
| if m != nil { | |||
| return m.Region | |||
| } | |||
| return nil | |||
| } | |||
| func (m *UpdateFavoredNodesRequest_RegionUpdateInfo) GetFavoredNodes() []*ServerName { | |||
| if m != nil { | |||
| return m.FavoredNodes | |||
| } | |||
| return nil | |||
| } | |||
| type UpdateFavoredNodesResponse struct { | |||
| Response *uint32 `protobuf:"varint,1,opt,name=response" json:"response,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *UpdateFavoredNodesResponse) Reset() { *m = UpdateFavoredNodesResponse{} } | |||
| func (m *UpdateFavoredNodesResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*UpdateFavoredNodesResponse) ProtoMessage() {} | |||
| func (m *UpdateFavoredNodesResponse) GetResponse() uint32 { | |||
| if m != nil && m.Response != nil { | |||
| return *m.Response | |||
| } | |||
| return 0 | |||
| } | |||
| // * | |||
| // Merges the specified regions. | |||
| // <p> | |||
| // This method currently closes the regions and then merges them | |||
| type MergeRegionsRequest struct { | |||
| RegionA *RegionSpecifier `protobuf:"bytes,1,req,name=region_a" json:"region_a,omitempty"` | |||
| RegionB *RegionSpecifier `protobuf:"bytes,2,req,name=region_b" json:"region_b,omitempty"` | |||
| Forcible *bool `protobuf:"varint,3,opt,name=forcible,def=0" json:"forcible,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *MergeRegionsRequest) Reset() { *m = MergeRegionsRequest{} } | |||
| func (m *MergeRegionsRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*MergeRegionsRequest) ProtoMessage() {} | |||
| const Default_MergeRegionsRequest_Forcible bool = false | |||
| func (m *MergeRegionsRequest) GetRegionA() *RegionSpecifier { | |||
| if m != nil { | |||
| return m.RegionA | |||
| } | |||
| return nil | |||
| } | |||
| func (m *MergeRegionsRequest) GetRegionB() *RegionSpecifier { | |||
| if m != nil { | |||
| return m.RegionB | |||
| } | |||
| return nil | |||
| } | |||
| func (m *MergeRegionsRequest) GetForcible() bool { | |||
| if m != nil && m.Forcible != nil { | |||
| return *m.Forcible | |||
| } | |||
| return Default_MergeRegionsRequest_Forcible | |||
| } | |||
| type MergeRegionsResponse struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *MergeRegionsResponse) Reset() { *m = MergeRegionsResponse{} } | |||
| func (m *MergeRegionsResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*MergeRegionsResponse) ProtoMessage() {} | |||
| // Protocol buffer version of WAL for replication | |||
| type WALEntry struct { | |||
| Key *WALKey `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` | |||
| // Following may be null if the KVs/Cells are carried along the side in a cellblock (See | |||
| // RPC for more on cellblocks). If Cells/KVs are in a cellblock, this next field is null | |||
| // and associated_cell_count has count of Cells associated w/ this WALEntry | |||
| KeyValueBytes [][]byte `protobuf:"bytes,2,rep,name=key_value_bytes" json:"key_value_bytes,omitempty"` | |||
| // If Cell data is carried alongside in a cellblock, this is count of Cells in the cellblock. | |||
| AssociatedCellCount *int32 `protobuf:"varint,3,opt,name=associated_cell_count" json:"associated_cell_count,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *WALEntry) Reset() { *m = WALEntry{} } | |||
| func (m *WALEntry) String() string { return proto1.CompactTextString(m) } | |||
| func (*WALEntry) ProtoMessage() {} | |||
| func (m *WALEntry) GetKey() *WALKey { | |||
| if m != nil { | |||
| return m.Key | |||
| } | |||
| return nil | |||
| } | |||
| func (m *WALEntry) GetKeyValueBytes() [][]byte { | |||
| if m != nil { | |||
| return m.KeyValueBytes | |||
| } | |||
| return nil | |||
| } | |||
| func (m *WALEntry) GetAssociatedCellCount() int32 { | |||
| if m != nil && m.AssociatedCellCount != nil { | |||
| return *m.AssociatedCellCount | |||
| } | |||
| return 0 | |||
| } | |||
| // * | |||
| // Replicates the given entries. The guarantee is that the given entries | |||
| // will be durable on the slave cluster if this method returns without | |||
| // any exception. hbase.replication has to be set to true for this to work. | |||
| type ReplicateWALEntryRequest struct { | |||
| Entry []*WALEntry `protobuf:"bytes,1,rep,name=entry" json:"entry,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ReplicateWALEntryRequest) Reset() { *m = ReplicateWALEntryRequest{} } | |||
| func (m *ReplicateWALEntryRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*ReplicateWALEntryRequest) ProtoMessage() {} | |||
| func (m *ReplicateWALEntryRequest) GetEntry() []*WALEntry { | |||
| if m != nil { | |||
| return m.Entry | |||
| } | |||
| return nil | |||
| } | |||
| type ReplicateWALEntryResponse struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ReplicateWALEntryResponse) Reset() { *m = ReplicateWALEntryResponse{} } | |||
| func (m *ReplicateWALEntryResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*ReplicateWALEntryResponse) ProtoMessage() {} | |||
| type RollWALWriterRequest struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *RollWALWriterRequest) Reset() { *m = RollWALWriterRequest{} } | |||
| func (m *RollWALWriterRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*RollWALWriterRequest) ProtoMessage() {} | |||
| type RollWALWriterResponse struct { | |||
| // A list of encoded name of regions to flush | |||
| RegionToFlush [][]byte `protobuf:"bytes,1,rep,name=region_to_flush" json:"region_to_flush,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *RollWALWriterResponse) Reset() { *m = RollWALWriterResponse{} } | |||
| func (m *RollWALWriterResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*RollWALWriterResponse) ProtoMessage() {} | |||
| func (m *RollWALWriterResponse) GetRegionToFlush() [][]byte { | |||
| if m != nil { | |||
| return m.RegionToFlush | |||
| } | |||
| return nil | |||
| } | |||
| type StopServerRequest struct { | |||
| Reason *string `protobuf:"bytes,1,req,name=reason" json:"reason,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *StopServerRequest) Reset() { *m = StopServerRequest{} } | |||
| func (m *StopServerRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*StopServerRequest) ProtoMessage() {} | |||
| func (m *StopServerRequest) GetReason() string { | |||
| if m != nil && m.Reason != nil { | |||
| return *m.Reason | |||
| } | |||
| return "" | |||
| } | |||
| type StopServerResponse struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *StopServerResponse) Reset() { *m = StopServerResponse{} } | |||
| func (m *StopServerResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*StopServerResponse) ProtoMessage() {} | |||
| type GetServerInfoRequest struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GetServerInfoRequest) Reset() { *m = GetServerInfoRequest{} } | |||
| func (m *GetServerInfoRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*GetServerInfoRequest) ProtoMessage() {} | |||
| type ServerInfo struct { | |||
| ServerName *ServerName `protobuf:"bytes,1,req,name=server_name" json:"server_name,omitempty"` | |||
| WebuiPort *uint32 `protobuf:"varint,2,opt,name=webui_port" json:"webui_port,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ServerInfo) Reset() { *m = ServerInfo{} } | |||
| func (m *ServerInfo) String() string { return proto1.CompactTextString(m) } | |||
| func (*ServerInfo) ProtoMessage() {} | |||
| func (m *ServerInfo) GetServerName() *ServerName { | |||
| if m != nil { | |||
| return m.ServerName | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ServerInfo) GetWebuiPort() uint32 { | |||
| if m != nil && m.WebuiPort != nil { | |||
| return *m.WebuiPort | |||
| } | |||
| return 0 | |||
| } | |||
| type GetServerInfoResponse struct { | |||
| ServerInfo *ServerInfo `protobuf:"bytes,1,req,name=server_info" json:"server_info,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GetServerInfoResponse) Reset() { *m = GetServerInfoResponse{} } | |||
| func (m *GetServerInfoResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*GetServerInfoResponse) ProtoMessage() {} | |||
| func (m *GetServerInfoResponse) GetServerInfo() *ServerInfo { | |||
| if m != nil { | |||
| return m.ServerInfo | |||
| } | |||
| return nil | |||
| } | |||
| func init() { | |||
| proto1.RegisterEnum("proto.GetRegionInfoResponse_CompactionState", GetRegionInfoResponse_CompactionState_name, GetRegionInfoResponse_CompactionState_value) | |||
| proto1.RegisterEnum("proto.OpenRegionResponse_RegionOpeningState", OpenRegionResponse_RegionOpeningState_name, OpenRegionResponse_RegionOpeningState_value) | |||
| } | |||
| @@ -1,82 +0,0 @@ | |||
| // Code generated by protoc-gen-go. | |||
| // source: Aggregate.proto | |||
| // DO NOT EDIT! | |||
| package proto | |||
| import proto1 "github.com/golang/protobuf/proto" | |||
| import math "math" | |||
| // Reference imports to suppress errors if they are not otherwise used. | |||
| var _ = proto1.Marshal | |||
| var _ = math.Inf | |||
| type AggregateRequest struct { | |||
| // * The request passed to the AggregateService consists of three parts | |||
| // (1) the (canonical) classname of the ColumnInterpreter implementation | |||
| // (2) the Scan query | |||
| // (3) any bytes required to construct the ColumnInterpreter object | |||
| // properly | |||
| InterpreterClassName *string `protobuf:"bytes,1,req,name=interpreter_class_name" json:"interpreter_class_name,omitempty"` | |||
| Scan *Scan `protobuf:"bytes,2,req,name=scan" json:"scan,omitempty"` | |||
| InterpreterSpecificBytes []byte `protobuf:"bytes,3,opt,name=interpreter_specific_bytes" json:"interpreter_specific_bytes,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *AggregateRequest) Reset() { *m = AggregateRequest{} } | |||
| func (m *AggregateRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*AggregateRequest) ProtoMessage() {} | |||
| func (m *AggregateRequest) GetInterpreterClassName() string { | |||
| if m != nil && m.InterpreterClassName != nil { | |||
| return *m.InterpreterClassName | |||
| } | |||
| return "" | |||
| } | |||
| func (m *AggregateRequest) GetScan() *Scan { | |||
| if m != nil { | |||
| return m.Scan | |||
| } | |||
| return nil | |||
| } | |||
| func (m *AggregateRequest) GetInterpreterSpecificBytes() []byte { | |||
| if m != nil { | |||
| return m.InterpreterSpecificBytes | |||
| } | |||
| return nil | |||
| } | |||
| type AggregateResponse struct { | |||
| // * | |||
| // The AggregateService methods all have a response that either is a Pair | |||
| // or a simple object. When it is a Pair both first_part and second_part | |||
| // have defined values (and the second_part is not present in the response | |||
| // when the response is not a pair). Refer to the AggregateImplementation | |||
| // class for an overview of the AggregateResponse object constructions. | |||
| FirstPart [][]byte `protobuf:"bytes,1,rep,name=first_part" json:"first_part,omitempty"` | |||
| SecondPart []byte `protobuf:"bytes,2,opt,name=second_part" json:"second_part,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *AggregateResponse) Reset() { *m = AggregateResponse{} } | |||
| func (m *AggregateResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*AggregateResponse) ProtoMessage() {} | |||
| func (m *AggregateResponse) GetFirstPart() [][]byte { | |||
| if m != nil { | |||
| return m.FirstPart | |||
| } | |||
| return nil | |||
| } | |||
| func (m *AggregateResponse) GetSecondPart() []byte { | |||
| if m != nil { | |||
| return m.SecondPart | |||
| } | |||
| return nil | |||
| } | |||
| func init() { | |||
| } | |||
| @@ -1,228 +0,0 @@ | |||
| // Code generated by protoc-gen-go. | |||
| // source: Authentication.proto | |||
| // DO NOT EDIT! | |||
| package proto | |||
| import proto1 "github.com/golang/protobuf/proto" | |||
| import math "math" | |||
| // Reference imports to suppress errors if they are not otherwise used. | |||
| var _ = proto1.Marshal | |||
| var _ = math.Inf | |||
| type TokenIdentifier_Kind int32 | |||
| const ( | |||
| TokenIdentifier_HBASE_AUTH_TOKEN TokenIdentifier_Kind = 0 | |||
| ) | |||
| var TokenIdentifier_Kind_name = map[int32]string{ | |||
| 0: "HBASE_AUTH_TOKEN", | |||
| } | |||
| var TokenIdentifier_Kind_value = map[string]int32{ | |||
| "HBASE_AUTH_TOKEN": 0, | |||
| } | |||
| func (x TokenIdentifier_Kind) Enum() *TokenIdentifier_Kind { | |||
| p := new(TokenIdentifier_Kind) | |||
| *p = x | |||
| return p | |||
| } | |||
| func (x TokenIdentifier_Kind) String() string { | |||
| return proto1.EnumName(TokenIdentifier_Kind_name, int32(x)) | |||
| } | |||
| func (x *TokenIdentifier_Kind) UnmarshalJSON(data []byte) error { | |||
| value, err := proto1.UnmarshalJSONEnum(TokenIdentifier_Kind_value, data, "TokenIdentifier_Kind") | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *x = TokenIdentifier_Kind(value) | |||
| return nil | |||
| } | |||
| type AuthenticationKey struct { | |||
| Id *int32 `protobuf:"varint,1,req,name=id" json:"id,omitempty"` | |||
| ExpirationDate *int64 `protobuf:"varint,2,req,name=expiration_date" json:"expiration_date,omitempty"` | |||
| Key []byte `protobuf:"bytes,3,req,name=key" json:"key,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *AuthenticationKey) Reset() { *m = AuthenticationKey{} } | |||
| func (m *AuthenticationKey) String() string { return proto1.CompactTextString(m) } | |||
| func (*AuthenticationKey) ProtoMessage() {} | |||
| func (m *AuthenticationKey) GetId() int32 { | |||
| if m != nil && m.Id != nil { | |||
| return *m.Id | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *AuthenticationKey) GetExpirationDate() int64 { | |||
| if m != nil && m.ExpirationDate != nil { | |||
| return *m.ExpirationDate | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *AuthenticationKey) GetKey() []byte { | |||
| if m != nil { | |||
| return m.Key | |||
| } | |||
| return nil | |||
| } | |||
| type TokenIdentifier struct { | |||
| Kind *TokenIdentifier_Kind `protobuf:"varint,1,req,name=kind,enum=proto.TokenIdentifier_Kind" json:"kind,omitempty"` | |||
| Username []byte `protobuf:"bytes,2,req,name=username" json:"username,omitempty"` | |||
| KeyId *int32 `protobuf:"varint,3,req,name=key_id" json:"key_id,omitempty"` | |||
| IssueDate *int64 `protobuf:"varint,4,opt,name=issue_date" json:"issue_date,omitempty"` | |||
| ExpirationDate *int64 `protobuf:"varint,5,opt,name=expiration_date" json:"expiration_date,omitempty"` | |||
| SequenceNumber *int64 `protobuf:"varint,6,opt,name=sequence_number" json:"sequence_number,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *TokenIdentifier) Reset() { *m = TokenIdentifier{} } | |||
| func (m *TokenIdentifier) String() string { return proto1.CompactTextString(m) } | |||
| func (*TokenIdentifier) ProtoMessage() {} | |||
| func (m *TokenIdentifier) GetKind() TokenIdentifier_Kind { | |||
| if m != nil && m.Kind != nil { | |||
| return *m.Kind | |||
| } | |||
| return TokenIdentifier_HBASE_AUTH_TOKEN | |||
| } | |||
| func (m *TokenIdentifier) GetUsername() []byte { | |||
| if m != nil { | |||
| return m.Username | |||
| } | |||
| return nil | |||
| } | |||
| func (m *TokenIdentifier) GetKeyId() int32 { | |||
| if m != nil && m.KeyId != nil { | |||
| return *m.KeyId | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *TokenIdentifier) GetIssueDate() int64 { | |||
| if m != nil && m.IssueDate != nil { | |||
| return *m.IssueDate | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *TokenIdentifier) GetExpirationDate() int64 { | |||
| if m != nil && m.ExpirationDate != nil { | |||
| return *m.ExpirationDate | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *TokenIdentifier) GetSequenceNumber() int64 { | |||
| if m != nil && m.SequenceNumber != nil { | |||
| return *m.SequenceNumber | |||
| } | |||
| return 0 | |||
| } | |||
| // Serialization of the org.apache.hadoop.security.token.Token class | |||
| // Note that this is a Hadoop class, so fields may change! | |||
| type Token struct { | |||
| // the TokenIdentifier in serialized form | |||
| // Note: we can't use the protobuf directly because the Hadoop Token class | |||
| // only stores the serialized bytes | |||
| Identifier []byte `protobuf:"bytes,1,opt,name=identifier" json:"identifier,omitempty"` | |||
| Password []byte `protobuf:"bytes,2,opt,name=password" json:"password,omitempty"` | |||
| Service []byte `protobuf:"bytes,3,opt,name=service" json:"service,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *Token) Reset() { *m = Token{} } | |||
| func (m *Token) String() string { return proto1.CompactTextString(m) } | |||
| func (*Token) ProtoMessage() {} | |||
| func (m *Token) GetIdentifier() []byte { | |||
| if m != nil { | |||
| return m.Identifier | |||
| } | |||
| return nil | |||
| } | |||
| func (m *Token) GetPassword() []byte { | |||
| if m != nil { | |||
| return m.Password | |||
| } | |||
| return nil | |||
| } | |||
| func (m *Token) GetService() []byte { | |||
| if m != nil { | |||
| return m.Service | |||
| } | |||
| return nil | |||
| } | |||
| // RPC request & response messages | |||
| type GetAuthenticationTokenRequest struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GetAuthenticationTokenRequest) Reset() { *m = GetAuthenticationTokenRequest{} } | |||
| func (m *GetAuthenticationTokenRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*GetAuthenticationTokenRequest) ProtoMessage() {} | |||
| type GetAuthenticationTokenResponse struct { | |||
| Token *Token `protobuf:"bytes,1,opt,name=token" json:"token,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GetAuthenticationTokenResponse) Reset() { *m = GetAuthenticationTokenResponse{} } | |||
| func (m *GetAuthenticationTokenResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*GetAuthenticationTokenResponse) ProtoMessage() {} | |||
| func (m *GetAuthenticationTokenResponse) GetToken() *Token { | |||
| if m != nil { | |||
| return m.Token | |||
| } | |||
| return nil | |||
| } | |||
| type WhoAmIRequest struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *WhoAmIRequest) Reset() { *m = WhoAmIRequest{} } | |||
| func (m *WhoAmIRequest) String() string { return proto1.CompactTextString(m) } | |||
| func (*WhoAmIRequest) ProtoMessage() {} | |||
| type WhoAmIResponse struct { | |||
| Username *string `protobuf:"bytes,1,opt,name=username" json:"username,omitempty"` | |||
| AuthMethod *string `protobuf:"bytes,2,opt,name=auth_method" json:"auth_method,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *WhoAmIResponse) Reset() { *m = WhoAmIResponse{} } | |||
| func (m *WhoAmIResponse) String() string { return proto1.CompactTextString(m) } | |||
| func (*WhoAmIResponse) ProtoMessage() {} | |||
| func (m *WhoAmIResponse) GetUsername() string { | |||
| if m != nil && m.Username != nil { | |||
| return *m.Username | |||
| } | |||
| return "" | |||
| } | |||
| func (m *WhoAmIResponse) GetAuthMethod() string { | |||
| if m != nil && m.AuthMethod != nil { | |||
| return *m.AuthMethod | |||
| } | |||
| return "" | |||
| } | |||
| func init() { | |||
| proto1.RegisterEnum("proto.TokenIdentifier_Kind", TokenIdentifier_Kind_name, TokenIdentifier_Kind_value) | |||
| } | |||
| @@ -1,197 +0,0 @@ | |||
| // Code generated by protoc-gen-go. | |||
| // source: Cell.proto | |||
| // DO NOT EDIT! | |||
| package proto | |||
| import proto1 "github.com/golang/protobuf/proto" | |||
| import math "math" | |||
| // Reference imports to suppress errors if they are not otherwise used. | |||
| var _ = proto1.Marshal | |||
| var _ = math.Inf | |||
| // * | |||
| // The type of the key in a Cell | |||
| type CellType int32 | |||
| const ( | |||
| CellType_MINIMUM CellType = 0 | |||
| CellType_PUT CellType = 4 | |||
| CellType_DELETE CellType = 8 | |||
| CellType_DELETE_COLUMN CellType = 12 | |||
| CellType_DELETE_FAMILY CellType = 14 | |||
| // MAXIMUM is used when searching; you look from maximum on down. | |||
| CellType_MAXIMUM CellType = 255 | |||
| ) | |||
| var CellType_name = map[int32]string{ | |||
| 0: "MINIMUM", | |||
| 4: "PUT", | |||
| 8: "DELETE", | |||
| 12: "DELETE_COLUMN", | |||
| 14: "DELETE_FAMILY", | |||
| 255: "MAXIMUM", | |||
| } | |||
| var CellType_value = map[string]int32{ | |||
| "MINIMUM": 0, | |||
| "PUT": 4, | |||
| "DELETE": 8, | |||
| "DELETE_COLUMN": 12, | |||
| "DELETE_FAMILY": 14, | |||
| "MAXIMUM": 255, | |||
| } | |||
| func (x CellType) Enum() *CellType { | |||
| p := new(CellType) | |||
| *p = x | |||
| return p | |||
| } | |||
| func (x CellType) String() string { | |||
| return proto1.EnumName(CellType_name, int32(x)) | |||
| } | |||
| func (x *CellType) UnmarshalJSON(data []byte) error { | |||
| value, err := proto1.UnmarshalJSONEnum(CellType_value, data, "CellType") | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *x = CellType(value) | |||
| return nil | |||
| } | |||
| // * | |||
| // Protocol buffer version of Cell. | |||
| type Cell struct { | |||
| Row []byte `protobuf:"bytes,1,opt,name=row" json:"row,omitempty"` | |||
| Family []byte `protobuf:"bytes,2,opt,name=family" json:"family,omitempty"` | |||
| Qualifier []byte `protobuf:"bytes,3,opt,name=qualifier" json:"qualifier,omitempty"` | |||
| Timestamp *uint64 `protobuf:"varint,4,opt,name=timestamp" json:"timestamp,omitempty"` | |||
| CellType *CellType `protobuf:"varint,5,opt,name=cell_type,enum=proto.CellType" json:"cell_type,omitempty"` | |||
| Value []byte `protobuf:"bytes,6,opt,name=value" json:"value,omitempty"` | |||
| Tags []byte `protobuf:"bytes,7,opt,name=tags" json:"tags,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *Cell) Reset() { *m = Cell{} } | |||
| func (m *Cell) String() string { return proto1.CompactTextString(m) } | |||
| func (*Cell) ProtoMessage() {} | |||
| func (m *Cell) GetRow() []byte { | |||
| if m != nil { | |||
| return m.Row | |||
| } | |||
| return nil | |||
| } | |||
| func (m *Cell) GetFamily() []byte { | |||
| if m != nil { | |||
| return m.Family | |||
| } | |||
| return nil | |||
| } | |||
| func (m *Cell) GetQualifier() []byte { | |||
| if m != nil { | |||
| return m.Qualifier | |||
| } | |||
| return nil | |||
| } | |||
| func (m *Cell) GetTimestamp() uint64 { | |||
| if m != nil && m.Timestamp != nil { | |||
| return *m.Timestamp | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *Cell) GetCellType() CellType { | |||
| if m != nil && m.CellType != nil { | |||
| return *m.CellType | |||
| } | |||
| return CellType_MINIMUM | |||
| } | |||
| func (m *Cell) GetValue() []byte { | |||
| if m != nil { | |||
| return m.Value | |||
| } | |||
| return nil | |||
| } | |||
| func (m *Cell) GetTags() []byte { | |||
| if m != nil { | |||
| return m.Tags | |||
| } | |||
| return nil | |||
| } | |||
| // * | |||
| // Protocol buffer version of KeyValue. | |||
| // It doesn't have those transient parameters | |||
| type KeyValue struct { | |||
| Row []byte `protobuf:"bytes,1,req,name=row" json:"row,omitempty"` | |||
| Family []byte `protobuf:"bytes,2,req,name=family" json:"family,omitempty"` | |||
| Qualifier []byte `protobuf:"bytes,3,req,name=qualifier" json:"qualifier,omitempty"` | |||
| Timestamp *uint64 `protobuf:"varint,4,opt,name=timestamp" json:"timestamp,omitempty"` | |||
| KeyType *CellType `protobuf:"varint,5,opt,name=key_type,enum=proto.CellType" json:"key_type,omitempty"` | |||
| Value []byte `protobuf:"bytes,6,opt,name=value" json:"value,omitempty"` | |||
| Tags []byte `protobuf:"bytes,7,opt,name=tags" json:"tags,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *KeyValue) Reset() { *m = KeyValue{} } | |||
| func (m *KeyValue) String() string { return proto1.CompactTextString(m) } | |||
| func (*KeyValue) ProtoMessage() {} | |||
| func (m *KeyValue) GetRow() []byte { | |||
| if m != nil { | |||
| return m.Row | |||
| } | |||
| return nil | |||
| } | |||
| func (m *KeyValue) GetFamily() []byte { | |||
| if m != nil { | |||
| return m.Family | |||
| } | |||
| return nil | |||
| } | |||
| func (m *KeyValue) GetQualifier() []byte { | |||
| if m != nil { | |||
| return m.Qualifier | |||
| } | |||
| return nil | |||
| } | |||
| func (m *KeyValue) GetTimestamp() uint64 { | |||
| if m != nil && m.Timestamp != nil { | |||
| return *m.Timestamp | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *KeyValue) GetKeyType() CellType { | |||
| if m != nil && m.KeyType != nil { | |||
| return *m.KeyType | |||
| } | |||
| return CellType_MINIMUM | |||
| } | |||
| func (m *KeyValue) GetValue() []byte { | |||
| if m != nil { | |||
| return m.Value | |||
| } | |||
| return nil | |||
| } | |||
| func (m *KeyValue) GetTags() []byte { | |||
| if m != nil { | |||
| return m.Tags | |||
| } | |||
| return nil | |||
| } | |||
| func init() { | |||
| proto1.RegisterEnum("proto.CellType", CellType_name, CellType_value) | |||
| } | |||
| @@ -1,35 +0,0 @@ | |||
| // Code generated by protoc-gen-go. | |||
| // source: ClusterId.proto | |||
| // DO NOT EDIT! | |||
| package proto | |||
| import proto1 "github.com/golang/protobuf/proto" | |||
| import math "math" | |||
| // Reference imports to suppress errors if they are not otherwise used. | |||
| var _ = proto1.Marshal | |||
| var _ = math.Inf | |||
| // * | |||
| // Content of the '/hbase/hbaseid', cluster id, znode. | |||
| // Also cluster of the ${HBASE_ROOTDIR}/hbase.id file. | |||
| type ClusterId struct { | |||
| // This is the cluster id, a uuid as a String | |||
| ClusterId *string `protobuf:"bytes,1,req,name=cluster_id" json:"cluster_id,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ClusterId) Reset() { *m = ClusterId{} } | |||
| func (m *ClusterId) String() string { return proto1.CompactTextString(m) } | |||
| func (*ClusterId) ProtoMessage() {} | |||
| func (m *ClusterId) GetClusterId() string { | |||
| if m != nil && m.ClusterId != nil { | |||
| return *m.ClusterId | |||
| } | |||
| return "" | |||
| } | |||
| func init() { | |||
| } | |||
| @@ -1,597 +0,0 @@ | |||
| // Code generated by protoc-gen-go. | |||
| // source: ClusterStatus.proto | |||
| // DO NOT EDIT! | |||
| package proto | |||
| import proto1 "github.com/golang/protobuf/proto" | |||
| import math "math" | |||
| // Reference imports to suppress errors if they are not otherwise used. | |||
| var _ = proto1.Marshal | |||
| var _ = math.Inf | |||
| type RegionState_State int32 | |||
| const ( | |||
| RegionState_OFFLINE RegionState_State = 0 | |||
| RegionState_PENDING_OPEN RegionState_State = 1 | |||
| RegionState_OPENING RegionState_State = 2 | |||
| RegionState_OPEN RegionState_State = 3 | |||
| RegionState_PENDING_CLOSE RegionState_State = 4 | |||
| RegionState_CLOSING RegionState_State = 5 | |||
| RegionState_CLOSED RegionState_State = 6 | |||
| RegionState_SPLITTING RegionState_State = 7 | |||
| RegionState_SPLIT RegionState_State = 8 | |||
| RegionState_FAILED_OPEN RegionState_State = 9 | |||
| RegionState_FAILED_CLOSE RegionState_State = 10 | |||
| RegionState_MERGING RegionState_State = 11 | |||
| RegionState_MERGED RegionState_State = 12 | |||
| RegionState_SPLITTING_NEW RegionState_State = 13 | |||
| // region but hasn't be created yet, or master doesn't | |||
| // know it's already created | |||
| RegionState_MERGING_NEW RegionState_State = 14 | |||
| ) | |||
| var RegionState_State_name = map[int32]string{ | |||
| 0: "OFFLINE", | |||
| 1: "PENDING_OPEN", | |||
| 2: "OPENING", | |||
| 3: "OPEN", | |||
| 4: "PENDING_CLOSE", | |||
| 5: "CLOSING", | |||
| 6: "CLOSED", | |||
| 7: "SPLITTING", | |||
| 8: "SPLIT", | |||
| 9: "FAILED_OPEN", | |||
| 10: "FAILED_CLOSE", | |||
| 11: "MERGING", | |||
| 12: "MERGED", | |||
| 13: "SPLITTING_NEW", | |||
| 14: "MERGING_NEW", | |||
| } | |||
| var RegionState_State_value = map[string]int32{ | |||
| "OFFLINE": 0, | |||
| "PENDING_OPEN": 1, | |||
| "OPENING": 2, | |||
| "OPEN": 3, | |||
| "PENDING_CLOSE": 4, | |||
| "CLOSING": 5, | |||
| "CLOSED": 6, | |||
| "SPLITTING": 7, | |||
| "SPLIT": 8, | |||
| "FAILED_OPEN": 9, | |||
| "FAILED_CLOSE": 10, | |||
| "MERGING": 11, | |||
| "MERGED": 12, | |||
| "SPLITTING_NEW": 13, | |||
| "MERGING_NEW": 14, | |||
| } | |||
| func (x RegionState_State) Enum() *RegionState_State { | |||
| p := new(RegionState_State) | |||
| *p = x | |||
| return p | |||
| } | |||
| func (x RegionState_State) String() string { | |||
| return proto1.EnumName(RegionState_State_name, int32(x)) | |||
| } | |||
| func (x *RegionState_State) UnmarshalJSON(data []byte) error { | |||
| value, err := proto1.UnmarshalJSONEnum(RegionState_State_value, data, "RegionState_State") | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *x = RegionState_State(value) | |||
| return nil | |||
| } | |||
| type RegionState struct { | |||
| RegionInfo *RegionInfo `protobuf:"bytes,1,req,name=region_info" json:"region_info,omitempty"` | |||
| State *RegionState_State `protobuf:"varint,2,req,name=state,enum=proto.RegionState_State" json:"state,omitempty"` | |||
| Stamp *uint64 `protobuf:"varint,3,opt,name=stamp" json:"stamp,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *RegionState) Reset() { *m = RegionState{} } | |||
| func (m *RegionState) String() string { return proto1.CompactTextString(m) } | |||
| func (*RegionState) ProtoMessage() {} | |||
| func (m *RegionState) GetRegionInfo() *RegionInfo { | |||
| if m != nil { | |||
| return m.RegionInfo | |||
| } | |||
| return nil | |||
| } | |||
| func (m *RegionState) GetState() RegionState_State { | |||
| if m != nil && m.State != nil { | |||
| return *m.State | |||
| } | |||
| return RegionState_OFFLINE | |||
| } | |||
| func (m *RegionState) GetStamp() uint64 { | |||
| if m != nil && m.Stamp != nil { | |||
| return *m.Stamp | |||
| } | |||
| return 0 | |||
| } | |||
| type RegionInTransition struct { | |||
| Spec *RegionSpecifier `protobuf:"bytes,1,req,name=spec" json:"spec,omitempty"` | |||
| RegionState *RegionState `protobuf:"bytes,2,req,name=region_state" json:"region_state,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *RegionInTransition) Reset() { *m = RegionInTransition{} } | |||
| func (m *RegionInTransition) String() string { return proto1.CompactTextString(m) } | |||
| func (*RegionInTransition) ProtoMessage() {} | |||
| func (m *RegionInTransition) GetSpec() *RegionSpecifier { | |||
| if m != nil { | |||
| return m.Spec | |||
| } | |||
| return nil | |||
| } | |||
| func (m *RegionInTransition) GetRegionState() *RegionState { | |||
| if m != nil { | |||
| return m.RegionState | |||
| } | |||
| return nil | |||
| } | |||
| type RegionLoad struct { | |||
| // * the region specifier | |||
| RegionSpecifier *RegionSpecifier `protobuf:"bytes,1,req,name=region_specifier" json:"region_specifier,omitempty"` | |||
| // * the number of stores for the region | |||
| Stores *uint32 `protobuf:"varint,2,opt,name=stores" json:"stores,omitempty"` | |||
| // * the number of storefiles for the region | |||
| Storefiles *uint32 `protobuf:"varint,3,opt,name=storefiles" json:"storefiles,omitempty"` | |||
| // * the total size of the store files for the region, uncompressed, in MB | |||
| StoreUncompressedSize_MB *uint32 `protobuf:"varint,4,opt,name=store_uncompressed_size_MB" json:"store_uncompressed_size_MB,omitempty"` | |||
| // * the current total size of the store files for the region, in MB | |||
| StorefileSize_MB *uint32 `protobuf:"varint,5,opt,name=storefile_size_MB" json:"storefile_size_MB,omitempty"` | |||
| // * the current size of the memstore for the region, in MB | |||
| MemstoreSize_MB *uint32 `protobuf:"varint,6,opt,name=memstore_size_MB" json:"memstore_size_MB,omitempty"` | |||
| // * | |||
| // The current total size of root-level store file indexes for the region, | |||
| // in MB. The same as {@link #rootIndexSizeKB} but in MB. | |||
| StorefileIndexSize_MB *uint32 `protobuf:"varint,7,opt,name=storefile_index_size_MB" json:"storefile_index_size_MB,omitempty"` | |||
| // * the current total read requests made to region | |||
| ReadRequestsCount *uint64 `protobuf:"varint,8,opt,name=read_requests_count" json:"read_requests_count,omitempty"` | |||
| // * the current total write requests made to region | |||
| WriteRequestsCount *uint64 `protobuf:"varint,9,opt,name=write_requests_count" json:"write_requests_count,omitempty"` | |||
| // * the total compacting key values in currently running compaction | |||
| TotalCompacting_KVs *uint64 `protobuf:"varint,10,opt,name=total_compacting_KVs" json:"total_compacting_KVs,omitempty"` | |||
| // * the completed count of key values in currently running compaction | |||
| CurrentCompacted_KVs *uint64 `protobuf:"varint,11,opt,name=current_compacted_KVs" json:"current_compacted_KVs,omitempty"` | |||
| // * The current total size of root-level indexes for the region, in KB. | |||
| RootIndexSize_KB *uint32 `protobuf:"varint,12,opt,name=root_index_size_KB" json:"root_index_size_KB,omitempty"` | |||
| // * The total size of all index blocks, not just the root level, in KB. | |||
| TotalStaticIndexSize_KB *uint32 `protobuf:"varint,13,opt,name=total_static_index_size_KB" json:"total_static_index_size_KB,omitempty"` | |||
| // * | |||
| // The total size of all Bloom filter blocks, not just loaded into the | |||
| // block cache, in KB. | |||
| TotalStaticBloomSize_KB *uint32 `protobuf:"varint,14,opt,name=total_static_bloom_size_KB" json:"total_static_bloom_size_KB,omitempty"` | |||
| // * the most recent sequence Id from cache flush | |||
| CompleteSequenceId *uint64 `protobuf:"varint,15,opt,name=complete_sequence_id" json:"complete_sequence_id,omitempty"` | |||
| // * The current data locality for region in the regionserver | |||
| DataLocality *float32 `protobuf:"fixed32,16,opt,name=data_locality" json:"data_locality,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *RegionLoad) Reset() { *m = RegionLoad{} } | |||
| func (m *RegionLoad) String() string { return proto1.CompactTextString(m) } | |||
| func (*RegionLoad) ProtoMessage() {} | |||
| func (m *RegionLoad) GetRegionSpecifier() *RegionSpecifier { | |||
| if m != nil { | |||
| return m.RegionSpecifier | |||
| } | |||
| return nil | |||
| } | |||
| func (m *RegionLoad) GetStores() uint32 { | |||
| if m != nil && m.Stores != nil { | |||
| return *m.Stores | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegionLoad) GetStorefiles() uint32 { | |||
| if m != nil && m.Storefiles != nil { | |||
| return *m.Storefiles | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegionLoad) GetStoreUncompressedSize_MB() uint32 { | |||
| if m != nil && m.StoreUncompressedSize_MB != nil { | |||
| return *m.StoreUncompressedSize_MB | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegionLoad) GetStorefileSize_MB() uint32 { | |||
| if m != nil && m.StorefileSize_MB != nil { | |||
| return *m.StorefileSize_MB | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegionLoad) GetMemstoreSize_MB() uint32 { | |||
| if m != nil && m.MemstoreSize_MB != nil { | |||
| return *m.MemstoreSize_MB | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegionLoad) GetStorefileIndexSize_MB() uint32 { | |||
| if m != nil && m.StorefileIndexSize_MB != nil { | |||
| return *m.StorefileIndexSize_MB | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegionLoad) GetReadRequestsCount() uint64 { | |||
| if m != nil && m.ReadRequestsCount != nil { | |||
| return *m.ReadRequestsCount | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegionLoad) GetWriteRequestsCount() uint64 { | |||
| if m != nil && m.WriteRequestsCount != nil { | |||
| return *m.WriteRequestsCount | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegionLoad) GetTotalCompacting_KVs() uint64 { | |||
| if m != nil && m.TotalCompacting_KVs != nil { | |||
| return *m.TotalCompacting_KVs | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegionLoad) GetCurrentCompacted_KVs() uint64 { | |||
| if m != nil && m.CurrentCompacted_KVs != nil { | |||
| return *m.CurrentCompacted_KVs | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegionLoad) GetRootIndexSize_KB() uint32 { | |||
| if m != nil && m.RootIndexSize_KB != nil { | |||
| return *m.RootIndexSize_KB | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegionLoad) GetTotalStaticIndexSize_KB() uint32 { | |||
| if m != nil && m.TotalStaticIndexSize_KB != nil { | |||
| return *m.TotalStaticIndexSize_KB | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegionLoad) GetTotalStaticBloomSize_KB() uint32 { | |||
| if m != nil && m.TotalStaticBloomSize_KB != nil { | |||
| return *m.TotalStaticBloomSize_KB | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegionLoad) GetCompleteSequenceId() uint64 { | |||
| if m != nil && m.CompleteSequenceId != nil { | |||
| return *m.CompleteSequenceId | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegionLoad) GetDataLocality() float32 { | |||
| if m != nil && m.DataLocality != nil { | |||
| return *m.DataLocality | |||
| } | |||
| return 0 | |||
| } | |||
| type ReplicationLoadSink struct { | |||
| AgeOfLastAppliedOp *uint64 `protobuf:"varint,1,req,name=ageOfLastAppliedOp" json:"ageOfLastAppliedOp,omitempty"` | |||
| TimeStampsOfLastAppliedOp *uint64 `protobuf:"varint,2,req,name=timeStampsOfLastAppliedOp" json:"timeStampsOfLastAppliedOp,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ReplicationLoadSink) Reset() { *m = ReplicationLoadSink{} } | |||
| func (m *ReplicationLoadSink) String() string { return proto1.CompactTextString(m) } | |||
| func (*ReplicationLoadSink) ProtoMessage() {} | |||
| func (m *ReplicationLoadSink) GetAgeOfLastAppliedOp() uint64 { | |||
| if m != nil && m.AgeOfLastAppliedOp != nil { | |||
| return *m.AgeOfLastAppliedOp | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *ReplicationLoadSink) GetTimeStampsOfLastAppliedOp() uint64 { | |||
| if m != nil && m.TimeStampsOfLastAppliedOp != nil { | |||
| return *m.TimeStampsOfLastAppliedOp | |||
| } | |||
| return 0 | |||
| } | |||
| type ReplicationLoadSource struct { | |||
| PeerID *string `protobuf:"bytes,1,req,name=peerID" json:"peerID,omitempty"` | |||
| AgeOfLastShippedOp *uint64 `protobuf:"varint,2,req,name=ageOfLastShippedOp" json:"ageOfLastShippedOp,omitempty"` | |||
| SizeOfLogQueue *uint32 `protobuf:"varint,3,req,name=sizeOfLogQueue" json:"sizeOfLogQueue,omitempty"` | |||
| TimeStampOfLastShippedOp *uint64 `protobuf:"varint,4,req,name=timeStampOfLastShippedOp" json:"timeStampOfLastShippedOp,omitempty"` | |||
| ReplicationLag *uint64 `protobuf:"varint,5,req,name=replicationLag" json:"replicationLag,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ReplicationLoadSource) Reset() { *m = ReplicationLoadSource{} } | |||
| func (m *ReplicationLoadSource) String() string { return proto1.CompactTextString(m) } | |||
| func (*ReplicationLoadSource) ProtoMessage() {} | |||
| func (m *ReplicationLoadSource) GetPeerID() string { | |||
| if m != nil && m.PeerID != nil { | |||
| return *m.PeerID | |||
| } | |||
| return "" | |||
| } | |||
| func (m *ReplicationLoadSource) GetAgeOfLastShippedOp() uint64 { | |||
| if m != nil && m.AgeOfLastShippedOp != nil { | |||
| return *m.AgeOfLastShippedOp | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *ReplicationLoadSource) GetSizeOfLogQueue() uint32 { | |||
| if m != nil && m.SizeOfLogQueue != nil { | |||
| return *m.SizeOfLogQueue | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *ReplicationLoadSource) GetTimeStampOfLastShippedOp() uint64 { | |||
| if m != nil && m.TimeStampOfLastShippedOp != nil { | |||
| return *m.TimeStampOfLastShippedOp | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *ReplicationLoadSource) GetReplicationLag() uint64 { | |||
| if m != nil && m.ReplicationLag != nil { | |||
| return *m.ReplicationLag | |||
| } | |||
| return 0 | |||
| } | |||
| type ServerLoad struct { | |||
| // * Number of requests since last report. | |||
| NumberOfRequests *uint32 `protobuf:"varint,1,opt,name=number_of_requests" json:"number_of_requests,omitempty"` | |||
| // * Total Number of requests from the start of the region server. | |||
| TotalNumberOfRequests *uint32 `protobuf:"varint,2,opt,name=total_number_of_requests" json:"total_number_of_requests,omitempty"` | |||
| // * the amount of used heap, in MB. | |||
| UsedHeap_MB *uint32 `protobuf:"varint,3,opt,name=used_heap_MB" json:"used_heap_MB,omitempty"` | |||
| // * the maximum allowable size of the heap, in MB. | |||
| MaxHeap_MB *uint32 `protobuf:"varint,4,opt,name=max_heap_MB" json:"max_heap_MB,omitempty"` | |||
| // * Information on the load of individual regions. | |||
| RegionLoads []*RegionLoad `protobuf:"bytes,5,rep,name=region_loads" json:"region_loads,omitempty"` | |||
| // * | |||
| // Regionserver-level coprocessors, e.g., WALObserver implementations. | |||
| // Region-level coprocessors, on the other hand, are stored inside RegionLoad | |||
| // objects. | |||
| Coprocessors []*Coprocessor `protobuf:"bytes,6,rep,name=coprocessors" json:"coprocessors,omitempty"` | |||
| // * | |||
| // Time when incremental (non-total) counts began being calculated (e.g. number_of_requests) | |||
| // time is measured as the difference, measured in milliseconds, between the current time | |||
| // and midnight, January 1, 1970 UTC. | |||
| ReportStartTime *uint64 `protobuf:"varint,7,opt,name=report_start_time" json:"report_start_time,omitempty"` | |||
| // * | |||
| // Time when report was generated. | |||
| // time is measured as the difference, measured in milliseconds, between the current time | |||
| // and midnight, January 1, 1970 UTC. | |||
| ReportEndTime *uint64 `protobuf:"varint,8,opt,name=report_end_time" json:"report_end_time,omitempty"` | |||
| // * | |||
| // The port number that this region server is hosing an info server on. | |||
| InfoServerPort *uint32 `protobuf:"varint,9,opt,name=info_server_port" json:"info_server_port,omitempty"` | |||
| // * | |||
| // The replicationLoadSource for the replication Source status of this region server. | |||
| ReplLoadSource []*ReplicationLoadSource `protobuf:"bytes,10,rep,name=replLoadSource" json:"replLoadSource,omitempty"` | |||
| // * | |||
| // The replicationLoadSink for the replication Sink status of this region server. | |||
| ReplLoadSink *ReplicationLoadSink `protobuf:"bytes,11,opt,name=replLoadSink" json:"replLoadSink,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ServerLoad) Reset() { *m = ServerLoad{} } | |||
| func (m *ServerLoad) String() string { return proto1.CompactTextString(m) } | |||
| func (*ServerLoad) ProtoMessage() {} | |||
| func (m *ServerLoad) GetNumberOfRequests() uint32 { | |||
| if m != nil && m.NumberOfRequests != nil { | |||
| return *m.NumberOfRequests | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *ServerLoad) GetTotalNumberOfRequests() uint32 { | |||
| if m != nil && m.TotalNumberOfRequests != nil { | |||
| return *m.TotalNumberOfRequests | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *ServerLoad) GetUsedHeap_MB() uint32 { | |||
| if m != nil && m.UsedHeap_MB != nil { | |||
| return *m.UsedHeap_MB | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *ServerLoad) GetMaxHeap_MB() uint32 { | |||
| if m != nil && m.MaxHeap_MB != nil { | |||
| return *m.MaxHeap_MB | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *ServerLoad) GetRegionLoads() []*RegionLoad { | |||
| if m != nil { | |||
| return m.RegionLoads | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ServerLoad) GetCoprocessors() []*Coprocessor { | |||
| if m != nil { | |||
| return m.Coprocessors | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ServerLoad) GetReportStartTime() uint64 { | |||
| if m != nil && m.ReportStartTime != nil { | |||
| return *m.ReportStartTime | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *ServerLoad) GetReportEndTime() uint64 { | |||
| if m != nil && m.ReportEndTime != nil { | |||
| return *m.ReportEndTime | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *ServerLoad) GetInfoServerPort() uint32 { | |||
| if m != nil && m.InfoServerPort != nil { | |||
| return *m.InfoServerPort | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *ServerLoad) GetReplLoadSource() []*ReplicationLoadSource { | |||
| if m != nil { | |||
| return m.ReplLoadSource | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ServerLoad) GetReplLoadSink() *ReplicationLoadSink { | |||
| if m != nil { | |||
| return m.ReplLoadSink | |||
| } | |||
| return nil | |||
| } | |||
| type LiveServerInfo struct { | |||
| Server *ServerName `protobuf:"bytes,1,req,name=server" json:"server,omitempty"` | |||
| ServerLoad *ServerLoad `protobuf:"bytes,2,req,name=server_load" json:"server_load,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *LiveServerInfo) Reset() { *m = LiveServerInfo{} } | |||
| func (m *LiveServerInfo) String() string { return proto1.CompactTextString(m) } | |||
| func (*LiveServerInfo) ProtoMessage() {} | |||
| func (m *LiveServerInfo) GetServer() *ServerName { | |||
| if m != nil { | |||
| return m.Server | |||
| } | |||
| return nil | |||
| } | |||
| func (m *LiveServerInfo) GetServerLoad() *ServerLoad { | |||
| if m != nil { | |||
| return m.ServerLoad | |||
| } | |||
| return nil | |||
| } | |||
| type ClusterStatus struct { | |||
| HbaseVersion *HBaseVersionFileContent `protobuf:"bytes,1,opt,name=hbase_version" json:"hbase_version,omitempty"` | |||
| LiveServers []*LiveServerInfo `protobuf:"bytes,2,rep,name=live_servers" json:"live_servers,omitempty"` | |||
| DeadServers []*ServerName `protobuf:"bytes,3,rep,name=dead_servers" json:"dead_servers,omitempty"` | |||
| RegionsInTransition []*RegionInTransition `protobuf:"bytes,4,rep,name=regions_in_transition" json:"regions_in_transition,omitempty"` | |||
| ClusterId *ClusterId `protobuf:"bytes,5,opt,name=cluster_id" json:"cluster_id,omitempty"` | |||
| MasterCoprocessors []*Coprocessor `protobuf:"bytes,6,rep,name=master_coprocessors" json:"master_coprocessors,omitempty"` | |||
| Master *ServerName `protobuf:"bytes,7,opt,name=master" json:"master,omitempty"` | |||
| BackupMasters []*ServerName `protobuf:"bytes,8,rep,name=backup_masters" json:"backup_masters,omitempty"` | |||
| BalancerOn *bool `protobuf:"varint,9,opt,name=balancer_on" json:"balancer_on,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ClusterStatus) Reset() { *m = ClusterStatus{} } | |||
| func (m *ClusterStatus) String() string { return proto1.CompactTextString(m) } | |||
| func (*ClusterStatus) ProtoMessage() {} | |||
| func (m *ClusterStatus) GetHbaseVersion() *HBaseVersionFileContent { | |||
| if m != nil { | |||
| return m.HbaseVersion | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ClusterStatus) GetLiveServers() []*LiveServerInfo { | |||
| if m != nil { | |||
| return m.LiveServers | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ClusterStatus) GetDeadServers() []*ServerName { | |||
| if m != nil { | |||
| return m.DeadServers | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ClusterStatus) GetRegionsInTransition() []*RegionInTransition { | |||
| if m != nil { | |||
| return m.RegionsInTransition | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ClusterStatus) GetClusterId() *ClusterId { | |||
| if m != nil { | |||
| return m.ClusterId | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ClusterStatus) GetMasterCoprocessors() []*Coprocessor { | |||
| if m != nil { | |||
| return m.MasterCoprocessors | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ClusterStatus) GetMaster() *ServerName { | |||
| if m != nil { | |||
| return m.Master | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ClusterStatus) GetBackupMasters() []*ServerName { | |||
| if m != nil { | |||
| return m.BackupMasters | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ClusterStatus) GetBalancerOn() bool { | |||
| if m != nil && m.BalancerOn != nil { | |||
| return *m.BalancerOn | |||
| } | |||
| return false | |||
| } | |||
| func init() { | |||
| proto1.RegisterEnum("proto.RegionState_State", RegionState_State_name, RegionState_State_value) | |||
| } | |||
| @@ -1,228 +0,0 @@ | |||
| // Code generated by protoc-gen-go. | |||
| // source: Comparator.proto | |||
| // DO NOT EDIT! | |||
| package proto | |||
| import proto1 "github.com/golang/protobuf/proto" | |||
| import math "math" | |||
| // Reference imports to suppress errors if they are not otherwise used. | |||
| var _ = proto1.Marshal | |||
| var _ = math.Inf | |||
| type BitComparator_BitwiseOp int32 | |||
| const ( | |||
| BitComparator_AND BitComparator_BitwiseOp = 1 | |||
| BitComparator_OR BitComparator_BitwiseOp = 2 | |||
| BitComparator_XOR BitComparator_BitwiseOp = 3 | |||
| ) | |||
| var BitComparator_BitwiseOp_name = map[int32]string{ | |||
| 1: "AND", | |||
| 2: "OR", | |||
| 3: "XOR", | |||
| } | |||
| var BitComparator_BitwiseOp_value = map[string]int32{ | |||
| "AND": 1, | |||
| "OR": 2, | |||
| "XOR": 3, | |||
| } | |||
| func (x BitComparator_BitwiseOp) Enum() *BitComparator_BitwiseOp { | |||
| p := new(BitComparator_BitwiseOp) | |||
| *p = x | |||
| return p | |||
| } | |||
| func (x BitComparator_BitwiseOp) String() string { | |||
| return proto1.EnumName(BitComparator_BitwiseOp_name, int32(x)) | |||
| } | |||
| func (x *BitComparator_BitwiseOp) UnmarshalJSON(data []byte) error { | |||
| value, err := proto1.UnmarshalJSONEnum(BitComparator_BitwiseOp_value, data, "BitComparator_BitwiseOp") | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *x = BitComparator_BitwiseOp(value) | |||
| return nil | |||
| } | |||
| type Comparator struct { | |||
| Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` | |||
| SerializedComparator []byte `protobuf:"bytes,2,opt,name=serialized_comparator" json:"serialized_comparator,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *Comparator) Reset() { *m = Comparator{} } | |||
| func (m *Comparator) String() string { return proto1.CompactTextString(m) } | |||
| func (*Comparator) ProtoMessage() {} | |||
| func (m *Comparator) GetName() string { | |||
| if m != nil && m.Name != nil { | |||
| return *m.Name | |||
| } | |||
| return "" | |||
| } | |||
| func (m *Comparator) GetSerializedComparator() []byte { | |||
| if m != nil { | |||
| return m.SerializedComparator | |||
| } | |||
| return nil | |||
| } | |||
| type ByteArrayComparable struct { | |||
| Value []byte `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ByteArrayComparable) Reset() { *m = ByteArrayComparable{} } | |||
| func (m *ByteArrayComparable) String() string { return proto1.CompactTextString(m) } | |||
| func (*ByteArrayComparable) ProtoMessage() {} | |||
| func (m *ByteArrayComparable) GetValue() []byte { | |||
| if m != nil { | |||
| return m.Value | |||
| } | |||
| return nil | |||
| } | |||
| type BinaryComparator struct { | |||
| Comparable *ByteArrayComparable `protobuf:"bytes,1,req,name=comparable" json:"comparable,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *BinaryComparator) Reset() { *m = BinaryComparator{} } | |||
| func (m *BinaryComparator) String() string { return proto1.CompactTextString(m) } | |||
| func (*BinaryComparator) ProtoMessage() {} | |||
| func (m *BinaryComparator) GetComparable() *ByteArrayComparable { | |||
| if m != nil { | |||
| return m.Comparable | |||
| } | |||
| return nil | |||
| } | |||
| type LongComparator struct { | |||
| Comparable *ByteArrayComparable `protobuf:"bytes,1,req,name=comparable" json:"comparable,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *LongComparator) Reset() { *m = LongComparator{} } | |||
| func (m *LongComparator) String() string { return proto1.CompactTextString(m) } | |||
| func (*LongComparator) ProtoMessage() {} | |||
| func (m *LongComparator) GetComparable() *ByteArrayComparable { | |||
| if m != nil { | |||
| return m.Comparable | |||
| } | |||
| return nil | |||
| } | |||
| type BinaryPrefixComparator struct { | |||
| Comparable *ByteArrayComparable `protobuf:"bytes,1,req,name=comparable" json:"comparable,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *BinaryPrefixComparator) Reset() { *m = BinaryPrefixComparator{} } | |||
| func (m *BinaryPrefixComparator) String() string { return proto1.CompactTextString(m) } | |||
| func (*BinaryPrefixComparator) ProtoMessage() {} | |||
| func (m *BinaryPrefixComparator) GetComparable() *ByteArrayComparable { | |||
| if m != nil { | |||
| return m.Comparable | |||
| } | |||
| return nil | |||
| } | |||
| type BitComparator struct { | |||
| Comparable *ByteArrayComparable `protobuf:"bytes,1,req,name=comparable" json:"comparable,omitempty"` | |||
| BitwiseOp *BitComparator_BitwiseOp `protobuf:"varint,2,req,name=bitwise_op,enum=proto.BitComparator_BitwiseOp" json:"bitwise_op,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *BitComparator) Reset() { *m = BitComparator{} } | |||
| func (m *BitComparator) String() string { return proto1.CompactTextString(m) } | |||
| func (*BitComparator) ProtoMessage() {} | |||
| func (m *BitComparator) GetComparable() *ByteArrayComparable { | |||
| if m != nil { | |||
| return m.Comparable | |||
| } | |||
| return nil | |||
| } | |||
| func (m *BitComparator) GetBitwiseOp() BitComparator_BitwiseOp { | |||
| if m != nil && m.BitwiseOp != nil { | |||
| return *m.BitwiseOp | |||
| } | |||
| return BitComparator_AND | |||
| } | |||
| type NullComparator struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *NullComparator) Reset() { *m = NullComparator{} } | |||
| func (m *NullComparator) String() string { return proto1.CompactTextString(m) } | |||
| func (*NullComparator) ProtoMessage() {} | |||
| type RegexStringComparator struct { | |||
| Pattern *string `protobuf:"bytes,1,req,name=pattern" json:"pattern,omitempty"` | |||
| PatternFlags *int32 `protobuf:"varint,2,req,name=pattern_flags" json:"pattern_flags,omitempty"` | |||
| Charset *string `protobuf:"bytes,3,req,name=charset" json:"charset,omitempty"` | |||
| Engine *string `protobuf:"bytes,4,opt,name=engine" json:"engine,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *RegexStringComparator) Reset() { *m = RegexStringComparator{} } | |||
| func (m *RegexStringComparator) String() string { return proto1.CompactTextString(m) } | |||
| func (*RegexStringComparator) ProtoMessage() {} | |||
| func (m *RegexStringComparator) GetPattern() string { | |||
| if m != nil && m.Pattern != nil { | |||
| return *m.Pattern | |||
| } | |||
| return "" | |||
| } | |||
| func (m *RegexStringComparator) GetPatternFlags() int32 { | |||
| if m != nil && m.PatternFlags != nil { | |||
| return *m.PatternFlags | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegexStringComparator) GetCharset() string { | |||
| if m != nil && m.Charset != nil { | |||
| return *m.Charset | |||
| } | |||
| return "" | |||
| } | |||
| func (m *RegexStringComparator) GetEngine() string { | |||
| if m != nil && m.Engine != nil { | |||
| return *m.Engine | |||
| } | |||
| return "" | |||
| } | |||
| type SubstringComparator struct { | |||
| Substr *string `protobuf:"bytes,1,req,name=substr" json:"substr,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *SubstringComparator) Reset() { *m = SubstringComparator{} } | |||
| func (m *SubstringComparator) String() string { return proto1.CompactTextString(m) } | |||
| func (*SubstringComparator) ProtoMessage() {} | |||
| func (m *SubstringComparator) GetSubstr() string { | |||
| if m != nil && m.Substr != nil { | |||
| return *m.Substr | |||
| } | |||
| return "" | |||
| } | |||
| func init() { | |||
| proto1.RegisterEnum("proto.BitComparator_BitwiseOp", BitComparator_BitwiseOp_name, BitComparator_BitwiseOp_value) | |||
| } | |||
| @@ -1,63 +0,0 @@ | |||
| // Code generated by protoc-gen-go. | |||
| // source: Encryption.proto | |||
| // DO NOT EDIT! | |||
| package proto | |||
| import proto1 "github.com/golang/protobuf/proto" | |||
| import math "math" | |||
| // Reference imports to suppress errors if they are not otherwise used. | |||
| var _ = proto1.Marshal | |||
| var _ = math.Inf | |||
| type WrappedKey struct { | |||
| Algorithm *string `protobuf:"bytes,1,req,name=algorithm" json:"algorithm,omitempty"` | |||
| Length *uint32 `protobuf:"varint,2,req,name=length" json:"length,omitempty"` | |||
| Data []byte `protobuf:"bytes,3,req,name=data" json:"data,omitempty"` | |||
| Iv []byte `protobuf:"bytes,4,opt,name=iv" json:"iv,omitempty"` | |||
| Hash []byte `protobuf:"bytes,5,opt,name=hash" json:"hash,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *WrappedKey) Reset() { *m = WrappedKey{} } | |||
| func (m *WrappedKey) String() string { return proto1.CompactTextString(m) } | |||
| func (*WrappedKey) ProtoMessage() {} | |||
| func (m *WrappedKey) GetAlgorithm() string { | |||
| if m != nil && m.Algorithm != nil { | |||
| return *m.Algorithm | |||
| } | |||
| return "" | |||
| } | |||
| func (m *WrappedKey) GetLength() uint32 { | |||
| if m != nil && m.Length != nil { | |||
| return *m.Length | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *WrappedKey) GetData() []byte { | |||
| if m != nil { | |||
| return m.Data | |||
| } | |||
| return nil | |||
| } | |||
| func (m *WrappedKey) GetIv() []byte { | |||
| if m != nil { | |||
| return m.Iv | |||
| } | |||
| return nil | |||
| } | |||
| func (m *WrappedKey) GetHash() []byte { | |||
| if m != nil { | |||
| return m.Hash | |||
| } | |||
| return nil | |||
| } | |||
| func init() { | |||
| } | |||
| @@ -1,130 +0,0 @@ | |||
| // Code generated by protoc-gen-go. | |||
| // source: ErrorHandling.proto | |||
| // DO NOT EDIT! | |||
| package proto | |||
| import proto1 "github.com/golang/protobuf/proto" | |||
| import math "math" | |||
| // Reference imports to suppress errors if they are not otherwise used. | |||
| var _ = proto1.Marshal | |||
| var _ = math.Inf | |||
| // * | |||
| // Protobuf version of a java.lang.StackTraceElement | |||
| // so we can serialize exceptions. | |||
| type StackTraceElementMessage struct { | |||
| DeclaringClass *string `protobuf:"bytes,1,opt,name=declaring_class" json:"declaring_class,omitempty"` | |||
| MethodName *string `protobuf:"bytes,2,opt,name=method_name" json:"method_name,omitempty"` | |||
| FileName *string `protobuf:"bytes,3,opt,name=file_name" json:"file_name,omitempty"` | |||
| LineNumber *int32 `protobuf:"varint,4,opt,name=line_number" json:"line_number,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *StackTraceElementMessage) Reset() { *m = StackTraceElementMessage{} } | |||
| func (m *StackTraceElementMessage) String() string { return proto1.CompactTextString(m) } | |||
| func (*StackTraceElementMessage) ProtoMessage() {} | |||
| func (m *StackTraceElementMessage) GetDeclaringClass() string { | |||
| if m != nil && m.DeclaringClass != nil { | |||
| return *m.DeclaringClass | |||
| } | |||
| return "" | |||
| } | |||
| func (m *StackTraceElementMessage) GetMethodName() string { | |||
| if m != nil && m.MethodName != nil { | |||
| return *m.MethodName | |||
| } | |||
| return "" | |||
| } | |||
| func (m *StackTraceElementMessage) GetFileName() string { | |||
| if m != nil && m.FileName != nil { | |||
| return *m.FileName | |||
| } | |||
| return "" | |||
| } | |||
| func (m *StackTraceElementMessage) GetLineNumber() int32 { | |||
| if m != nil && m.LineNumber != nil { | |||
| return *m.LineNumber | |||
| } | |||
| return 0 | |||
| } | |||
| // * | |||
| // Cause of a remote failure for a generic exception. Contains | |||
| // all the information for a generic exception as well as | |||
| // optional info about the error for generic info passing | |||
| // (which should be another protobuffed class). | |||
| type GenericExceptionMessage struct { | |||
| ClassName *string `protobuf:"bytes,1,opt,name=class_name" json:"class_name,omitempty"` | |||
| Message *string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` | |||
| ErrorInfo []byte `protobuf:"bytes,3,opt,name=error_info" json:"error_info,omitempty"` | |||
| Trace []*StackTraceElementMessage `protobuf:"bytes,4,rep,name=trace" json:"trace,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *GenericExceptionMessage) Reset() { *m = GenericExceptionMessage{} } | |||
| func (m *GenericExceptionMessage) String() string { return proto1.CompactTextString(m) } | |||
| func (*GenericExceptionMessage) ProtoMessage() {} | |||
| func (m *GenericExceptionMessage) GetClassName() string { | |||
| if m != nil && m.ClassName != nil { | |||
| return *m.ClassName | |||
| } | |||
| return "" | |||
| } | |||
| func (m *GenericExceptionMessage) GetMessage() string { | |||
| if m != nil && m.Message != nil { | |||
| return *m.Message | |||
| } | |||
| return "" | |||
| } | |||
| func (m *GenericExceptionMessage) GetErrorInfo() []byte { | |||
| if m != nil { | |||
| return m.ErrorInfo | |||
| } | |||
| return nil | |||
| } | |||
| func (m *GenericExceptionMessage) GetTrace() []*StackTraceElementMessage { | |||
| if m != nil { | |||
| return m.Trace | |||
| } | |||
| return nil | |||
| } | |||
| // * | |||
| // Exception sent across the wire when a remote task needs | |||
| // to notify other tasks that it failed and why | |||
| type ForeignExceptionMessage struct { | |||
| Source *string `protobuf:"bytes,1,opt,name=source" json:"source,omitempty"` | |||
| GenericException *GenericExceptionMessage `protobuf:"bytes,2,opt,name=generic_exception" json:"generic_exception,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ForeignExceptionMessage) Reset() { *m = ForeignExceptionMessage{} } | |||
| func (m *ForeignExceptionMessage) String() string { return proto1.CompactTextString(m) } | |||
| func (*ForeignExceptionMessage) ProtoMessage() {} | |||
| func (m *ForeignExceptionMessage) GetSource() string { | |||
| if m != nil && m.Source != nil { | |||
| return *m.Source | |||
| } | |||
| return "" | |||
| } | |||
| func (m *ForeignExceptionMessage) GetGenericException() *GenericExceptionMessage { | |||
| if m != nil { | |||
| return m.GenericException | |||
| } | |||
| return nil | |||
| } | |||
| func init() { | |||
| } | |||
| @@ -1,93 +0,0 @@ | |||
| // Code generated by protoc-gen-go. | |||
| // source: FS.proto | |||
| // DO NOT EDIT! | |||
| package proto | |||
| import proto1 "github.com/golang/protobuf/proto" | |||
| import math "math" | |||
| // Reference imports to suppress errors if they are not otherwise used. | |||
| var _ = proto1.Marshal | |||
| var _ = math.Inf | |||
| type Reference_Range int32 | |||
| const ( | |||
| Reference_TOP Reference_Range = 0 | |||
| Reference_BOTTOM Reference_Range = 1 | |||
| ) | |||
| var Reference_Range_name = map[int32]string{ | |||
| 0: "TOP", | |||
| 1: "BOTTOM", | |||
| } | |||
| var Reference_Range_value = map[string]int32{ | |||
| "TOP": 0, | |||
| "BOTTOM": 1, | |||
| } | |||
| func (x Reference_Range) Enum() *Reference_Range { | |||
| p := new(Reference_Range) | |||
| *p = x | |||
| return p | |||
| } | |||
| func (x Reference_Range) String() string { | |||
| return proto1.EnumName(Reference_Range_name, int32(x)) | |||
| } | |||
| func (x *Reference_Range) UnmarshalJSON(data []byte) error { | |||
| value, err := proto1.UnmarshalJSONEnum(Reference_Range_value, data, "Reference_Range") | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *x = Reference_Range(value) | |||
| return nil | |||
| } | |||
| // * | |||
| // The ${HBASE_ROOTDIR}/hbase.version file content | |||
| type HBaseVersionFileContent struct { | |||
| Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *HBaseVersionFileContent) Reset() { *m = HBaseVersionFileContent{} } | |||
| func (m *HBaseVersionFileContent) String() string { return proto1.CompactTextString(m) } | |||
| func (*HBaseVersionFileContent) ProtoMessage() {} | |||
| func (m *HBaseVersionFileContent) GetVersion() string { | |||
| if m != nil && m.Version != nil { | |||
| return *m.Version | |||
| } | |||
| return "" | |||
| } | |||
| // * | |||
| // Reference file content used when we split an hfile under a region. | |||
| type Reference struct { | |||
| Splitkey []byte `protobuf:"bytes,1,req,name=splitkey" json:"splitkey,omitempty"` | |||
| Range *Reference_Range `protobuf:"varint,2,req,name=range,enum=proto.Reference_Range" json:"range,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *Reference) Reset() { *m = Reference{} } | |||
| func (m *Reference) String() string { return proto1.CompactTextString(m) } | |||
| func (*Reference) ProtoMessage() {} | |||
| func (m *Reference) GetSplitkey() []byte { | |||
| if m != nil { | |||
| return m.Splitkey | |||
| } | |||
| return nil | |||
| } | |||
| func (m *Reference) GetRange() Reference_Range { | |||
| if m != nil && m.Range != nil { | |||
| return *m.Range | |||
| } | |||
| return Reference_TOP | |||
| } | |||
| func init() { | |||
| proto1.RegisterEnum("proto.Reference_Range", Reference_Range_name, Reference_Range_value) | |||
| } | |||
| @@ -1,609 +0,0 @@ | |||
| // Code generated by protoc-gen-go. | |||
| // source: Filter.proto | |||
| // DO NOT EDIT! | |||
| package proto | |||
| import proto1 "github.com/golang/protobuf/proto" | |||
| import math "math" | |||
| // Reference imports to suppress errors if they are not otherwise used. | |||
| var _ = proto1.Marshal | |||
| var _ = math.Inf | |||
| type FilterList_Operator int32 | |||
| const ( | |||
| FilterList_MUST_PASS_ALL FilterList_Operator = 1 | |||
| FilterList_MUST_PASS_ONE FilterList_Operator = 2 | |||
| ) | |||
| var FilterList_Operator_name = map[int32]string{ | |||
| 1: "MUST_PASS_ALL", | |||
| 2: "MUST_PASS_ONE", | |||
| } | |||
| var FilterList_Operator_value = map[string]int32{ | |||
| "MUST_PASS_ALL": 1, | |||
| "MUST_PASS_ONE": 2, | |||
| } | |||
| func (x FilterList_Operator) Enum() *FilterList_Operator { | |||
| p := new(FilterList_Operator) | |||
| *p = x | |||
| return p | |||
| } | |||
| func (x FilterList_Operator) String() string { | |||
| return proto1.EnumName(FilterList_Operator_name, int32(x)) | |||
| } | |||
| func (x *FilterList_Operator) UnmarshalJSON(data []byte) error { | |||
| value, err := proto1.UnmarshalJSONEnum(FilterList_Operator_value, data, "FilterList_Operator") | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *x = FilterList_Operator(value) | |||
| return nil | |||
| } | |||
| type Filter struct { | |||
| Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` | |||
| SerializedFilter []byte `protobuf:"bytes,2,opt,name=serialized_filter" json:"serialized_filter,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *Filter) Reset() { *m = Filter{} } | |||
| func (m *Filter) String() string { return proto1.CompactTextString(m) } | |||
| func (*Filter) ProtoMessage() {} | |||
| func (m *Filter) GetName() string { | |||
| if m != nil && m.Name != nil { | |||
| return *m.Name | |||
| } | |||
| return "" | |||
| } | |||
| func (m *Filter) GetSerializedFilter() []byte { | |||
| if m != nil { | |||
| return m.SerializedFilter | |||
| } | |||
| return nil | |||
| } | |||
| type ColumnCountGetFilter struct { | |||
| Limit *int32 `protobuf:"varint,1,req,name=limit" json:"limit,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ColumnCountGetFilter) Reset() { *m = ColumnCountGetFilter{} } | |||
| func (m *ColumnCountGetFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*ColumnCountGetFilter) ProtoMessage() {} | |||
| func (m *ColumnCountGetFilter) GetLimit() int32 { | |||
| if m != nil && m.Limit != nil { | |||
| return *m.Limit | |||
| } | |||
| return 0 | |||
| } | |||
| type ColumnPaginationFilter struct { | |||
| Limit *int32 `protobuf:"varint,1,req,name=limit" json:"limit,omitempty"` | |||
| Offset *int32 `protobuf:"varint,2,opt,name=offset" json:"offset,omitempty"` | |||
| ColumnOffset []byte `protobuf:"bytes,3,opt,name=column_offset" json:"column_offset,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ColumnPaginationFilter) Reset() { *m = ColumnPaginationFilter{} } | |||
| func (m *ColumnPaginationFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*ColumnPaginationFilter) ProtoMessage() {} | |||
| func (m *ColumnPaginationFilter) GetLimit() int32 { | |||
| if m != nil && m.Limit != nil { | |||
| return *m.Limit | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *ColumnPaginationFilter) GetOffset() int32 { | |||
| if m != nil && m.Offset != nil { | |||
| return *m.Offset | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *ColumnPaginationFilter) GetColumnOffset() []byte { | |||
| if m != nil { | |||
| return m.ColumnOffset | |||
| } | |||
| return nil | |||
| } | |||
| type ColumnPrefixFilter struct { | |||
| Prefix []byte `protobuf:"bytes,1,req,name=prefix" json:"prefix,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ColumnPrefixFilter) Reset() { *m = ColumnPrefixFilter{} } | |||
| func (m *ColumnPrefixFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*ColumnPrefixFilter) ProtoMessage() {} | |||
| func (m *ColumnPrefixFilter) GetPrefix() []byte { | |||
| if m != nil { | |||
| return m.Prefix | |||
| } | |||
| return nil | |||
| } | |||
| type ColumnRangeFilter struct { | |||
| MinColumn []byte `protobuf:"bytes,1,opt,name=min_column" json:"min_column,omitempty"` | |||
| MinColumnInclusive *bool `protobuf:"varint,2,opt,name=min_column_inclusive" json:"min_column_inclusive,omitempty"` | |||
| MaxColumn []byte `protobuf:"bytes,3,opt,name=max_column" json:"max_column,omitempty"` | |||
| MaxColumnInclusive *bool `protobuf:"varint,4,opt,name=max_column_inclusive" json:"max_column_inclusive,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ColumnRangeFilter) Reset() { *m = ColumnRangeFilter{} } | |||
| func (m *ColumnRangeFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*ColumnRangeFilter) ProtoMessage() {} | |||
| func (m *ColumnRangeFilter) GetMinColumn() []byte { | |||
| if m != nil { | |||
| return m.MinColumn | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ColumnRangeFilter) GetMinColumnInclusive() bool { | |||
| if m != nil && m.MinColumnInclusive != nil { | |||
| return *m.MinColumnInclusive | |||
| } | |||
| return false | |||
| } | |||
| func (m *ColumnRangeFilter) GetMaxColumn() []byte { | |||
| if m != nil { | |||
| return m.MaxColumn | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ColumnRangeFilter) GetMaxColumnInclusive() bool { | |||
| if m != nil && m.MaxColumnInclusive != nil { | |||
| return *m.MaxColumnInclusive | |||
| } | |||
| return false | |||
| } | |||
| type CompareFilter struct { | |||
| CompareOp *CompareType `protobuf:"varint,1,req,name=compare_op,enum=proto.CompareType" json:"compare_op,omitempty"` | |||
| Comparator *Comparator `protobuf:"bytes,2,opt,name=comparator" json:"comparator,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *CompareFilter) Reset() { *m = CompareFilter{} } | |||
| func (m *CompareFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*CompareFilter) ProtoMessage() {} | |||
| func (m *CompareFilter) GetCompareOp() CompareType { | |||
| if m != nil && m.CompareOp != nil { | |||
| return *m.CompareOp | |||
| } | |||
| return CompareType_LESS | |||
| } | |||
| func (m *CompareFilter) GetComparator() *Comparator { | |||
| if m != nil { | |||
| return m.Comparator | |||
| } | |||
| return nil | |||
| } | |||
| type DependentColumnFilter struct { | |||
| CompareFilter *CompareFilter `protobuf:"bytes,1,req,name=compare_filter" json:"compare_filter,omitempty"` | |||
| ColumnFamily []byte `protobuf:"bytes,2,opt,name=column_family" json:"column_family,omitempty"` | |||
| ColumnQualifier []byte `protobuf:"bytes,3,opt,name=column_qualifier" json:"column_qualifier,omitempty"` | |||
| DropDependentColumn *bool `protobuf:"varint,4,opt,name=drop_dependent_column" json:"drop_dependent_column,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *DependentColumnFilter) Reset() { *m = DependentColumnFilter{} } | |||
| func (m *DependentColumnFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*DependentColumnFilter) ProtoMessage() {} | |||
| func (m *DependentColumnFilter) GetCompareFilter() *CompareFilter { | |||
| if m != nil { | |||
| return m.CompareFilter | |||
| } | |||
| return nil | |||
| } | |||
| func (m *DependentColumnFilter) GetColumnFamily() []byte { | |||
| if m != nil { | |||
| return m.ColumnFamily | |||
| } | |||
| return nil | |||
| } | |||
| func (m *DependentColumnFilter) GetColumnQualifier() []byte { | |||
| if m != nil { | |||
| return m.ColumnQualifier | |||
| } | |||
| return nil | |||
| } | |||
| func (m *DependentColumnFilter) GetDropDependentColumn() bool { | |||
| if m != nil && m.DropDependentColumn != nil { | |||
| return *m.DropDependentColumn | |||
| } | |||
| return false | |||
| } | |||
| type FamilyFilter struct { | |||
| CompareFilter *CompareFilter `protobuf:"bytes,1,req,name=compare_filter" json:"compare_filter,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *FamilyFilter) Reset() { *m = FamilyFilter{} } | |||
| func (m *FamilyFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*FamilyFilter) ProtoMessage() {} | |||
| func (m *FamilyFilter) GetCompareFilter() *CompareFilter { | |||
| if m != nil { | |||
| return m.CompareFilter | |||
| } | |||
| return nil | |||
| } | |||
| type FilterList struct { | |||
| Operator *FilterList_Operator `protobuf:"varint,1,req,name=operator,enum=proto.FilterList_Operator" json:"operator,omitempty"` | |||
| Filters []*Filter `protobuf:"bytes,2,rep,name=filters" json:"filters,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *FilterList) Reset() { *m = FilterList{} } | |||
| func (m *FilterList) String() string { return proto1.CompactTextString(m) } | |||
| func (*FilterList) ProtoMessage() {} | |||
| func (m *FilterList) GetOperator() FilterList_Operator { | |||
| if m != nil && m.Operator != nil { | |||
| return *m.Operator | |||
| } | |||
| return FilterList_MUST_PASS_ALL | |||
| } | |||
| func (m *FilterList) GetFilters() []*Filter { | |||
| if m != nil { | |||
| return m.Filters | |||
| } | |||
| return nil | |||
| } | |||
| type FilterWrapper struct { | |||
| Filter *Filter `protobuf:"bytes,1,req,name=filter" json:"filter,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *FilterWrapper) Reset() { *m = FilterWrapper{} } | |||
| func (m *FilterWrapper) String() string { return proto1.CompactTextString(m) } | |||
| func (*FilterWrapper) ProtoMessage() {} | |||
| func (m *FilterWrapper) GetFilter() *Filter { | |||
| if m != nil { | |||
| return m.Filter | |||
| } | |||
| return nil | |||
| } | |||
| type FirstKeyOnlyFilter struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *FirstKeyOnlyFilter) Reset() { *m = FirstKeyOnlyFilter{} } | |||
| func (m *FirstKeyOnlyFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*FirstKeyOnlyFilter) ProtoMessage() {} | |||
| type FirstKeyValueMatchingQualifiersFilter struct { | |||
| Qualifiers [][]byte `protobuf:"bytes,1,rep,name=qualifiers" json:"qualifiers,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *FirstKeyValueMatchingQualifiersFilter) Reset() { *m = FirstKeyValueMatchingQualifiersFilter{} } | |||
| func (m *FirstKeyValueMatchingQualifiersFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*FirstKeyValueMatchingQualifiersFilter) ProtoMessage() {} | |||
| func (m *FirstKeyValueMatchingQualifiersFilter) GetQualifiers() [][]byte { | |||
| if m != nil { | |||
| return m.Qualifiers | |||
| } | |||
| return nil | |||
| } | |||
| type FuzzyRowFilter struct { | |||
| FuzzyKeysData []*BytesBytesPair `protobuf:"bytes,1,rep,name=fuzzy_keys_data" json:"fuzzy_keys_data,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *FuzzyRowFilter) Reset() { *m = FuzzyRowFilter{} } | |||
| func (m *FuzzyRowFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*FuzzyRowFilter) ProtoMessage() {} | |||
| func (m *FuzzyRowFilter) GetFuzzyKeysData() []*BytesBytesPair { | |||
| if m != nil { | |||
| return m.FuzzyKeysData | |||
| } | |||
| return nil | |||
| } | |||
| type InclusiveStopFilter struct { | |||
| StopRowKey []byte `protobuf:"bytes,1,opt,name=stop_row_key" json:"stop_row_key,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *InclusiveStopFilter) Reset() { *m = InclusiveStopFilter{} } | |||
| func (m *InclusiveStopFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*InclusiveStopFilter) ProtoMessage() {} | |||
| func (m *InclusiveStopFilter) GetStopRowKey() []byte { | |||
| if m != nil { | |||
| return m.StopRowKey | |||
| } | |||
| return nil | |||
| } | |||
| type KeyOnlyFilter struct { | |||
| LenAsVal *bool `protobuf:"varint,1,req,name=len_as_val" json:"len_as_val,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *KeyOnlyFilter) Reset() { *m = KeyOnlyFilter{} } | |||
| func (m *KeyOnlyFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*KeyOnlyFilter) ProtoMessage() {} | |||
| func (m *KeyOnlyFilter) GetLenAsVal() bool { | |||
| if m != nil && m.LenAsVal != nil { | |||
| return *m.LenAsVal | |||
| } | |||
| return false | |||
| } | |||
| type MultipleColumnPrefixFilter struct { | |||
| SortedPrefixes [][]byte `protobuf:"bytes,1,rep,name=sorted_prefixes" json:"sorted_prefixes,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *MultipleColumnPrefixFilter) Reset() { *m = MultipleColumnPrefixFilter{} } | |||
| func (m *MultipleColumnPrefixFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*MultipleColumnPrefixFilter) ProtoMessage() {} | |||
| func (m *MultipleColumnPrefixFilter) GetSortedPrefixes() [][]byte { | |||
| if m != nil { | |||
| return m.SortedPrefixes | |||
| } | |||
| return nil | |||
| } | |||
| type PageFilter struct { | |||
| PageSize *int64 `protobuf:"varint,1,req,name=page_size" json:"page_size,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *PageFilter) Reset() { *m = PageFilter{} } | |||
| func (m *PageFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*PageFilter) ProtoMessage() {} | |||
| func (m *PageFilter) GetPageSize() int64 { | |||
| if m != nil && m.PageSize != nil { | |||
| return *m.PageSize | |||
| } | |||
| return 0 | |||
| } | |||
| type PrefixFilter struct { | |||
| Prefix []byte `protobuf:"bytes,1,opt,name=prefix" json:"prefix,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *PrefixFilter) Reset() { *m = PrefixFilter{} } | |||
| func (m *PrefixFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*PrefixFilter) ProtoMessage() {} | |||
| func (m *PrefixFilter) GetPrefix() []byte { | |||
| if m != nil { | |||
| return m.Prefix | |||
| } | |||
| return nil | |||
| } | |||
| type QualifierFilter struct { | |||
| CompareFilter *CompareFilter `protobuf:"bytes,1,req,name=compare_filter" json:"compare_filter,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *QualifierFilter) Reset() { *m = QualifierFilter{} } | |||
| func (m *QualifierFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*QualifierFilter) ProtoMessage() {} | |||
| func (m *QualifierFilter) GetCompareFilter() *CompareFilter { | |||
| if m != nil { | |||
| return m.CompareFilter | |||
| } | |||
| return nil | |||
| } | |||
| type RandomRowFilter struct { | |||
| Chance *float32 `protobuf:"fixed32,1,req,name=chance" json:"chance,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *RandomRowFilter) Reset() { *m = RandomRowFilter{} } | |||
| func (m *RandomRowFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*RandomRowFilter) ProtoMessage() {} | |||
| func (m *RandomRowFilter) GetChance() float32 { | |||
| if m != nil && m.Chance != nil { | |||
| return *m.Chance | |||
| } | |||
| return 0 | |||
| } | |||
| type RowFilter struct { | |||
| CompareFilter *CompareFilter `protobuf:"bytes,1,req,name=compare_filter" json:"compare_filter,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *RowFilter) Reset() { *m = RowFilter{} } | |||
| func (m *RowFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*RowFilter) ProtoMessage() {} | |||
| func (m *RowFilter) GetCompareFilter() *CompareFilter { | |||
| if m != nil { | |||
| return m.CompareFilter | |||
| } | |||
| return nil | |||
| } | |||
| type SingleColumnValueExcludeFilter struct { | |||
| SingleColumnValueFilter *SingleColumnValueFilter `protobuf:"bytes,1,req,name=single_column_value_filter" json:"single_column_value_filter,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *SingleColumnValueExcludeFilter) Reset() { *m = SingleColumnValueExcludeFilter{} } | |||
| func (m *SingleColumnValueExcludeFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*SingleColumnValueExcludeFilter) ProtoMessage() {} | |||
| func (m *SingleColumnValueExcludeFilter) GetSingleColumnValueFilter() *SingleColumnValueFilter { | |||
| if m != nil { | |||
| return m.SingleColumnValueFilter | |||
| } | |||
| return nil | |||
| } | |||
| type SingleColumnValueFilter struct { | |||
| ColumnFamily []byte `protobuf:"bytes,1,opt,name=column_family" json:"column_family,omitempty"` | |||
| ColumnQualifier []byte `protobuf:"bytes,2,opt,name=column_qualifier" json:"column_qualifier,omitempty"` | |||
| CompareOp *CompareType `protobuf:"varint,3,req,name=compare_op,enum=proto.CompareType" json:"compare_op,omitempty"` | |||
| Comparator *Comparator `protobuf:"bytes,4,req,name=comparator" json:"comparator,omitempty"` | |||
| FilterIfMissing *bool `protobuf:"varint,5,opt,name=filter_if_missing" json:"filter_if_missing,omitempty"` | |||
| LatestVersionOnly *bool `protobuf:"varint,6,opt,name=latest_version_only" json:"latest_version_only,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *SingleColumnValueFilter) Reset() { *m = SingleColumnValueFilter{} } | |||
| func (m *SingleColumnValueFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*SingleColumnValueFilter) ProtoMessage() {} | |||
| func (m *SingleColumnValueFilter) GetColumnFamily() []byte { | |||
| if m != nil { | |||
| return m.ColumnFamily | |||
| } | |||
| return nil | |||
| } | |||
| func (m *SingleColumnValueFilter) GetColumnQualifier() []byte { | |||
| if m != nil { | |||
| return m.ColumnQualifier | |||
| } | |||
| return nil | |||
| } | |||
| func (m *SingleColumnValueFilter) GetCompareOp() CompareType { | |||
| if m != nil && m.CompareOp != nil { | |||
| return *m.CompareOp | |||
| } | |||
| return CompareType_LESS | |||
| } | |||
| func (m *SingleColumnValueFilter) GetComparator() *Comparator { | |||
| if m != nil { | |||
| return m.Comparator | |||
| } | |||
| return nil | |||
| } | |||
| func (m *SingleColumnValueFilter) GetFilterIfMissing() bool { | |||
| if m != nil && m.FilterIfMissing != nil { | |||
| return *m.FilterIfMissing | |||
| } | |||
| return false | |||
| } | |||
| func (m *SingleColumnValueFilter) GetLatestVersionOnly() bool { | |||
| if m != nil && m.LatestVersionOnly != nil { | |||
| return *m.LatestVersionOnly | |||
| } | |||
| return false | |||
| } | |||
| type SkipFilter struct { | |||
| Filter *Filter `protobuf:"bytes,1,req,name=filter" json:"filter,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *SkipFilter) Reset() { *m = SkipFilter{} } | |||
| func (m *SkipFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*SkipFilter) ProtoMessage() {} | |||
| func (m *SkipFilter) GetFilter() *Filter { | |||
| if m != nil { | |||
| return m.Filter | |||
| } | |||
| return nil | |||
| } | |||
| type TimestampsFilter struct { | |||
| Timestamps []int64 `protobuf:"varint,1,rep,packed,name=timestamps" json:"timestamps,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *TimestampsFilter) Reset() { *m = TimestampsFilter{} } | |||
| func (m *TimestampsFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*TimestampsFilter) ProtoMessage() {} | |||
| func (m *TimestampsFilter) GetTimestamps() []int64 { | |||
| if m != nil { | |||
| return m.Timestamps | |||
| } | |||
| return nil | |||
| } | |||
| type ValueFilter struct { | |||
| CompareFilter *CompareFilter `protobuf:"bytes,1,req,name=compare_filter" json:"compare_filter,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ValueFilter) Reset() { *m = ValueFilter{} } | |||
| func (m *ValueFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*ValueFilter) ProtoMessage() {} | |||
| func (m *ValueFilter) GetCompareFilter() *CompareFilter { | |||
| if m != nil { | |||
| return m.CompareFilter | |||
| } | |||
| return nil | |||
| } | |||
| type WhileMatchFilter struct { | |||
| Filter *Filter `protobuf:"bytes,1,req,name=filter" json:"filter,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *WhileMatchFilter) Reset() { *m = WhileMatchFilter{} } | |||
| func (m *WhileMatchFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*WhileMatchFilter) ProtoMessage() {} | |||
| func (m *WhileMatchFilter) GetFilter() *Filter { | |||
| if m != nil { | |||
| return m.Filter | |||
| } | |||
| return nil | |||
| } | |||
| type FilterAllFilter struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *FilterAllFilter) Reset() { *m = FilterAllFilter{} } | |||
| func (m *FilterAllFilter) String() string { return proto1.CompactTextString(m) } | |||
| func (*FilterAllFilter) ProtoMessage() {} | |||
| func init() { | |||
| proto1.RegisterEnum("proto.FilterList_Operator", FilterList_Operator_name, FilterList_Operator_value) | |||
| } | |||
| @@ -1,741 +0,0 @@ | |||
| // Code generated by protoc-gen-go. | |||
| // source: HBase.proto | |||
| // DO NOT EDIT! | |||
| package proto | |||
| import proto1 "github.com/golang/protobuf/proto" | |||
| import math "math" | |||
| // Reference imports to suppress errors if they are not otherwise used. | |||
| var _ = proto1.Marshal | |||
| var _ = math.Inf | |||
| // Comparison operators | |||
| type CompareType int32 | |||
| const ( | |||
| CompareType_LESS CompareType = 0 | |||
| CompareType_LESS_OR_EQUAL CompareType = 1 | |||
| CompareType_EQUAL CompareType = 2 | |||
| CompareType_NOT_EQUAL CompareType = 3 | |||
| CompareType_GREATER_OR_EQUAL CompareType = 4 | |||
| CompareType_GREATER CompareType = 5 | |||
| CompareType_NO_OP CompareType = 6 | |||
| ) | |||
| var CompareType_name = map[int32]string{ | |||
| 0: "LESS", | |||
| 1: "LESS_OR_EQUAL", | |||
| 2: "EQUAL", | |||
| 3: "NOT_EQUAL", | |||
| 4: "GREATER_OR_EQUAL", | |||
| 5: "GREATER", | |||
| 6: "NO_OP", | |||
| } | |||
| var CompareType_value = map[string]int32{ | |||
| "LESS": 0, | |||
| "LESS_OR_EQUAL": 1, | |||
| "EQUAL": 2, | |||
| "NOT_EQUAL": 3, | |||
| "GREATER_OR_EQUAL": 4, | |||
| "GREATER": 5, | |||
| "NO_OP": 6, | |||
| } | |||
| func (x CompareType) Enum() *CompareType { | |||
| p := new(CompareType) | |||
| *p = x | |||
| return p | |||
| } | |||
| func (x CompareType) String() string { | |||
| return proto1.EnumName(CompareType_name, int32(x)) | |||
| } | |||
| func (x *CompareType) UnmarshalJSON(data []byte) error { | |||
| value, err := proto1.UnmarshalJSONEnum(CompareType_value, data, "CompareType") | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *x = CompareType(value) | |||
| return nil | |||
| } | |||
| type RegionSpecifier_RegionSpecifierType int32 | |||
| const ( | |||
| // <tablename>,<startkey>,<regionId>.<encodedName> | |||
| RegionSpecifier_REGION_NAME RegionSpecifier_RegionSpecifierType = 1 | |||
| // hash of <tablename>,<startkey>,<regionId> | |||
| RegionSpecifier_ENCODED_REGION_NAME RegionSpecifier_RegionSpecifierType = 2 | |||
| ) | |||
| var RegionSpecifier_RegionSpecifierType_name = map[int32]string{ | |||
| 1: "REGION_NAME", | |||
| 2: "ENCODED_REGION_NAME", | |||
| } | |||
| var RegionSpecifier_RegionSpecifierType_value = map[string]int32{ | |||
| "REGION_NAME": 1, | |||
| "ENCODED_REGION_NAME": 2, | |||
| } | |||
| func (x RegionSpecifier_RegionSpecifierType) Enum() *RegionSpecifier_RegionSpecifierType { | |||
| p := new(RegionSpecifier_RegionSpecifierType) | |||
| *p = x | |||
| return p | |||
| } | |||
| func (x RegionSpecifier_RegionSpecifierType) String() string { | |||
| return proto1.EnumName(RegionSpecifier_RegionSpecifierType_name, int32(x)) | |||
| } | |||
| func (x *RegionSpecifier_RegionSpecifierType) UnmarshalJSON(data []byte) error { | |||
| value, err := proto1.UnmarshalJSONEnum(RegionSpecifier_RegionSpecifierType_value, data, "RegionSpecifier_RegionSpecifierType") | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *x = RegionSpecifier_RegionSpecifierType(value) | |||
| return nil | |||
| } | |||
| type SnapshotDescription_Type int32 | |||
| const ( | |||
| SnapshotDescription_DISABLED SnapshotDescription_Type = 0 | |||
| SnapshotDescription_FLUSH SnapshotDescription_Type = 1 | |||
| SnapshotDescription_SKIPFLUSH SnapshotDescription_Type = 2 | |||
| ) | |||
| var SnapshotDescription_Type_name = map[int32]string{ | |||
| 0: "DISABLED", | |||
| 1: "FLUSH", | |||
| 2: "SKIPFLUSH", | |||
| } | |||
| var SnapshotDescription_Type_value = map[string]int32{ | |||
| "DISABLED": 0, | |||
| "FLUSH": 1, | |||
| "SKIPFLUSH": 2, | |||
| } | |||
| func (x SnapshotDescription_Type) Enum() *SnapshotDescription_Type { | |||
| p := new(SnapshotDescription_Type) | |||
| *p = x | |||
| return p | |||
| } | |||
| func (x SnapshotDescription_Type) String() string { | |||
| return proto1.EnumName(SnapshotDescription_Type_name, int32(x)) | |||
| } | |||
| func (x *SnapshotDescription_Type) UnmarshalJSON(data []byte) error { | |||
| value, err := proto1.UnmarshalJSONEnum(SnapshotDescription_Type_value, data, "SnapshotDescription_Type") | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *x = SnapshotDescription_Type(value) | |||
| return nil | |||
| } | |||
| // * | |||
| // Table Name | |||
| type TableName struct { | |||
| Namespace []byte `protobuf:"bytes,1,req,name=namespace" json:"namespace,omitempty"` | |||
| Qualifier []byte `protobuf:"bytes,2,req,name=qualifier" json:"qualifier,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *TableName) Reset() { *m = TableName{} } | |||
| func (m *TableName) String() string { return proto1.CompactTextString(m) } | |||
| func (*TableName) ProtoMessage() {} | |||
| func (m *TableName) GetNamespace() []byte { | |||
| if m != nil { | |||
| return m.Namespace | |||
| } | |||
| return nil | |||
| } | |||
| func (m *TableName) GetQualifier() []byte { | |||
| if m != nil { | |||
| return m.Qualifier | |||
| } | |||
| return nil | |||
| } | |||
| // * | |||
| // Table Schema | |||
| // Inspired by the rest TableSchema | |||
| type TableSchema struct { | |||
| TableName *TableName `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` | |||
| Attributes []*BytesBytesPair `protobuf:"bytes,2,rep,name=attributes" json:"attributes,omitempty"` | |||
| ColumnFamilies []*ColumnFamilySchema `protobuf:"bytes,3,rep,name=column_families" json:"column_families,omitempty"` | |||
| Configuration []*NameStringPair `protobuf:"bytes,4,rep,name=configuration" json:"configuration,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *TableSchema) Reset() { *m = TableSchema{} } | |||
| func (m *TableSchema) String() string { return proto1.CompactTextString(m) } | |||
| func (*TableSchema) ProtoMessage() {} | |||
| func (m *TableSchema) GetTableName() *TableName { | |||
| if m != nil { | |||
| return m.TableName | |||
| } | |||
| return nil | |||
| } | |||
| func (m *TableSchema) GetAttributes() []*BytesBytesPair { | |||
| if m != nil { | |||
| return m.Attributes | |||
| } | |||
| return nil | |||
| } | |||
| func (m *TableSchema) GetColumnFamilies() []*ColumnFamilySchema { | |||
| if m != nil { | |||
| return m.ColumnFamilies | |||
| } | |||
| return nil | |||
| } | |||
| func (m *TableSchema) GetConfiguration() []*NameStringPair { | |||
| if m != nil { | |||
| return m.Configuration | |||
| } | |||
| return nil | |||
| } | |||
| // * | |||
| // Column Family Schema | |||
| // Inspired by the rest ColumSchemaMessage | |||
| type ColumnFamilySchema struct { | |||
| Name []byte `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` | |||
| Attributes []*BytesBytesPair `protobuf:"bytes,2,rep,name=attributes" json:"attributes,omitempty"` | |||
| Configuration []*NameStringPair `protobuf:"bytes,3,rep,name=configuration" json:"configuration,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ColumnFamilySchema) Reset() { *m = ColumnFamilySchema{} } | |||
| func (m *ColumnFamilySchema) String() string { return proto1.CompactTextString(m) } | |||
| func (*ColumnFamilySchema) ProtoMessage() {} | |||
| func (m *ColumnFamilySchema) GetName() []byte { | |||
| if m != nil { | |||
| return m.Name | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ColumnFamilySchema) GetAttributes() []*BytesBytesPair { | |||
| if m != nil { | |||
| return m.Attributes | |||
| } | |||
| return nil | |||
| } | |||
| func (m *ColumnFamilySchema) GetConfiguration() []*NameStringPair { | |||
| if m != nil { | |||
| return m.Configuration | |||
| } | |||
| return nil | |||
| } | |||
| // * | |||
| // Protocol buffer version of HRegionInfo. | |||
| type RegionInfo struct { | |||
| RegionId *uint64 `protobuf:"varint,1,req,name=region_id" json:"region_id,omitempty"` | |||
| TableName *TableName `protobuf:"bytes,2,req,name=table_name" json:"table_name,omitempty"` | |||
| StartKey []byte `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"` | |||
| EndKey []byte `protobuf:"bytes,4,opt,name=end_key" json:"end_key,omitempty"` | |||
| Offline *bool `protobuf:"varint,5,opt,name=offline" json:"offline,omitempty"` | |||
| Split *bool `protobuf:"varint,6,opt,name=split" json:"split,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *RegionInfo) Reset() { *m = RegionInfo{} } | |||
| func (m *RegionInfo) String() string { return proto1.CompactTextString(m) } | |||
| func (*RegionInfo) ProtoMessage() {} | |||
| func (m *RegionInfo) GetRegionId() uint64 { | |||
| if m != nil && m.RegionId != nil { | |||
| return *m.RegionId | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *RegionInfo) GetTableName() *TableName { | |||
| if m != nil { | |||
| return m.TableName | |||
| } | |||
| return nil | |||
| } | |||
| func (m *RegionInfo) GetStartKey() []byte { | |||
| if m != nil { | |||
| return m.StartKey | |||
| } | |||
| return nil | |||
| } | |||
| func (m *RegionInfo) GetEndKey() []byte { | |||
| if m != nil { | |||
| return m.EndKey | |||
| } | |||
| return nil | |||
| } | |||
| func (m *RegionInfo) GetOffline() bool { | |||
| if m != nil && m.Offline != nil { | |||
| return *m.Offline | |||
| } | |||
| return false | |||
| } | |||
| func (m *RegionInfo) GetSplit() bool { | |||
| if m != nil && m.Split != nil { | |||
| return *m.Split | |||
| } | |||
| return false | |||
| } | |||
| // * | |||
| // Protocol buffer for favored nodes | |||
| type FavoredNodes struct { | |||
| FavoredNode []*ServerName `protobuf:"bytes,1,rep,name=favored_node" json:"favored_node,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *FavoredNodes) Reset() { *m = FavoredNodes{} } | |||
| func (m *FavoredNodes) String() string { return proto1.CompactTextString(m) } | |||
| func (*FavoredNodes) ProtoMessage() {} | |||
| func (m *FavoredNodes) GetFavoredNode() []*ServerName { | |||
| if m != nil { | |||
| return m.FavoredNode | |||
| } | |||
| return nil | |||
| } | |||
| // * | |||
| // Container protocol buffer to specify a region. | |||
| // You can specify region by region name, or the hash | |||
| // of the region name, which is known as encoded | |||
| // region name. | |||
| type RegionSpecifier struct { | |||
| Type *RegionSpecifier_RegionSpecifierType `protobuf:"varint,1,req,name=type,enum=proto.RegionSpecifier_RegionSpecifierType" json:"type,omitempty"` | |||
| Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *RegionSpecifier) Reset() { *m = RegionSpecifier{} } | |||
| func (m *RegionSpecifier) String() string { return proto1.CompactTextString(m) } | |||
| func (*RegionSpecifier) ProtoMessage() {} | |||
| func (m *RegionSpecifier) GetType() RegionSpecifier_RegionSpecifierType { | |||
| if m != nil && m.Type != nil { | |||
| return *m.Type | |||
| } | |||
| return RegionSpecifier_REGION_NAME | |||
| } | |||
| func (m *RegionSpecifier) GetValue() []byte { | |||
| if m != nil { | |||
| return m.Value | |||
| } | |||
| return nil | |||
| } | |||
| // * | |||
| // A range of time. Both from and to are Java time | |||
| // stamp in milliseconds. If you don't specify a time | |||
| // range, it means all time. By default, if not | |||
| // specified, from = 0, and to = Long.MAX_VALUE | |||
| type TimeRange struct { | |||
| From *uint64 `protobuf:"varint,1,opt,name=from" json:"from,omitempty"` | |||
| To *uint64 `protobuf:"varint,2,opt,name=to" json:"to,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *TimeRange) Reset() { *m = TimeRange{} } | |||
| func (m *TimeRange) String() string { return proto1.CompactTextString(m) } | |||
| func (*TimeRange) ProtoMessage() {} | |||
| func (m *TimeRange) GetFrom() uint64 { | |||
| if m != nil && m.From != nil { | |||
| return *m.From | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *TimeRange) GetTo() uint64 { | |||
| if m != nil && m.To != nil { | |||
| return *m.To | |||
| } | |||
| return 0 | |||
| } | |||
| // * | |||
| // Protocol buffer version of ServerName | |||
| type ServerName struct { | |||
| HostName *string `protobuf:"bytes,1,req,name=host_name" json:"host_name,omitempty"` | |||
| Port *uint32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` | |||
| StartCode *uint64 `protobuf:"varint,3,opt,name=start_code" json:"start_code,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ServerName) Reset() { *m = ServerName{} } | |||
| func (m *ServerName) String() string { return proto1.CompactTextString(m) } | |||
| func (*ServerName) ProtoMessage() {} | |||
| func (m *ServerName) GetHostName() string { | |||
| if m != nil && m.HostName != nil { | |||
| return *m.HostName | |||
| } | |||
| return "" | |||
| } | |||
| func (m *ServerName) GetPort() uint32 { | |||
| if m != nil && m.Port != nil { | |||
| return *m.Port | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *ServerName) GetStartCode() uint64 { | |||
| if m != nil && m.StartCode != nil { | |||
| return *m.StartCode | |||
| } | |||
| return 0 | |||
| } | |||
| type Coprocessor struct { | |||
| Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *Coprocessor) Reset() { *m = Coprocessor{} } | |||
| func (m *Coprocessor) String() string { return proto1.CompactTextString(m) } | |||
| func (*Coprocessor) ProtoMessage() {} | |||
| func (m *Coprocessor) GetName() string { | |||
| if m != nil && m.Name != nil { | |||
| return *m.Name | |||
| } | |||
| return "" | |||
| } | |||
| type NameStringPair struct { | |||
| Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` | |||
| Value *string `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *NameStringPair) Reset() { *m = NameStringPair{} } | |||
| func (m *NameStringPair) String() string { return proto1.CompactTextString(m) } | |||
| func (*NameStringPair) ProtoMessage() {} | |||
| func (m *NameStringPair) GetName() string { | |||
| if m != nil && m.Name != nil { | |||
| return *m.Name | |||
| } | |||
| return "" | |||
| } | |||
| func (m *NameStringPair) GetValue() string { | |||
| if m != nil && m.Value != nil { | |||
| return *m.Value | |||
| } | |||
| return "" | |||
| } | |||
| type NameBytesPair struct { | |||
| Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` | |||
| Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *NameBytesPair) Reset() { *m = NameBytesPair{} } | |||
| func (m *NameBytesPair) String() string { return proto1.CompactTextString(m) } | |||
| func (*NameBytesPair) ProtoMessage() {} | |||
| func (m *NameBytesPair) GetName() string { | |||
| if m != nil && m.Name != nil { | |||
| return *m.Name | |||
| } | |||
| return "" | |||
| } | |||
| func (m *NameBytesPair) GetValue() []byte { | |||
| if m != nil { | |||
| return m.Value | |||
| } | |||
| return nil | |||
| } | |||
| type BytesBytesPair struct { | |||
| First []byte `protobuf:"bytes,1,req,name=first" json:"first,omitempty"` | |||
| Second []byte `protobuf:"bytes,2,req,name=second" json:"second,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *BytesBytesPair) Reset() { *m = BytesBytesPair{} } | |||
| func (m *BytesBytesPair) String() string { return proto1.CompactTextString(m) } | |||
| func (*BytesBytesPair) ProtoMessage() {} | |||
| func (m *BytesBytesPair) GetFirst() []byte { | |||
| if m != nil { | |||
| return m.First | |||
| } | |||
| return nil | |||
| } | |||
| func (m *BytesBytesPair) GetSecond() []byte { | |||
| if m != nil { | |||
| return m.Second | |||
| } | |||
| return nil | |||
| } | |||
| type NameInt64Pair struct { | |||
| Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` | |||
| Value *int64 `protobuf:"varint,2,opt,name=value" json:"value,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *NameInt64Pair) Reset() { *m = NameInt64Pair{} } | |||
| func (m *NameInt64Pair) String() string { return proto1.CompactTextString(m) } | |||
| func (*NameInt64Pair) ProtoMessage() {} | |||
| func (m *NameInt64Pair) GetName() string { | |||
| if m != nil && m.Name != nil { | |||
| return *m.Name | |||
| } | |||
| return "" | |||
| } | |||
| func (m *NameInt64Pair) GetValue() int64 { | |||
| if m != nil && m.Value != nil { | |||
| return *m.Value | |||
| } | |||
| return 0 | |||
| } | |||
| // * | |||
| // Description of the snapshot to take | |||
| type SnapshotDescription struct { | |||
| Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` | |||
| Table *string `protobuf:"bytes,2,opt,name=table" json:"table,omitempty"` | |||
| CreationTime *int64 `protobuf:"varint,3,opt,name=creation_time,def=0" json:"creation_time,omitempty"` | |||
| Type *SnapshotDescription_Type `protobuf:"varint,4,opt,name=type,enum=proto.SnapshotDescription_Type,def=1" json:"type,omitempty"` | |||
| Version *int32 `protobuf:"varint,5,opt,name=version" json:"version,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *SnapshotDescription) Reset() { *m = SnapshotDescription{} } | |||
| func (m *SnapshotDescription) String() string { return proto1.CompactTextString(m) } | |||
| func (*SnapshotDescription) ProtoMessage() {} | |||
| const Default_SnapshotDescription_CreationTime int64 = 0 | |||
| const Default_SnapshotDescription_Type SnapshotDescription_Type = SnapshotDescription_FLUSH | |||
| func (m *SnapshotDescription) GetName() string { | |||
| if m != nil && m.Name != nil { | |||
| return *m.Name | |||
| } | |||
| return "" | |||
| } | |||
| func (m *SnapshotDescription) GetTable() string { | |||
| if m != nil && m.Table != nil { | |||
| return *m.Table | |||
| } | |||
| return "" | |||
| } | |||
| func (m *SnapshotDescription) GetCreationTime() int64 { | |||
| if m != nil && m.CreationTime != nil { | |||
| return *m.CreationTime | |||
| } | |||
| return Default_SnapshotDescription_CreationTime | |||
| } | |||
| func (m *SnapshotDescription) GetType() SnapshotDescription_Type { | |||
| if m != nil && m.Type != nil { | |||
| return *m.Type | |||
| } | |||
| return Default_SnapshotDescription_Type | |||
| } | |||
| func (m *SnapshotDescription) GetVersion() int32 { | |||
| if m != nil && m.Version != nil { | |||
| return *m.Version | |||
| } | |||
| return 0 | |||
| } | |||
| // * | |||
| // Description of the distributed procedure to take | |||
| type ProcedureDescription struct { | |||
| Signature *string `protobuf:"bytes,1,req,name=signature" json:"signature,omitempty"` | |||
| Instance *string `protobuf:"bytes,2,opt,name=instance" json:"instance,omitempty"` | |||
| CreationTime *int64 `protobuf:"varint,3,opt,name=creation_time,def=0" json:"creation_time,omitempty"` | |||
| Configuration []*NameStringPair `protobuf:"bytes,4,rep,name=configuration" json:"configuration,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *ProcedureDescription) Reset() { *m = ProcedureDescription{} } | |||
| func (m *ProcedureDescription) String() string { return proto1.CompactTextString(m) } | |||
| func (*ProcedureDescription) ProtoMessage() {} | |||
| const Default_ProcedureDescription_CreationTime int64 = 0 | |||
| func (m *ProcedureDescription) GetSignature() string { | |||
| if m != nil && m.Signature != nil { | |||
| return *m.Signature | |||
| } | |||
| return "" | |||
| } | |||
| func (m *ProcedureDescription) GetInstance() string { | |||
| if m != nil && m.Instance != nil { | |||
| return *m.Instance | |||
| } | |||
| return "" | |||
| } | |||
| func (m *ProcedureDescription) GetCreationTime() int64 { | |||
| if m != nil && m.CreationTime != nil { | |||
| return *m.CreationTime | |||
| } | |||
| return Default_ProcedureDescription_CreationTime | |||
| } | |||
| func (m *ProcedureDescription) GetConfiguration() []*NameStringPair { | |||
| if m != nil { | |||
| return m.Configuration | |||
| } | |||
| return nil | |||
| } | |||
| type EmptyMsg struct { | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *EmptyMsg) Reset() { *m = EmptyMsg{} } | |||
| func (m *EmptyMsg) String() string { return proto1.CompactTextString(m) } | |||
| func (*EmptyMsg) ProtoMessage() {} | |||
| type LongMsg struct { | |||
| LongMsg *int64 `protobuf:"varint,1,req,name=long_msg" json:"long_msg,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *LongMsg) Reset() { *m = LongMsg{} } | |||
| func (m *LongMsg) String() string { return proto1.CompactTextString(m) } | |||
| func (*LongMsg) ProtoMessage() {} | |||
| func (m *LongMsg) GetLongMsg() int64 { | |||
| if m != nil && m.LongMsg != nil { | |||
| return *m.LongMsg | |||
| } | |||
| return 0 | |||
| } | |||
| type DoubleMsg struct { | |||
| DoubleMsg *float64 `protobuf:"fixed64,1,req,name=double_msg" json:"double_msg,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *DoubleMsg) Reset() { *m = DoubleMsg{} } | |||
| func (m *DoubleMsg) String() string { return proto1.CompactTextString(m) } | |||
| func (*DoubleMsg) ProtoMessage() {} | |||
| func (m *DoubleMsg) GetDoubleMsg() float64 { | |||
| if m != nil && m.DoubleMsg != nil { | |||
| return *m.DoubleMsg | |||
| } | |||
| return 0 | |||
| } | |||
| type BigDecimalMsg struct { | |||
| BigdecimalMsg []byte `protobuf:"bytes,1,req,name=bigdecimal_msg" json:"bigdecimal_msg,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *BigDecimalMsg) Reset() { *m = BigDecimalMsg{} } | |||
| func (m *BigDecimalMsg) String() string { return proto1.CompactTextString(m) } | |||
| func (*BigDecimalMsg) ProtoMessage() {} | |||
| func (m *BigDecimalMsg) GetBigdecimalMsg() []byte { | |||
| if m != nil { | |||
| return m.BigdecimalMsg | |||
| } | |||
| return nil | |||
| } | |||
| type UUID struct { | |||
| LeastSigBits *uint64 `protobuf:"varint,1,req,name=least_sig_bits" json:"least_sig_bits,omitempty"` | |||
| MostSigBits *uint64 `protobuf:"varint,2,req,name=most_sig_bits" json:"most_sig_bits,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *UUID) Reset() { *m = UUID{} } | |||
| func (m *UUID) String() string { return proto1.CompactTextString(m) } | |||
| func (*UUID) ProtoMessage() {} | |||
| func (m *UUID) GetLeastSigBits() uint64 { | |||
| if m != nil && m.LeastSigBits != nil { | |||
| return *m.LeastSigBits | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *UUID) GetMostSigBits() uint64 { | |||
| if m != nil && m.MostSigBits != nil { | |||
| return *m.MostSigBits | |||
| } | |||
| return 0 | |||
| } | |||
| type NamespaceDescriptor struct { | |||
| Name []byte `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` | |||
| Configuration []*NameStringPair `protobuf:"bytes,2,rep,name=configuration" json:"configuration,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *NamespaceDescriptor) Reset() { *m = NamespaceDescriptor{} } | |||
| func (m *NamespaceDescriptor) String() string { return proto1.CompactTextString(m) } | |||
| func (*NamespaceDescriptor) ProtoMessage() {} | |||
| func (m *NamespaceDescriptor) GetName() []byte { | |||
| if m != nil { | |||
| return m.Name | |||
| } | |||
| return nil | |||
| } | |||
| func (m *NamespaceDescriptor) GetConfiguration() []*NameStringPair { | |||
| if m != nil { | |||
| return m.Configuration | |||
| } | |||
| return nil | |||
| } | |||
| // * | |||
| // Description of the region server info | |||
| type RegionServerInfo struct { | |||
| InfoPort *int32 `protobuf:"varint,1,opt,name=infoPort" json:"infoPort,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *RegionServerInfo) Reset() { *m = RegionServerInfo{} } | |||
| func (m *RegionServerInfo) String() string { return proto1.CompactTextString(m) } | |||
| func (*RegionServerInfo) ProtoMessage() {} | |||
| func (m *RegionServerInfo) GetInfoPort() int32 { | |||
| if m != nil && m.InfoPort != nil { | |||
| return *m.InfoPort | |||
| } | |||
| return 0 | |||
| } | |||
| func init() { | |||
| proto1.RegisterEnum("proto.CompareType", CompareType_name, CompareType_value) | |||
| proto1.RegisterEnum("proto.RegionSpecifier_RegionSpecifierType", RegionSpecifier_RegionSpecifierType_name, RegionSpecifier_RegionSpecifierType_value) | |||
| proto1.RegisterEnum("proto.SnapshotDescription_Type", SnapshotDescription_Type_name, SnapshotDescription_Type_value) | |||
| } | |||
| @@ -1,145 +0,0 @@ | |||
| // Code generated by protoc-gen-go. | |||
| // source: HFile.proto | |||
| // DO NOT EDIT! | |||
| package proto | |||
| import proto1 "github.com/golang/protobuf/proto" | |||
| import math "math" | |||
| // Reference imports to suppress errors if they are not otherwise used. | |||
| var _ = proto1.Marshal | |||
| var _ = math.Inf | |||
| // Map of name/values | |||
| type FileInfoProto struct { | |||
| MapEntry []*BytesBytesPair `protobuf:"bytes,1,rep,name=map_entry" json:"map_entry,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *FileInfoProto) Reset() { *m = FileInfoProto{} } | |||
| func (m *FileInfoProto) String() string { return proto1.CompactTextString(m) } | |||
| func (*FileInfoProto) ProtoMessage() {} | |||
| func (m *FileInfoProto) GetMapEntry() []*BytesBytesPair { | |||
| if m != nil { | |||
| return m.MapEntry | |||
| } | |||
| return nil | |||
| } | |||
| // HFile file trailer | |||
| type FileTrailerProto struct { | |||
| FileInfoOffset *uint64 `protobuf:"varint,1,opt,name=file_info_offset" json:"file_info_offset,omitempty"` | |||
| LoadOnOpenDataOffset *uint64 `protobuf:"varint,2,opt,name=load_on_open_data_offset" json:"load_on_open_data_offset,omitempty"` | |||
| UncompressedDataIndexSize *uint64 `protobuf:"varint,3,opt,name=uncompressed_data_index_size" json:"uncompressed_data_index_size,omitempty"` | |||
| TotalUncompressedBytes *uint64 `protobuf:"varint,4,opt,name=total_uncompressed_bytes" json:"total_uncompressed_bytes,omitempty"` | |||
| DataIndexCount *uint32 `protobuf:"varint,5,opt,name=data_index_count" json:"data_index_count,omitempty"` | |||
| MetaIndexCount *uint32 `protobuf:"varint,6,opt,name=meta_index_count" json:"meta_index_count,omitempty"` | |||
| EntryCount *uint64 `protobuf:"varint,7,opt,name=entry_count" json:"entry_count,omitempty"` | |||
| NumDataIndexLevels *uint32 `protobuf:"varint,8,opt,name=num_data_index_levels" json:"num_data_index_levels,omitempty"` | |||
| FirstDataBlockOffset *uint64 `protobuf:"varint,9,opt,name=first_data_block_offset" json:"first_data_block_offset,omitempty"` | |||
| LastDataBlockOffset *uint64 `protobuf:"varint,10,opt,name=last_data_block_offset" json:"last_data_block_offset,omitempty"` | |||
| ComparatorClassName *string `protobuf:"bytes,11,opt,name=comparator_class_name" json:"comparator_class_name,omitempty"` | |||
| CompressionCodec *uint32 `protobuf:"varint,12,opt,name=compression_codec" json:"compression_codec,omitempty"` | |||
| EncryptionKey []byte `protobuf:"bytes,13,opt,name=encryption_key" json:"encryption_key,omitempty"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *FileTrailerProto) Reset() { *m = FileTrailerProto{} } | |||
| func (m *FileTrailerProto) String() string { return proto1.CompactTextString(m) } | |||
| func (*FileTrailerProto) ProtoMessage() {} | |||
| func (m *FileTrailerProto) GetFileInfoOffset() uint64 { | |||
| if m != nil && m.FileInfoOffset != nil { | |||
| return *m.FileInfoOffset | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *FileTrailerProto) GetLoadOnOpenDataOffset() uint64 { | |||
| if m != nil && m.LoadOnOpenDataOffset != nil { | |||
| return *m.LoadOnOpenDataOffset | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *FileTrailerProto) GetUncompressedDataIndexSize() uint64 { | |||
| if m != nil && m.UncompressedDataIndexSize != nil { | |||
| return *m.UncompressedDataIndexSize | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *FileTrailerProto) GetTotalUncompressedBytes() uint64 { | |||
| if m != nil && m.TotalUncompressedBytes != nil { | |||
| return *m.TotalUncompressedBytes | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *FileTrailerProto) GetDataIndexCount() uint32 { | |||
| if m != nil && m.DataIndexCount != nil { | |||
| return *m.DataIndexCount | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *FileTrailerProto) GetMetaIndexCount() uint32 { | |||
| if m != nil && m.MetaIndexCount != nil { | |||
| return *m.MetaIndexCount | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *FileTrailerProto) GetEntryCount() uint64 { | |||
| if m != nil && m.EntryCount != nil { | |||
| return *m.EntryCount | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *FileTrailerProto) GetNumDataIndexLevels() uint32 { | |||
| if m != nil && m.NumDataIndexLevels != nil { | |||
| return *m.NumDataIndexLevels | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *FileTrailerProto) GetFirstDataBlockOffset() uint64 { | |||
| if m != nil && m.FirstDataBlockOffset != nil { | |||
| return *m.FirstDataBlockOffset | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *FileTrailerProto) GetLastDataBlockOffset() uint64 { | |||
| if m != nil && m.LastDataBlockOffset != nil { | |||
| return *m.LastDataBlockOffset | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *FileTrailerProto) GetComparatorClassName() string { | |||
| if m != nil && m.ComparatorClassName != nil { | |||
| return *m.ComparatorClassName | |||
| } | |||
| return "" | |||
| } | |||
| func (m *FileTrailerProto) GetCompressionCodec() uint32 { | |||
| if m != nil && m.CompressionCodec != nil { | |||
| return *m.CompressionCodec | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *FileTrailerProto) GetEncryptionKey() []byte { | |||
| if m != nil { | |||
| return m.EncryptionKey | |||
| } | |||
| return nil | |||
| } | |||
| func init() { | |||
| } | |||