* Dropped unused codekit config
* Integrated dynamic and static bindata for public
* Ignore public bindata
* Add a general generate make task
* Integrated flexible public assets into web command
* Updated vendoring, added all missing govendor deps
* Made the linter happy with the bindata and dynamic code
* Moved public bindata definition to modules directory
* Ignoring the new bindata path now
* Updated to the new public modules import path
* Updated public bindata command and drop the new prefix
| @@ -28,6 +28,8 @@ _testmain.go | |||
| coverage.out | |||
| /modules/public/bindata.go | |||
| *.db | |||
| *.log | |||
| @@ -37,12 +37,6 @@ clean: | |||
| go clean -i ./... | |||
| rm -rf $(BIN) $(DIST) | |||
| .PHONY: deps | |||
| deps: | |||
| @which go-bindata > /dev/null; if [ $$? -ne 0 ]; then \ | |||
| go get -u github.com/jteeuwen/go-bindata/...; \ | |||
| fi | |||
| .PHONY: fmt | |||
| fmt: | |||
| go fmt $(PACKAGES) | |||
| @@ -51,6 +45,13 @@ fmt: | |||
| vet: | |||
| go vet $(PACKAGES) | |||
| .PHONY: generate | |||
| generate: | |||
| @which go-bindata > /dev/null; if [ $$? -ne 0 ]; then \ | |||
| go get -u github.com/jteeuwen/go-bindata/...; \ | |||
| fi | |||
| go generate $(PACKAGES) | |||
| .PHONY: errcheck | |||
| errcheck: | |||
| @which errcheck > /dev/null; if [ $$? -ne 0 ]; then \ | |||
| @@ -129,6 +130,9 @@ bindata: modules/bindata/bindata.go | |||
| .IGNORE: modules/bindata/bindata.go | |||
| modules/bindata/bindata.go: $(BINDATA) | |||
| @which go-bindata > /dev/null; if [ $$? -ne 0 ]; then \ | |||
| go get -u github.com/jteeuwen/go-bindata/...; \ | |||
| fi | |||
| go-bindata -o=$@ -ignore="\\.go|README.md|TRANSLATORS" -pkg=bindata conf/... | |||
| go fmt $@ | |||
| sed -i.bak 's/confLocaleLocale_/confLocaleLocale/' $@ | |||
| @@ -148,5 +152,5 @@ stylesheets: public/css/index.css | |||
| public/css/index.css: $(STYLESHEETS) | |||
| lessc $< $@ | |||
| .PHONY: generate | |||
| generate: bindata javascripts stylesheets | |||
| .PHONY: assets | |||
| assets: bindata javascripts stylesheets | |||
| @@ -21,6 +21,7 @@ import ( | |||
| "code.gitea.io/gitea/modules/bindata" | |||
| "code.gitea.io/gitea/modules/context" | |||
| "code.gitea.io/gitea/modules/log" | |||
| "code.gitea.io/gitea/modules/public" | |||
| "code.gitea.io/gitea/modules/setting" | |||
| "code.gitea.io/gitea/modules/template" | |||
| "code.gitea.io/gitea/routers" | |||
| @@ -125,9 +126,9 @@ func newMacaron() *macaron.Macaron { | |||
| if setting.Protocol == setting.FCGI { | |||
| m.SetURLPrefix(setting.AppSubURL) | |||
| } | |||
| m.Use(macaron.Static( | |||
| path.Join(setting.StaticRootPath, "public"), | |||
| macaron.StaticOptions{ | |||
| m.Use(public.Static( | |||
| &public.Options{ | |||
| Directory: path.Join(setting.StaticRootPath, "public"), | |||
| SkipLogging: setting.DisableRouterLog, | |||
| }, | |||
| )) | |||
| @@ -0,0 +1,21 @@ | |||
| // +build !bindata | |||
| // Copyright 2016 The Gitea Authors. All rights reserved. | |||
| // Use of this source code is governed by a MIT-style | |||
| // license that can be found in the LICENSE file. | |||
| package public | |||
| import ( | |||
| "gopkg.in/macaron.v1" | |||
| ) | |||
| // Static implements the macaron static handler for serving assets. | |||
| func Static(opts *Options) macaron.Handler { | |||
| return macaron.Static( | |||
| opts.Directory, | |||
| macaron.StaticOptions{ | |||
| SkipLogging: opts.SkipLogging, | |||
| }, | |||
| ) | |||
| } | |||
| @@ -0,0 +1,14 @@ | |||
| // Copyright 2016 The Gitea Authors. All rights reserved. | |||
| // Use of this source code is governed by a MIT-style | |||
| // license that can be found in the LICENSE file. | |||
| package public | |||
| //go:generate go-bindata -tags "bindata" -ignore "\\.go|\\.less" -pkg "public" -o "bindata.go" ../../public/... | |||
| //go:generate go fmt bindata.go | |||
| // Options represents the available options to configure the macaron handler. | |||
| type Options struct { | |||
| Directory string | |||
| SkipLogging bool | |||
| } | |||
| @@ -0,0 +1,29 @@ | |||
| // +build bindata | |||
| // Copyright 2016 The Gitea Authors. All rights reserved. | |||
| // Use of this source code is governed by a MIT-style | |||
| // license that can be found in the LICENSE file. | |||
| package public | |||
| import ( | |||
| "github.com/go-macaron/bindata" | |||
| "gopkg.in/macaron.v1" | |||
| ) | |||
| // Static implements the macaron static handler for serving assets. | |||
| func Static(opts *Options) macaron.Handler { | |||
| return macaron.Static( | |||
| opts.Directory, | |||
| macaron.StaticOptions{ | |||
| SkipLogging: opts.SkipLogging, | |||
| FileSystem: bindata.Static(bindata.Options{ | |||
| Asset: Asset, | |||
| AssetDir: AssetDir, | |||
| AssetInfo: AssetInfo, | |||
| AssetNames: AssetNames, | |||
| Prefix: "../../public", | |||
| }), | |||
| }, | |||
| ) | |||
| } | |||
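As an aside, the generated accessors can be used directly as well. A minimal, hypothetical sketch (not part of this change), assuming the `bindata` build tag is active and that the stored asset names keep the `../../public/` prefix, since the `//go:generate` line above passes `../../public/...` without a `-prefix` flag; the file path is illustrative:

```go
// +build bindata

package public

// faviconBytes returns one embedded file straight from the generated
// bindata accessors. The macaron handler above strips the same path
// prefix at serve time via Options.Prefix.
func faviconBytes() ([]byte, error) {
	return Asset("../../public/img/favicon.png") // hypothetical asset path
}
```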
| @@ -0,0 +1,20 @@ | |||
| The MIT License (MIT) | |||
| Copyright (c) 2013 Ben Johnson | |||
| Permission is hereby granted, free of charge, to any person obtaining a copy of | |||
| this software and associated documentation files (the "Software"), to deal in | |||
| the Software without restriction, including without limitation the rights to | |||
| use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of | |||
| the Software, and to permit persons to whom the Software is furnished to do so, | |||
| subject to the following conditions: | |||
| The above copyright notice and this permission notice shall be included in all | |||
| copies or substantial portions of the Software. | |||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS | |||
| FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR | |||
| COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER | |||
| IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |||
| CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | |||
| @@ -0,0 +1,18 @@ | |||
| BRANCH=`git rev-parse --abbrev-ref HEAD` | |||
| COMMIT=`git rev-parse --short HEAD` | |||
| GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" | |||
| default: build | |||
| race: | |||
| @go test -v -race -test.run="TestSimulate_(100op|1000op)" | |||
| # go get github.com/kisielk/errcheck | |||
| errcheck: | |||
| @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt | |||
| test: | |||
| @go test -v -cover . | |||
| @go test -v ./cmd/bolt | |||
| .PHONY: fmt test | |||
| @@ -0,0 +1,848 @@ | |||
| Bolt [](https://coveralls.io/r/boltdb/bolt?branch=master) [](https://godoc.org/github.com/boltdb/bolt)  | |||
| ==== | |||
| Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] | |||
| [LMDB project][lmdb]. The goal of the project is to provide a simple, | |||
| fast, and reliable database for projects that don't require a full database | |||
| server such as Postgres or MySQL. | |||
| Since Bolt is meant to be used as such a low-level piece of functionality, | |||
| simplicity is key. The API will be small and only focus on getting values | |||
| and setting values. That's it. | |||
| [hyc_symas]: https://twitter.com/hyc_symas | |||
| [lmdb]: http://symas.com/mdb/ | |||
| ## Project Status | |||
| Bolt is stable and the API is fixed. Full unit test coverage and randomized | |||
| black box testing are used to ensure database consistency and thread safety. | |||
| Bolt is currently in high-load production environments serving databases as | |||
| large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed | |||
| services every day. | |||
| ## Table of Contents | |||
| - [Getting Started](#getting-started) | |||
| - [Installing](#installing) | |||
| - [Opening a database](#opening-a-database) | |||
| - [Transactions](#transactions) | |||
| - [Read-write transactions](#read-write-transactions) | |||
| - [Read-only transactions](#read-only-transactions) | |||
| - [Batch read-write transactions](#batch-read-write-transactions) | |||
| - [Managing transactions manually](#managing-transactions-manually) | |||
| - [Using buckets](#using-buckets) | |||
| - [Using key/value pairs](#using-keyvalue-pairs) | |||
| - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) | |||
| - [Iterating over keys](#iterating-over-keys) | |||
| - [Prefix scans](#prefix-scans) | |||
| - [Range scans](#range-scans) | |||
| - [ForEach()](#foreach) | |||
| - [Nested buckets](#nested-buckets) | |||
| - [Database backups](#database-backups) | |||
| - [Statistics](#statistics) | |||
| - [Read-Only Mode](#read-only-mode) | |||
| - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) | |||
| - [Resources](#resources) | |||
| - [Comparison with other databases](#comparison-with-other-databases) | |||
| - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) | |||
| - [LevelDB, RocksDB](#leveldb-rocksdb) | |||
| - [LMDB](#lmdb) | |||
| - [Caveats & Limitations](#caveats--limitations) | |||
| - [Reading the Source](#reading-the-source) | |||
| - [Other Projects Using Bolt](#other-projects-using-bolt) | |||
| ## Getting Started | |||
| ### Installing | |||
| To start using Bolt, install Go and run `go get`: | |||
| ```sh | |||
| $ go get github.com/boltdb/bolt/... | |||
| ``` | |||
| This will retrieve the library and install the `bolt` command line utility into | |||
| your `$GOBIN` path. | |||
| ### Opening a database | |||
| The top-level object in Bolt is a `DB`. It is represented as a single file on | |||
| your disk and represents a consistent snapshot of your data. | |||
| To open your database, simply use the `bolt.Open()` function: | |||
| ```go | |||
| package main | |||
| import ( | |||
| "log" | |||
| "github.com/boltdb/bolt" | |||
| ) | |||
| func main() { | |||
| // Open the my.db data file in your current directory. | |||
| // It will be created if it doesn't exist. | |||
| db, err := bolt.Open("my.db", 0600, nil) | |||
| if err != nil { | |||
| log.Fatal(err) | |||
| } | |||
| defer db.Close() | |||
| ... | |||
| } | |||
| ``` | |||
| Please note that Bolt obtains a file lock on the data file so multiple processes | |||
| cannot open the same database at the same time. Opening an already open Bolt | |||
| database will cause it to hang until the other process closes it. To prevent | |||
| an indefinite wait you can pass a timeout option to the `Open()` function: | |||
| ```go | |||
| db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) | |||
| ``` | |||
| ### Transactions | |||
| Bolt allows only one read-write transaction at a time but allows as many | |||
| read-only transactions as you want at a time. Each transaction has a consistent | |||
| view of the data as it existed when the transaction started. | |||
| Individual transactions and all objects created from them (e.g. buckets, keys) | |||
| are not thread safe. To work with data in multiple goroutines you must start | |||
| a transaction for each one or use locking to ensure only one goroutine accesses | |||
| a transaction at a time. Creating a transaction from the `DB` is thread safe. | |||
| Read-only transactions and read-write transactions should not depend on one | |||
| another and generally shouldn't be opened simultaneously in the same goroutine. | |||
| This can cause a deadlock as the read-write transaction needs to periodically | |||
| re-map the data file but it cannot do so while a read-only transaction is open. | |||
| #### Read-write transactions | |||
| To start a read-write transaction, you can use the `DB.Update()` function: | |||
| ```go | |||
| err := db.Update(func(tx *bolt.Tx) error { | |||
| ... | |||
| return nil | |||
| }) | |||
| ``` | |||
| Inside the closure, you have a consistent view of the database. You commit the | |||
| transaction by returning `nil` at the end. You can also rollback the transaction | |||
| at any point by returning an error. All database operations are allowed inside | |||
| a read-write transaction. | |||
| Always check the return error as it will report any disk failures that can cause | |||
| your transaction to not complete. If you return an error within your closure | |||
| it will be passed through. | |||
| #### Read-only transactions | |||
| To start a read-only transaction, you can use the `DB.View()` function: | |||
| ```go | |||
| err := db.View(func(tx *bolt.Tx) error { | |||
| ... | |||
| return nil | |||
| }) | |||
| ``` | |||
| You also get a consistent view of the database within this closure, however, | |||
| no mutating operations are allowed within a read-only transaction. You can only | |||
| retrieve buckets, retrieve values, and copy the database within a read-only | |||
| transaction. | |||
| #### Batch read-write transactions | |||
| Each `DB.Update()` waits for the disk to commit the writes. This overhead | |||
| can be minimized by combining multiple updates with the `DB.Batch()` | |||
| function: | |||
| ```go | |||
| err := db.Batch(func(tx *bolt.Tx) error { | |||
| ... | |||
| return nil | |||
| }) | |||
| ``` | |||
| Concurrent Batch calls are opportunistically combined into larger | |||
| transactions. Batch is only useful when there are multiple goroutines | |||
| calling it. | |||
| The trade-off is that `Batch` can call the given | |||
| function multiple times if parts of the transaction fail. The | |||
| function must be idempotent and side effects must take effect only | |||
| after a successful return from `DB.Batch()`. | |||
| For example: don't display messages from inside the function, instead | |||
| set variables in the enclosing scope: | |||
| ```go | |||
| var id uint64 | |||
| err := db.Batch(func(tx *bolt.Tx) error { | |||
| // Find last key in bucket, decode as bigendian uint64, increment | |||
| // by one, encode back to []byte, and add new key. | |||
| ... | |||
| id = newValue | |||
| return nil | |||
| }) | |||
| if err != nil { | |||
| return ... | |||
| } | |||
| fmt.Printf("Allocated ID %d\n", id) | |||
| ``` | |||
| #### Managing transactions manually | |||
| The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` | |||
| function. These helper functions will start the transaction, execute a function, | |||
| and then safely close your transaction if an error is returned. This is the | |||
| recommended way to use Bolt transactions. | |||
| However, sometimes you may want to manually start and end your transactions. | |||
| You can use the `DB.Begin()` function directly but **please** be sure to close | |||
| the transaction. | |||
| ```go | |||
| // Start a writable transaction. | |||
| tx, err := db.Begin(true) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer tx.Rollback() | |||
| // Use the transaction... | |||
| _, err = tx.CreateBucket([]byte("MyBucket")) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| // Commit the transaction and check for error. | |||
| if err := tx.Commit(); err != nil { | |||
| return err | |||
| } | |||
| ``` | |||
| The first argument to `DB.Begin()` is a boolean stating if the transaction | |||
| should be writable. | |||
| ### Using buckets | |||
| Buckets are collections of key/value pairs within the database. All keys in a | |||
| bucket must be unique. You can create a bucket using the `Tx.CreateBucket()` | |||
| function: | |||
| ```go | |||
| db.Update(func(tx *bolt.Tx) error { | |||
| b, err := tx.CreateBucket([]byte("MyBucket")) | |||
| if err != nil { | |||
| return fmt.Errorf("create bucket: %s", err) | |||
| } | |||
| return nil | |||
| }) | |||
| ``` | |||
| You can also create a bucket only if it doesn't exist by using the | |||
| `Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this | |||
| function for all your top-level buckets after you open your database so you can | |||
| guarantee that they exist for future transactions. | |||
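A minimal sketch of that initialization pattern (the bucket names here are illustrative):

```go
db, err := bolt.Open("my.db", 0600, nil)
if err != nil {
	log.Fatal(err)
}
defer db.Close()

// Ensure every top-level bucket exists before serving requests.
err = db.Update(func(tx *bolt.Tx) error {
	for _, name := range []string{"users", "events"} {
		if _, err := tx.CreateBucketIfNotExists([]byte(name)); err != nil {
			return err
		}
	}
	return nil
})
if err != nil {
	log.Fatal(err)
}
```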
| To delete a bucket, simply call the `Tx.DeleteBucket()` function. | |||
| ### Using key/value pairs | |||
| To save a key/value pair to a bucket, use the `Bucket.Put()` function: | |||
| ```go | |||
| db.Update(func(tx *bolt.Tx) error { | |||
| b := tx.Bucket([]byte("MyBucket")) | |||
| err := b.Put([]byte("answer"), []byte("42")) | |||
| return err | |||
| }) | |||
| ``` | |||
| This will set the value of the `"answer"` key to `"42"` in the `MyBucket` | |||
| bucket. To retrieve this value, we can use the `Bucket.Get()` function: | |||
| ```go | |||
| db.View(func(tx *bolt.Tx) error { | |||
| b := tx.Bucket([]byte("MyBucket")) | |||
| v := b.Get([]byte("answer")) | |||
| fmt.Printf("The answer is: %s\n", v) | |||
| return nil | |||
| }) | |||
| ``` | |||
| The `Get()` function does not return an error because its operation is | |||
| guaranteed to work (unless there is some kind of system failure). If the key | |||
| exists then it will return its byte slice value. If it doesn't exist then it | |||
| will return `nil`. It's important to note that you can have a zero-length value | |||
| set to a key which is different than the key not existing. | |||
| Use the `Bucket.Delete()` function to delete a key from the bucket. | |||
| Please note that values returned from `Get()` are only valid while the | |||
| transaction is open. If you need to use a value outside of the transaction | |||
| then you must use `copy()` to copy it to another byte slice. | |||
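For example, a sketch of carrying a value out of the transaction, reusing the bucket and key from the snippets above:

```go
var valueCopy []byte
err := db.View(func(tx *bolt.Tx) error {
	v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
	if v == nil {
		// Key absent. Note that v != nil with len(v) == 0 would instead
		// mean the key exists with a zero-length value.
		return nil
	}
	valueCopy = make([]byte, len(v))
	copy(valueCopy, v)
	return nil
})
// valueCopy remains valid here; v would not.
```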
| ### Autoincrementing integer for the bucket | |||
| By using the `NextSequence()` function, you can let Bolt determine a sequence | |||
| which can be used as the unique identifier for your key/value pairs. See the | |||
| example below. | |||
| ```go | |||
| // CreateUser saves u to the store. The new user ID is set on u once the data is persisted. | |||
| func (s *Store) CreateUser(u *User) error { | |||
| return s.db.Update(func(tx *bolt.Tx) error { | |||
| // Retrieve the users bucket. | |||
| // This should be created when the DB is first opened. | |||
| b := tx.Bucket([]byte("users")) | |||
| // Generate ID for the user. | |||
| // This returns an error only if the Tx is closed or not writable. | |||
| // That can't happen in an Update() call, so the error check is skipped here. | |||
| id, _ := b.NextSequence() | |||
| u.ID = int(id) | |||
| // Marshal user data into bytes. | |||
| buf, err := json.Marshal(u) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| // Persist bytes to users bucket. | |||
| return b.Put(itob(u.ID), buf) | |||
| }) | |||
| } | |||
| // itob returns an 8-byte big endian representation of v. | |||
| func itob(v int) []byte { | |||
| b := make([]byte, 8) | |||
| binary.BigEndian.PutUint64(b, uint64(v)) | |||
| return b | |||
| } | |||
| type User struct { | |||
| ID int | |||
| ... | |||
| } | |||
| ``` | |||
| ### Iterating over keys | |||
| Bolt stores its keys in byte-sorted order within a bucket. This makes sequential | |||
| iteration over these keys extremely fast. To iterate over keys we'll use a | |||
| `Cursor`: | |||
| ```go | |||
| db.View(func(tx *bolt.Tx) error { | |||
| // Assume bucket exists and has keys | |||
| b := tx.Bucket([]byte("MyBucket")) | |||
| c := b.Cursor() | |||
| for k, v := c.First(); k != nil; k, v = c.Next() { | |||
| fmt.Printf("key=%s, value=%s\n", k, v) | |||
| } | |||
| return nil | |||
| }) | |||
| ``` | |||
| The cursor allows you to move to a specific point in the list of keys and move | |||
| forward or backward through the keys one at a time. | |||
| The following functions are available on the cursor: | |||
| ``` | |||
| First() Move to the first key. | |||
| Last() Move to the last key. | |||
| Seek() Move to a specific key. | |||
| Next() Move to the next key. | |||
| Prev() Move to the previous key. | |||
| ``` | |||
| Each of those functions has a return signature of `(key []byte, value []byte)`. | |||
| When you have iterated to the end of the cursor then `Next()` will return a | |||
| `nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` | |||
| before calling `Next()` or `Prev()`. If you do not seek to a position then | |||
| these functions will return a `nil` key. | |||
| During iteration, if the key is non-`nil` but the value is `nil`, that means | |||
| the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to | |||
| access the sub-bucket. | |||
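A sketch of that check during iteration (reusing the bucket from above):

```go
db.View(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	c := b.Cursor()
	for k, v := c.First(); k != nil; k, v = c.Next() {
		if v == nil {
			// A nil value marks a nested bucket stored under this key.
			fmt.Printf("sub-bucket=%s\n", k)
			continue
		}
		fmt.Printf("key=%s, value=%s\n", k, v)
	}
	return nil
})
```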
| #### Prefix scans | |||
| To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: | |||
| ```go | |||
| db.View(func(tx *bolt.Tx) error { | |||
| // Assume bucket exists and has keys | |||
| c := tx.Bucket([]byte("MyBucket")).Cursor() | |||
| prefix := []byte("1234") | |||
| for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { | |||
| fmt.Printf("key=%s, value=%s\n", k, v) | |||
| } | |||
| return nil | |||
| }) | |||
| ``` | |||
| #### Range scans | |||
| Another common use case is scanning over a range such as a time range. If you | |||
| use a sortable time encoding such as RFC3339 then you can query a specific | |||
| date range like this: | |||
| ```go | |||
| db.View(func(tx *bolt.Tx) error { | |||
| // Assume our events bucket exists and has RFC3339 encoded time keys. | |||
| c := tx.Bucket([]byte("Events")).Cursor() | |||
| // Our time range spans the 90's decade. | |||
| min := []byte("1990-01-01T00:00:00Z") | |||
| max := []byte("2000-01-01T00:00:00Z") | |||
| // Iterate over the 90's. | |||
| for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { | |||
| fmt.Printf("%s: %s\n", k, v) | |||
| } | |||
| return nil | |||
| }) | |||
| ``` | |||
| Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. | |||
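For instance, a minimal sketch of writing keys with a fixed-width, sortable encoding (UTC at second precision; the bucket and payload are illustrative):

```go
db.Update(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("Events"))
	// time.RFC3339 on a UTC time yields a fixed-width, byte-sortable key.
	key := []byte(time.Now().UTC().Format(time.RFC3339))
	return b.Put(key, []byte("signup"))
})
```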
| #### ForEach() | |||
| You can also use the function `ForEach()` if you know you'll be iterating over | |||
| all the keys in a bucket: | |||
| ```go | |||
| db.View(func(tx *bolt.Tx) error { | |||
| // Assume bucket exists and has keys | |||
| b := tx.Bucket([]byte("MyBucket")) | |||
| b.ForEach(func(k, v []byte) error { | |||
| fmt.Printf("key=%s, value=%s\n", k, v) | |||
| return nil | |||
| }) | |||
| return nil | |||
| }) | |||
| ``` | |||
| ### Nested buckets | |||
| You can also store a bucket in a key to create nested buckets. The API is the | |||
| same as the bucket management API on the `DB` object: | |||
| ```go | |||
| func (*Bucket) CreateBucket(key []byte) (*Bucket, error) | |||
| func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) | |||
| func (*Bucket) DeleteBucket(key []byte) error | |||
| ``` | |||
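For example, a sketch of one top-level bucket per account with nested buckets inside it (all names here are illustrative):

```go
db.Update(func(tx *bolt.Tx) error {
	account, err := tx.CreateBucketIfNotExists([]byte("account-123"))
	if err != nil {
		return err
	}
	settings, err := account.CreateBucketIfNotExists([]byte("settings"))
	if err != nil {
		return err
	}
	return settings.Put([]byte("theme"), []byte("dark"))
})
```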
| ### Database backups | |||
| Bolt is a single file, so it's easy to back up. You can use the `Tx.WriteTo()` | |||
| function to write a consistent view of the database to a writer. If you call | |||
| this from a read-only transaction, it will perform a hot backup and not block | |||
| your other database reads and writes. | |||
| By default, it will use a regular file handle which will utilize the operating | |||
| system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) | |||
| documentation for information about optimizing for larger-than-RAM datasets. | |||
| One common use case is to back up over HTTP so you can use tools like `cURL` to | |||
| do database backups: | |||
| ```go | |||
| func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { | |||
| err := db.View(func(tx *bolt.Tx) error { | |||
| w.Header().Set("Content-Type", "application/octet-stream") | |||
| w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) | |||
| w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) | |||
| _, err := tx.WriteTo(w) | |||
| return err | |||
| }) | |||
| if err != nil { | |||
| http.Error(w, err.Error(), http.StatusInternalServerError) | |||
| } | |||
| } | |||
| ``` | |||
| Then you can back up using this command: | |||
| ```sh | |||
| $ curl http://localhost/backup > my.db | |||
| ``` | |||
| Or you can open your browser to `http://localhost/backup` and it will download | |||
| automatically. | |||
| If you want to back up to another file, you can use the `Tx.CopyFile()` helper | |||
| function. | |||
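A minimal sketch (the destination path is illustrative):

```go
err := db.View(func(tx *bolt.Tx) error {
	// Copies a consistent snapshot of the database to the given path.
	return tx.CopyFile("/path/to/backup.db", 0600)
})
if err != nil {
	log.Fatal(err)
}
```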
| ### Statistics | |||
| The database keeps a running count of many of the internal operations it | |||
| performs so you can better understand what's going on. By grabbing a snapshot | |||
| of these stats at two points in time we can see what operations were performed | |||
| in that time range. | |||
| For example, we could start a goroutine to log stats every 10 seconds: | |||
| ```go | |||
| go func() { | |||
| // Grab the initial stats. | |||
| prev := db.Stats() | |||
| for { | |||
| // Wait for 10s. | |||
| time.Sleep(10 * time.Second) | |||
| // Grab the current stats and diff them. | |||
| stats := db.Stats() | |||
| diff := stats.Sub(&prev) | |||
| // Encode stats to JSON and print to STDERR. | |||
| json.NewEncoder(os.Stderr).Encode(diff) | |||
| // Save stats for the next loop. | |||
| prev = stats | |||
| } | |||
| }() | |||
| ``` | |||
| It's also useful to pipe these stats to a service such as statsd for monitoring | |||
| or to provide an HTTP endpoint that will perform a fixed-length sample. | |||
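A hedged sketch of such an endpoint (assumes `db` is in scope; the handler blocks for the sample window, which suits occasional diagnostic calls):

```go
http.HandleFunc("/stats", func(w http.ResponseWriter, r *http.Request) {
	// Diff the counters over a fixed 10-second window and return as JSON.
	prev := db.Stats()
	time.Sleep(10 * time.Second)
	cur := db.Stats()
	diff := cur.Sub(&prev)
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(diff)
})
```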
| ### Read-Only Mode | |||
| Sometimes it is useful to create a shared, read-only Bolt database. To do this, | |||
| set the `Options.ReadOnly` flag when opening your database. Read-only mode | |||
| uses a shared lock to allow multiple processes to read from the database but | |||
| it will block any processes from opening the database in read-write mode. | |||
| ```go | |||
| db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) | |||
| if err != nil { | |||
| log.Fatal(err) | |||
| } | |||
| ``` | |||
| ### Mobile Use (iOS/Android) | |||
| Bolt is able to run on mobile devices by leveraging the binding feature of the | |||
| [gomobile](https://github.com/golang/mobile) tool. Create a struct that will | |||
| contain your database logic and a reference to a `*bolt.DB` with an initializing | |||
| constructor that takes in a filepath where the database file will be stored. | |||
| Neither Android nor iOS requires extra permissions or cleanup from using this method. | |||
| ```go | |||
| func NewBoltDB(filepath string) *BoltDB { | |||
| db, err := bolt.Open(filepath+"/demo.db", 0600, nil) | |||
| if err != nil { | |||
| log.Fatal(err) | |||
| } | |||
| return &BoltDB{db} | |||
| } | |||
| type BoltDB struct { | |||
| db *bolt.DB | |||
| ... | |||
| } | |||
| func (b *BoltDB) Path() string { | |||
| return b.db.Path() | |||
| } | |||
| func (b *BoltDB) Close() { | |||
| b.db.Close() | |||
| } | |||
| ``` | |||
| Database logic should be defined as methods on this wrapper struct. | |||
| To initialize this struct from the native language, use the snippets below. | |||
| Both platforms now sync their local storage to the cloud, so these snippets | |||
| also disable that syncing for the database file: | |||
| #### Android | |||
| ```java | |||
| String path; | |||
| if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ | |||
| path = getNoBackupFilesDir().getAbsolutePath(); | |||
| } else{ | |||
| path = getFilesDir().getAbsolutePath(); | |||
| } | |||
| Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) | |||
| ``` | |||
| #### iOS | |||
| ```objc | |||
| - (void)demo { | |||
| NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, | |||
| NSUserDomainMask, | |||
| YES) objectAtIndex:0]; | |||
| GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); | |||
| [self addSkipBackupAttributeToItemAtPath:demo.path]; | |||
| //Some DB Logic would go here | |||
| [demo close]; | |||
| } | |||
| - (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString | |||
| { | |||
| NSURL* URL= [NSURL fileURLWithPath: filePathString]; | |||
| assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); | |||
| NSError *error = nil; | |||
| BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] | |||
| forKey: NSURLIsExcludedFromBackupKey error: &error]; | |||
| if(!success){ | |||
| NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); | |||
| } | |||
| return success; | |||
| } | |||
| ``` | |||
| ## Resources | |||
| For more information on getting started with Bolt, check out the following articles: | |||
| * [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). | |||
| * [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville | |||
| ## Comparison with other databases | |||
| ### Postgres, MySQL, & other relational databases | |||
| Relational databases structure data into rows and are only accessible through | |||
| the use of SQL. This approach provides flexibility in how you store and query | |||
| your data but also incurs overhead in parsing and planning SQL statements. Bolt | |||
| accesses all data by a byte slice key. This makes Bolt fast to read and write | |||
| data by key but provides no built-in support for joining values together. | |||
| Most relational databases (with the exception of SQLite) are standalone servers | |||
| that run separately from your application. This gives your systems | |||
| flexibility to connect multiple application servers to a single database | |||
| server but also adds overhead in serializing and transporting data over the | |||
| network. Bolt runs as a library included in your application so all data access | |||
| has to go through your application's process. This brings data closer to your | |||
| application but limits multi-process access to the data. | |||
| ### LevelDB, RocksDB | |||
| LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that | |||
| they are libraries bundled into the application, however, their underlying | |||
| structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes | |||
| random writes by using a write ahead log and multi-tiered, sorted files called | |||
| SSTables. Bolt uses a B+tree internally and only a single file. Both approaches | |||
| have trade-offs. | |||
| If you require a high random write throughput (>10,000 w/sec) or you need to use | |||
| spinning disks then LevelDB could be a good choice. If your application is | |||
| read-heavy or does a lot of range scans then Bolt could be a good choice. | |||
| One other important consideration is that LevelDB does not have transactions. | |||
| It supports batch writing of key/value pairs and it supports read snapshots | |||
| but it will not give you the ability to do a compare-and-swap operation safely. | |||
| Bolt supports fully serializable ACID transactions. | |||
| ### LMDB | |||
| Bolt was originally a port of LMDB so it is architecturally similar. Both use | |||
| a B+tree, have ACID semantics with fully serializable transactions, and support | |||
| lock-free MVCC using a single writer and multiple readers. | |||
| The two projects have somewhat diverged. LMDB heavily focuses on raw performance | |||
| while Bolt has focused on simplicity and ease of use. For example, LMDB allows | |||
| several unsafe actions such as direct writes for the sake of performance. Bolt | |||
| opts to disallow actions which can leave the database in a corrupted state. The | |||
| only exception to this in Bolt is `DB.NoSync`. | |||
| There are also a few differences in API. LMDB requires a maximum mmap size when | |||
| opening an `mdb_env` whereas Bolt will handle incremental mmap resizing | |||
| automatically. LMDB overloads the getter and setter functions with multiple | |||
| flags whereas Bolt splits these specialized cases into their own functions. | |||
| ## Caveats & Limitations | |||
| It's important to pick the right tool for the job and Bolt is no exception. | |||
| Here are a few things to note when evaluating and using Bolt: | |||
| * Bolt is good for read intensive workloads. Sequential write performance is | |||
| also fast but random writes can be slow. You can use `DB.Batch()` or add a | |||
| write-ahead log to help mitigate this issue. | |||
| * Bolt uses a B+tree internally so there can be a lot of random page access. | |||
| SSDs provide a significant performance boost over spinning disks. | |||
| * Try to avoid long running read transactions. Bolt uses copy-on-write so | |||
| old pages cannot be reclaimed while an old transaction is using them. | |||
| * Byte slices returned from Bolt are only valid during a transaction. Once the | |||
| transaction has been committed or rolled back then the memory they point to | |||
| can be reused by a new page or can be unmapped from virtual memory and you'll | |||
| see an `unexpected fault address` panic when accessing it. | |||
| * Be careful when using `Bucket.FillPercent`. Setting a high fill percent for | |||
| buckets that have random inserts will cause your database to have very poor | |||
| page utilization. | |||
| * Use larger buckets in general. Smaller buckets cause poor page utilization | |||
| once they become larger than the page size (typically 4KB). | |||
| * Bulk loading a lot of random writes into a new bucket can be slow as the | |||
| page will not split until the transaction is committed. Randomly inserting | |||
| more than 100,000 key/value pairs into a single new bucket in a single | |||
| transaction is not advised. | |||
| * Bolt uses a memory-mapped file so the underlying operating system handles the | |||
| caching of the data. Typically, the OS will cache as much of the file as it | |||
| can in memory and will release memory as needed to other processes. This means | |||
| that Bolt can show very high memory usage when working with large databases. | |||
| However, this is expected and the OS will release memory as needed. Bolt can | |||
| handle databases much larger than the available physical RAM, provided its | |||
| memory-map fits in the process virtual address space. This may be a problem | |||
| on 32-bit systems. | |||
| * The data structures in the Bolt database are memory mapped so the data file | |||
| will be endian specific. This means that you cannot copy a Bolt file from a | |||
| little endian machine to a big endian machine and have it work. For most | |||
| users this is not a concern since most modern CPUs are little endian. | |||
| * Because of the way pages are laid out on disk, Bolt cannot truncate data files | |||
| and return free pages back to the disk. Instead, Bolt maintains a free list | |||
| of unused pages within its data file. These free pages can be reused by later | |||
| transactions. This works well for many use cases as databases generally tend | |||
| to grow. However, it's important to note that deleting large chunks of data | |||
| will not allow you to reclaim that space on disk. | |||
| For more information on page allocation, [see this comment][page-allocation]. | |||
| [page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 | |||
| ## Reading the Source | |||
| Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, | |||
| transactional key/value database so it can be a good starting point for people | |||
| interested in how databases work. | |||
| The best places to start are the main entry points into Bolt: | |||
| - `Open()` - Initializes the reference to the database. It's responsible for | |||
| creating the database if it doesn't exist, obtaining an exclusive lock on the | |||
| file, reading the meta pages, & memory-mapping the file. | |||
| - `DB.Begin()` - Starts a read-only or read-write transaction depending on the | |||
| value of the `writable` argument. This requires briefly obtaining the "meta" | |||
| lock to keep track of open transactions. Only one read-write transaction can | |||
| exist at a time so the "rwlock" is acquired during the life of a read-write | |||
| transaction. | |||
| - `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the | |||
| arguments, a cursor is used to traverse the B+tree to the page and position | |||
| where the key & value will be written. Once the position is found, the bucket | |||
| materializes the underlying page and the page's parent pages into memory as | |||
| "nodes". These nodes are where mutations occur during read-write transactions. | |||
| These changes get flushed to disk during commit. | |||
| - `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor | |||
| to move to the page & position of a key/value pair. During a read-only | |||
| transaction, the key and value data is returned as a direct reference to the | |||
| underlying mmap file so there's no allocation overhead. For read-write | |||
| transactions, this data may reference the mmap file or one of the in-memory | |||
| node values. | |||
| - `Cursor` - This object is simply for traversing the B+tree of on-disk pages | |||
| or in-memory nodes. It can seek to a specific key, move to the first or last | |||
| value, or it can move forward or backward. The cursor handles the movement up | |||
| and down the B+tree transparently to the end user. | |||
| - `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages | |||
| into pages to be written to disk. Writing to disk then occurs in two phases. | |||
| First, the dirty pages are written to disk and an `fsync()` occurs. Second, a | |||
| new meta page with an incremented transaction ID is written and another | |||
| `fsync()` occurs. This two phase write ensures that partially written data | |||
| pages are ignored in the event of a crash since the meta page pointing to them | |||
| is never written. Partially written meta pages are invalidated because they | |||
| are written with a checksum. | |||
| If you have additional notes that could be helpful for others, please submit | |||
| them via pull request. | |||
| ## Other Projects Using Bolt | |||
| Below is a list of public, open source projects that use Bolt: | |||
| * [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. | |||
| * [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. | |||
| * [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. | |||
| * [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. | |||
| * [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. | |||
| * [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. | |||
| * [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. | |||
| * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. | |||
| * [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". | |||
| * [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. | |||
| * [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. | |||
| * [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. | |||
| * [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. | |||
| * [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. | |||
| * [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. | |||
| * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. | |||
| * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. | |||
| * [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. | |||
| * [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database. | |||
| * [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. | |||
| * [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. | |||
| * [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. | |||
| * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. | |||
| * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. | |||
| * [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. | |||
| * [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. | |||
| * [stow](https://github.com/djherbis/stow) - a persistence manager for objects | |||
| backed by boltdb. | |||
| * [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining | |||
| simple tx and key scans. | |||
| * [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. | |||
| * [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service | |||
| * [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. | |||
| * [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. | |||
| * [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. | |||
| * [Storm](https://github.com/asdine/storm) - A simple ORM around BoltDB. | |||
| * [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. | |||
| If you are using Bolt in a project please send a pull request to add it to the list. | |||
| @@ -0,0 +1,18 @@ | |||
| version: "{build}" | |||
| os: Windows Server 2012 R2 | |||
| clone_folder: c:\gopath\src\github.com\boltdb\bolt | |||
| environment: | |||
| GOPATH: c:\gopath | |||
| install: | |||
| - echo %PATH% | |||
| - echo %GOPATH% | |||
| - go version | |||
| - go env | |||
| - go get -v -t ./... | |||
| build_script: | |||
| - go test -v ./... | |||
| @@ -0,0 +1,7 @@ | |||
| package bolt | |||
| // maxMapSize represents the largest mmap size supported by Bolt. | |||
| const maxMapSize = 0x7FFFFFFF // 2GB | |||
| // maxAllocSize is the size used when creating array pointers. | |||
| const maxAllocSize = 0xFFFFFFF | |||
| @@ -0,0 +1,7 @@ | |||
| package bolt | |||
| // maxMapSize represents the largest mmap size supported by Bolt. | |||
| const maxMapSize = 0xFFFFFFFFFFFF // 256TB | |||
| // maxAllocSize is the size used when creating array pointers. | |||
| const maxAllocSize = 0x7FFFFFFF | |||
| @@ -0,0 +1,7 @@ | |||
| package bolt | |||
| // maxMapSize represents the largest mmap size supported by Bolt. | |||
| const maxMapSize = 0x7FFFFFFF // 2GB | |||
| // maxAllocSize is the size used when creating array pointers. | |||
| const maxAllocSize = 0xFFFFFFF | |||
| @@ -0,0 +1,9 @@ | |||
| // +build arm64 | |||
| package bolt | |||
| // maxMapSize represents the largest mmap size supported by Bolt. | |||
| const maxMapSize = 0xFFFFFFFFFFFF // 256TB | |||
| // maxAllocSize is the size used when creating array pointers. | |||
| const maxAllocSize = 0x7FFFFFFF | |||
| @@ -0,0 +1,10 @@ | |||
| package bolt | |||
| import ( | |||
| "syscall" | |||
| ) | |||
| // fdatasync flushes written data to a file descriptor. | |||
| func fdatasync(db *DB) error { | |||
| return syscall.Fdatasync(int(db.file.Fd())) | |||
| } | |||
| @@ -0,0 +1,27 @@ | |||
| package bolt | |||
| import ( | |||
| "syscall" | |||
| "unsafe" | |||
| ) | |||
| const ( | |||
| msAsync = 1 << iota // perform asynchronous writes | |||
| msSync // perform synchronous writes | |||
| msInvalidate // invalidate cached data | |||
| ) | |||
| func msync(db *DB) error { | |||
| _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) | |||
| if errno != 0 { | |||
| return errno | |||
| } | |||
| return nil | |||
| } | |||
| func fdatasync(db *DB) error { | |||
| if db.data != nil { | |||
| return msync(db) | |||
| } | |||
| return db.file.Sync() | |||
| } | |||
| @@ -0,0 +1,9 @@ | |||
| // +build ppc | |||
| package bolt | |||
| // maxMapSize represents the largest mmap size supported by Bolt. | |||
| const maxMapSize = 0x7FFFFFFF // 2GB | |||
| // maxAllocSize is the size used when creating array pointers. | |||
| const maxAllocSize = 0xFFFFFFF | |||
| @@ -0,0 +1,9 @@ | |||
| // +build ppc64 | |||
| package bolt | |||
| // maxMapSize represents the largest mmap size supported by Bolt. | |||
| const maxMapSize = 0xFFFFFFFFFFFF // 256TB | |||
| // maxAllocSize is the size used when creating array pointers. | |||
| const maxAllocSize = 0x7FFFFFFF | |||
| @@ -0,0 +1,9 @@ | |||
| // +build ppc64le | |||
| package bolt | |||
| // maxMapSize represents the largest mmap size supported by Bolt. | |||
| const maxMapSize = 0xFFFFFFFFFFFF // 256TB | |||
| // maxAllocSize is the size used when creating array pointers. | |||
| const maxAllocSize = 0x7FFFFFFF | |||
| @@ -0,0 +1,9 @@ | |||
| // +build s390x | |||
| package bolt | |||
| // maxMapSize represents the largest mmap size supported by Bolt. | |||
| const maxMapSize = 0xFFFFFFFFFFFF // 256TB | |||
| // maxAllocSize is the size used when creating array pointers. | |||
| const maxAllocSize = 0x7FFFFFFF | |||
| @@ -0,0 +1,89 @@ | |||
| // +build !windows,!plan9,!solaris | |||
| package bolt | |||
| import ( | |||
| "fmt" | |||
| "os" | |||
| "syscall" | |||
| "time" | |||
| "unsafe" | |||
| ) | |||
| // flock acquires an advisory lock on a file descriptor. | |||
| func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { | |||
| var t time.Time | |||
| for { | |||
| // If we're beyond our timeout then return an error. | |||
| // This can only occur after we've attempted a flock once. | |||
| if t.IsZero() { | |||
| t = time.Now() | |||
| } else if timeout > 0 && time.Since(t) > timeout { | |||
| return ErrTimeout | |||
| } | |||
| flag := syscall.LOCK_SH | |||
| if exclusive { | |||
| flag = syscall.LOCK_EX | |||
| } | |||
| // Otherwise attempt to obtain an exclusive lock. | |||
| err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) | |||
| if err == nil { | |||
| return nil | |||
| } else if err != syscall.EWOULDBLOCK { | |||
| return err | |||
| } | |||
| // Wait for a bit and try again. | |||
| time.Sleep(50 * time.Millisecond) | |||
| } | |||
| } | |||
| // funlock releases an advisory lock on a file descriptor. | |||
| func funlock(db *DB) error { | |||
| return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) | |||
| } | |||
| // mmap memory maps a DB's data file. | |||
| func mmap(db *DB, sz int) error { | |||
| // Map the data file to memory. | |||
| b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| // Advise the kernel that the mmap is accessed randomly. | |||
| if err := madvise(b, syscall.MADV_RANDOM); err != nil { | |||
| return fmt.Errorf("madvise: %s", err) | |||
| } | |||
| // Save the original byte slice and convert to a byte array pointer. | |||
| db.dataref = b | |||
| db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) | |||
| db.datasz = sz | |||
| return nil | |||
| } | |||
| // munmap unmaps a DB's data file from memory. | |||
| func munmap(db *DB) error { | |||
| // Ignore the unmap if we have no mapped data. | |||
| if db.dataref == nil { | |||
| return nil | |||
| } | |||
| // Unmap using the original byte slice. | |||
| err := syscall.Munmap(db.dataref) | |||
| db.dataref = nil | |||
| db.data = nil | |||
| db.datasz = 0 | |||
| return err | |||
| } | |||
| // NOTE: This function is copied from stdlib because it is not available on darwin. | |||
| func madvise(b []byte, advice int) (err error) { | |||
| _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) | |||
| if e1 != 0 { | |||
| err = e1 | |||
| } | |||
| return | |||
| } | |||
| @@ -0,0 +1,90 @@ | |||
| package bolt | |||
| import ( | |||
| "fmt" | |||
| "os" | |||
| "syscall" | |||
| "time" | |||
| "unsafe" | |||
| "golang.org/x/sys/unix" | |||
| ) | |||
| // flock acquires an advisory lock on a file descriptor. | |||
| func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { | |||
| var t time.Time | |||
| for { | |||
| // If we're beyond our timeout then return an error. | |||
| // This can only occur after we've attempted a flock once. | |||
| if t.IsZero() { | |||
| t = time.Now() | |||
| } else if timeout > 0 && time.Since(t) > timeout { | |||
| return ErrTimeout | |||
| } | |||
| var lock syscall.Flock_t | |||
| lock.Start = 0 | |||
| lock.Len = 0 | |||
| lock.Pid = 0 | |||
| lock.Whence = 0 | |||
| if exclusive { | |||
| lock.Type = syscall.F_WRLCK | |||
| } else { | |||
| lock.Type = syscall.F_RDLCK | |||
| } | |||
| err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) | |||
| if err == nil { | |||
| return nil | |||
| } else if err != syscall.EAGAIN { | |||
| return err | |||
| } | |||
| // Wait for a bit and try again. | |||
| time.Sleep(50 * time.Millisecond) | |||
| } | |||
| } | |||
| // funlock releases an advisory lock on a file descriptor. | |||
| func funlock(db *DB) error { | |||
| var lock syscall.Flock_t | |||
| lock.Start = 0 | |||
| lock.Len = 0 | |||
| lock.Type = syscall.F_UNLCK | |||
| lock.Whence = 0 | |||
| return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) | |||
| } | |||
| // mmap memory maps a DB's data file. | |||
| func mmap(db *DB, sz int) error { | |||
| // Map the data file to memory. | |||
| b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| // Advise the kernel that the mmap is accessed randomly. | |||
| if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { | |||
| return fmt.Errorf("madvise: %s", err) | |||
| } | |||
| // Save the original byte slice and convert to a byte array pointer. | |||
| db.dataref = b | |||
| db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) | |||
| db.datasz = sz | |||
| return nil | |||
| } | |||
| // munmap unmaps a DB's data file from memory. | |||
| func munmap(db *DB) error { | |||
| // Ignore the unmap if we have no mapped data. | |||
| if db.dataref == nil { | |||
| return nil | |||
| } | |||
| // Unmap using the original byte slice. | |||
| err := unix.Munmap(db.dataref) | |||
| db.dataref = nil | |||
| db.data = nil | |||
| db.datasz = 0 | |||
| return err | |||
| } | |||
| @@ -0,0 +1,144 @@ | |||
| package bolt | |||
| import ( | |||
| "fmt" | |||
| "os" | |||
| "syscall" | |||
| "time" | |||
| "unsafe" | |||
| ) | |||
| // LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 | |||
| var ( | |||
| modkernel32 = syscall.NewLazyDLL("kernel32.dll") | |||
| procLockFileEx = modkernel32.NewProc("LockFileEx") | |||
| procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") | |||
| ) | |||
| const ( | |||
| lockExt = ".lock" | |||
| // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx | |||
| flagLockExclusive = 2 | |||
| flagLockFailImmediately = 1 | |||
| // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx | |||
| errLockViolation syscall.Errno = 0x21 | |||
| ) | |||
| func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { | |||
| r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) | |||
| if r == 0 { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { | |||
| r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) | |||
| if r == 0 { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| // fdatasync flushes written data to a file descriptor. | |||
| func fdatasync(db *DB) error { | |||
| return db.file.Sync() | |||
| } | |||
| // flock acquires an advisory lock on a file descriptor. | |||
| func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { | |||
| // Create a separate lock file on windows because a process | |||
| // cannot share an exclusive lock on the same file. This is | |||
| // needed during Tx.WriteTo(). | |||
| f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| db.lockfile = f | |||
| var t time.Time | |||
| for { | |||
| // If we're beyond our timeout then return an error. | |||
| // This can only occur after we've attempted a flock once. | |||
| if t.IsZero() { | |||
| t = time.Now() | |||
| } else if timeout > 0 && time.Since(t) > timeout { | |||
| return ErrTimeout | |||
| } | |||
| var flag uint32 = flagLockFailImmediately | |||
| if exclusive { | |||
| flag |= flagLockExclusive | |||
| } | |||
| err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) | |||
| if err == nil { | |||
| return nil | |||
| } else if err != errLockViolation { | |||
| return err | |||
| } | |||
| // Wait for a bit and try again. | |||
| time.Sleep(50 * time.Millisecond) | |||
| } | |||
| } | |||
| // funlock releases an advisory lock on a file descriptor. | |||
| func funlock(db *DB) error { | |||
| err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) | |||
| db.lockfile.Close() | |||
| os.Remove(db.path+lockExt) | |||
| return err | |||
| } | |||
| // mmap memory maps a DB's data file. | |||
| // Based on: https://github.com/edsrzf/mmap-go | |||
| func mmap(db *DB, sz int) error { | |||
| if !db.readOnly { | |||
| // Truncate the database to the size of the mmap. | |||
| if err := db.file.Truncate(int64(sz)); err != nil { | |||
| return fmt.Errorf("truncate: %s", err) | |||
| } | |||
| } | |||
| // Open a file mapping handle. | |||
| sizehi := uint32(sz >> 32) | |||
| sizelo := uint32(sz) & 0xffffffff | |||
| h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil) | |||
| if h == 0 { | |||
| return os.NewSyscallError("CreateFileMapping", errno) | |||
| } | |||
| // Create the memory map. | |||
| addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) | |||
| if addr == 0 { | |||
| return os.NewSyscallError("MapViewOfFile", errno) | |||
| } | |||
| // Close mapping handle. | |||
| if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { | |||
| return os.NewSyscallError("CloseHandle", err) | |||
| } | |||
| // Convert to a byte array. | |||
| db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) | |||
| db.datasz = sz | |||
| return nil | |||
| } | |||
| // munmap unmaps a pointer from a file. | |||
| // Based on: https://github.com/edsrzf/mmap-go | |||
| func munmap(db *DB) error { | |||
| if db.data == nil { | |||
| return nil | |||
| } | |||
| addr := (uintptr)(unsafe.Pointer(&db.data[0])) | |||
| if err := syscall.UnmapViewOfFile(addr); err != nil { | |||
| return os.NewSyscallError("UnmapViewOfFile", err) | |||
| } | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,8 @@ | |||
| // +build !windows,!plan9,!linux,!openbsd | |||
| package bolt | |||
| // fdatasync flushes written data to a file descriptor. | |||
| func fdatasync(db *DB) error { | |||
| return db.file.Sync() | |||
| } | |||
| @@ -0,0 +1,748 @@ | |||
| package bolt | |||
| import ( | |||
| "bytes" | |||
| "fmt" | |||
| "unsafe" | |||
| ) | |||
| const ( | |||
| // MaxKeySize is the maximum length of a key, in bytes. | |||
| MaxKeySize = 32768 | |||
| // MaxValueSize is the maximum length of a value, in bytes. | |||
| MaxValueSize = (1 << 31) - 2 | |||
| ) | |||
| const ( | |||
| maxUint = ^uint(0) | |||
| minUint = 0 | |||
| maxInt = int(^uint(0) >> 1) | |||
| minInt = -maxInt - 1 | |||
| ) | |||
| const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) | |||
| const ( | |||
| minFillPercent = 0.1 | |||
| maxFillPercent = 1.0 | |||
| ) | |||
| // DefaultFillPercent is the percentage that split pages are filled. | |||
| // This value can be changed by setting Bucket.FillPercent. | |||
| const DefaultFillPercent = 0.5 | |||
| // Bucket represents a collection of key/value pairs inside the database. | |||
| type Bucket struct { | |||
| *bucket | |||
| tx *Tx // the associated transaction | |||
| buckets map[string]*Bucket // subbucket cache | |||
| page *page // inline page reference | |||
| rootNode *node // materialized node for the root page. | |||
| nodes map[pgid]*node // node cache | |||
| // Sets the threshold for filling nodes when they split. By default, | |||
| // the bucket will fill to 50% but it can be useful to increase this | |||
| // amount if you know that your write workloads are mostly append-only. | |||
| // | |||
| // This is non-persisted across transactions so it must be set in every Tx. | |||
| FillPercent float64 | |||
| } | |||
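| // Illustrative caller-side sketch (not part of the vendored source): | |||
| // because FillPercent is not persisted, an append-mostly workload has to | |||
| // set it in every write transaction. The bucket name is hypothetical: | |||
| // | |||
| //	err := db.Update(func(tx *bolt.Tx) error { | |||
| //		b := tx.Bucket([]byte("events")) | |||
| //		b.FillPercent = 0.9 // pack split pages tighter for appended keys | |||
| //		return b.Put([]byte("k"), []byte("v")) | |||
| //	}) | |||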
| // bucket represents the on-file representation of a bucket. | |||
| // This is stored as the "value" of a bucket key. If the bucket is small enough, | |||
| // then its root page can be stored inline in the "value", after the bucket | |||
| // header. In the case of inline buckets, the "root" will be 0. | |||
| type bucket struct { | |||
| root pgid // page id of the bucket's root-level page | |||
| sequence uint64 // monotonically incrementing, used by NextSequence() | |||
| } | |||
| // newBucket returns a new bucket associated with a transaction. | |||
| func newBucket(tx *Tx) Bucket { | |||
| var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} | |||
| if tx.writable { | |||
| b.buckets = make(map[string]*Bucket) | |||
| b.nodes = make(map[pgid]*node) | |||
| } | |||
| return b | |||
| } | |||
| // Tx returns the tx of the bucket. | |||
| func (b *Bucket) Tx() *Tx { | |||
| return b.tx | |||
| } | |||
| // Root returns the root of the bucket. | |||
| func (b *Bucket) Root() pgid { | |||
| return b.root | |||
| } | |||
| // Writable returns whether the bucket is writable. | |||
| func (b *Bucket) Writable() bool { | |||
| return b.tx.writable | |||
| } | |||
| // Cursor creates a cursor associated with the bucket. | |||
| // The cursor is only valid as long as the transaction is open. | |||
| // Do not use a cursor after the transaction is closed. | |||
| func (b *Bucket) Cursor() *Cursor { | |||
| // Update transaction statistics. | |||
| b.tx.stats.CursorCount++ | |||
| // Allocate and return a cursor. | |||
| return &Cursor{ | |||
| bucket: b, | |||
| stack: make([]elemRef, 0), | |||
| } | |||
| } | |||
| // Bucket retrieves a nested bucket by name. | |||
| // Returns nil if the bucket does not exist. | |||
| // The bucket instance is only valid for the lifetime of the transaction. | |||
| func (b *Bucket) Bucket(name []byte) *Bucket { | |||
| if b.buckets != nil { | |||
| if child := b.buckets[string(name)]; child != nil { | |||
| return child | |||
| } | |||
| } | |||
| // Move cursor to key. | |||
| c := b.Cursor() | |||
| k, v, flags := c.seek(name) | |||
| // Return nil if the key doesn't exist or it is not a bucket. | |||
| if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { | |||
| return nil | |||
| } | |||
| // Otherwise create a bucket and cache it. | |||
| var child = b.openBucket(v) | |||
| if b.buckets != nil { | |||
| b.buckets[string(name)] = child | |||
| } | |||
| return child | |||
| } | |||
| // Helper method that re-interprets a sub-bucket value | |||
| // from a parent into a Bucket | |||
| func (b *Bucket) openBucket(value []byte) *Bucket { | |||
| var child = newBucket(b.tx) | |||
| // If this is a writable transaction then we need to copy the bucket entry. | |||
| // Read-only transactions can point directly at the mmap entry. | |||
| if b.tx.writable { | |||
| child.bucket = &bucket{} | |||
| *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) | |||
| } else { | |||
| child.bucket = (*bucket)(unsafe.Pointer(&value[0])) | |||
| } | |||
| // Save a reference to the inline page if the bucket is inline. | |||
| if child.root == 0 { | |||
| child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) | |||
| } | |||
| return &child | |||
| } | |||
| // CreateBucket creates a new bucket at the given key and returns the new bucket. | |||
| // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. | |||
| // The bucket instance is only valid for the lifetime of the transaction. | |||
| func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { | |||
| if b.tx.db == nil { | |||
| return nil, ErrTxClosed | |||
| } else if !b.tx.writable { | |||
| return nil, ErrTxNotWritable | |||
| } else if len(key) == 0 { | |||
| return nil, ErrBucketNameRequired | |||
| } | |||
| // Move cursor to correct position. | |||
| c := b.Cursor() | |||
| k, _, flags := c.seek(key) | |||
| // Return an error if there is an existing key. | |||
| if bytes.Equal(key, k) { | |||
| if (flags & bucketLeafFlag) != 0 { | |||
| return nil, ErrBucketExists | |||
| } | |||
| return nil, ErrIncompatibleValue | |||
| } | |||
| // Create empty, inline bucket. | |||
| var bucket = Bucket{ | |||
| bucket: &bucket{}, | |||
| rootNode: &node{isLeaf: true}, | |||
| FillPercent: DefaultFillPercent, | |||
| } | |||
| var value = bucket.write() | |||
| // Insert into node. | |||
| key = cloneBytes(key) | |||
| c.node().put(key, key, value, 0, bucketLeafFlag) | |||
| // Since subbuckets are not allowed on inline buckets, we need to | |||
| // dereference the inline page, if it exists. This will cause the bucket | |||
| // to be treated as a regular, non-inline bucket for the rest of the tx. | |||
| b.page = nil | |||
| return b.Bucket(key), nil | |||
| } | |||
| // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. | |||
| // Returns an error if the bucket name is blank, or if the bucket name is too long. | |||
| // The bucket instance is only valid for the lifetime of the transaction. | |||
| func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { | |||
| child, err := b.CreateBucket(key) | |||
| if err == ErrBucketExists { | |||
| return b.Bucket(key), nil | |||
| } else if err != nil { | |||
| return nil, err | |||
| } | |||
| return child, nil | |||
| } | |||
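| // Illustrative caller-side sketch (not part of the vendored source): setup | |||
| // code typically creates buckets once inside a write transaction, and | |||
| // nested buckets the same way from the parent. Names are hypothetical: | |||
| // | |||
| //	err := db.Update(func(tx *bolt.Tx) error { | |||
| //		users, err := tx.CreateBucketIfNotExists([]byte("users")) | |||
| //		if err != nil { | |||
| //			return err | |||
| //		} | |||
| //		_, err = users.CreateBucketIfNotExists([]byte("sessions")) | |||
| //		return err | |||
| //	}) | |||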
| // DeleteBucket deletes a bucket at the given key. | |||
| // Returns an error if the bucket does not exist, or if the key represents a non-bucket value. | |||
| func (b *Bucket) DeleteBucket(key []byte) error { | |||
| if b.tx.db == nil { | |||
| return ErrTxClosed | |||
| } else if !b.Writable() { | |||
| return ErrTxNotWritable | |||
| } | |||
| // Move cursor to correct position. | |||
| c := b.Cursor() | |||
| k, _, flags := c.seek(key) | |||
| // Return an error if bucket doesn't exist or is not a bucket. | |||
| if !bytes.Equal(key, k) { | |||
| return ErrBucketNotFound | |||
| } else if (flags & bucketLeafFlag) == 0 { | |||
| return ErrIncompatibleValue | |||
| } | |||
| // Recursively delete all child buckets. | |||
| child := b.Bucket(key) | |||
| err := child.ForEach(func(k, v []byte) error { | |||
| if v == nil { | |||
| if err := child.DeleteBucket(k); err != nil { | |||
| return fmt.Errorf("delete bucket: %s", err) | |||
| } | |||
| } | |||
| return nil | |||
| }) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| // Remove cached copy. | |||
| delete(b.buckets, string(key)) | |||
| // Release all bucket pages to freelist. | |||
| child.nodes = nil | |||
| child.rootNode = nil | |||
| child.free() | |||
| // Delete the node if we have a matching key. | |||
| c.node().del(key) | |||
| return nil | |||
| } | |||
| // Get retrieves the value for a key in the bucket. | |||
| // Returns a nil value if the key does not exist or if the key is a nested bucket. | |||
| // The returned value is only valid for the life of the transaction. | |||
| func (b *Bucket) Get(key []byte) []byte { | |||
| k, v, flags := b.Cursor().seek(key) | |||
| // Return nil if this is a bucket. | |||
| if (flags & bucketLeafFlag) != 0 { | |||
| return nil | |||
| } | |||
| // If our target node isn't the same key as what's passed in then return nil. | |||
| if !bytes.Equal(key, k) { | |||
| return nil | |||
| } | |||
| return v | |||
| } | |||
| // Put sets the value for a key in the bucket. | |||
| // If the key exists then its previous value will be overwritten. | |||
| // Supplied value must remain valid for the life of the transaction. | |||
| // Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. | |||
| func (b *Bucket) Put(key []byte, value []byte) error { | |||
| if b.tx.db == nil { | |||
| return ErrTxClosed | |||
| } else if !b.Writable() { | |||
| return ErrTxNotWritable | |||
| } else if len(key) == 0 { | |||
| return ErrKeyRequired | |||
| } else if len(key) > MaxKeySize { | |||
| return ErrKeyTooLarge | |||
| } else if int64(len(value)) > MaxValueSize { | |||
| return ErrValueTooLarge | |||
| } | |||
| // Move cursor to correct position. | |||
| c := b.Cursor() | |||
| k, _, flags := c.seek(key) | |||
| // Return an error if there is an existing key with a bucket value. | |||
| if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { | |||
| return ErrIncompatibleValue | |||
| } | |||
| // Insert into node. | |||
| key = cloneBytes(key) | |||
| c.node().put(key, key, value, 0, 0) | |||
| return nil | |||
| } | |||
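| // Illustrative caller-side sketch (not part of the vendored source): Get | |||
| // returns a slice into the mmap, so a value needed after the transaction | |||
| // closes must be copied first. Bucket and key are hypothetical: | |||
| // | |||
| //	var out []byte | |||
| //	err := db.View(func(tx *bolt.Tx) error { | |||
| //		if v := tx.Bucket([]byte("users")).Get([]byte("alice")); v != nil { | |||
| //			out = append([]byte(nil), v...) // copy before the tx closes | |||
| //		} | |||
| //		return nil | |||
| //	}) | |||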
| // Delete removes a key from the bucket. | |||
| // If the key does not exist then nothing is done and a nil error is returned. | |||
| // Returns an error if the bucket was created from a read-only transaction. | |||
| func (b *Bucket) Delete(key []byte) error { | |||
| if b.tx.db == nil { | |||
| return ErrTxClosed | |||
| } else if !b.Writable() { | |||
| return ErrTxNotWritable | |||
| } | |||
| // Move cursor to correct position. | |||
| c := b.Cursor() | |||
| _, _, flags := c.seek(key) | |||
| // Return an error if there is an existing bucket value. | |||
| if (flags & bucketLeafFlag) != 0 { | |||
| return ErrIncompatibleValue | |||
| } | |||
| // Delete the node if we have a matching key. | |||
| c.node().del(key) | |||
| return nil | |||
| } | |||
| // NextSequence returns an autoincrementing integer for the bucket. | |||
| func (b *Bucket) NextSequence() (uint64, error) { | |||
| if b.tx.db == nil { | |||
| return 0, ErrTxClosed | |||
| } else if !b.Writable() { | |||
| return 0, ErrTxNotWritable | |||
| } | |||
| // Materialize the root node if it hasn't been already so that the | |||
| // bucket will be saved during commit. | |||
| if b.rootNode == nil { | |||
| _ = b.node(b.root, nil) | |||
| } | |||
| // Increment and return the sequence. | |||
| b.bucket.sequence++ | |||
| return b.bucket.sequence, nil | |||
| } | |||
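| // Illustrative caller-side sketch (not part of the vendored source): | |||
| // NextSequence is commonly used to mint increasing record IDs inside a | |||
| // write transaction. The bucket name and encoding are hypothetical: | |||
| // | |||
| //	err := db.Update(func(tx *bolt.Tx) error { | |||
| //		b := tx.Bucket([]byte("orders")) | |||
| //		id, err := b.NextSequence() | |||
| //		if err != nil { | |||
| //			return err | |||
| //		} | |||
| //		key := make([]byte, 8) | |||
| //		binary.BigEndian.PutUint64(key, id) // big-endian keeps keys sorted | |||
| //		return b.Put(key, []byte("payload")) | |||
| //	}) | |||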
| // ForEach executes a function for each key/value pair in a bucket. | |||
| // If the provided function returns an error then the iteration is stopped and | |||
| // the error is returned to the caller. The provided function must not modify | |||
| // the bucket; this will result in undefined behavior. | |||
| func (b *Bucket) ForEach(fn func(k, v []byte) error) error { | |||
| if b.tx.db == nil { | |||
| return ErrTxClosed | |||
| } | |||
| c := b.Cursor() | |||
| for k, v := c.First(); k != nil; k, v = c.Next() { | |||
| if err := fn(k, v); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
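| // Illustrative caller-side sketch (not part of the vendored source): | |||
| // ForEach visits pairs in key order, and a nil value marks a nested bucket, | |||
| // matching the cursor semantics described later. Bucket name is | |||
| // hypothetical: | |||
| // | |||
| //	err := db.View(func(tx *bolt.Tx) error { | |||
| //		return tx.Bucket([]byte("users")).ForEach(func(k, v []byte) error { | |||
| //			if v == nil { | |||
| //				fmt.Printf("sub-bucket %q\n", k) | |||
| //			} else { | |||
| //				fmt.Printf("%q = %q\n", k, v) | |||
| //			} | |||
| //			return nil | |||
| //		}) | |||
| //	}) | |||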
| // Stats returns stats on a bucket. | |||
| func (b *Bucket) Stats() BucketStats { | |||
| var s, subStats BucketStats | |||
| pageSize := b.tx.db.pageSize | |||
| s.BucketN++ | |||
| if b.root == 0 { | |||
| s.InlineBucketN++ | |||
| } | |||
| b.forEachPage(func(p *page, depth int) { | |||
| if (p.flags & leafPageFlag) != 0 { | |||
| s.KeyN += int(p.count) | |||
| // used totals the used bytes for the page | |||
| used := pageHeaderSize | |||
| if p.count != 0 { | |||
| // If page has any elements, add all element headers. | |||
| used += leafPageElementSize * int(p.count-1) | |||
| // Add all element key, value sizes. | |||
| // The computation takes advantage of the fact that the position | |||
| // of the last element's key/value equals the total of the sizes | |||
| // of all previous elements' keys and values. | |||
| // It also includes the last element's header. | |||
| lastElement := p.leafPageElement(p.count - 1) | |||
| used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) | |||
| } | |||
| if b.root == 0 { | |||
| // For inlined bucket just update the inline stats | |||
| s.InlineBucketInuse += used | |||
| } else { | |||
| // For non-inlined bucket update all the leaf stats | |||
| s.LeafPageN++ | |||
| s.LeafInuse += used | |||
| s.LeafOverflowN += int(p.overflow) | |||
| // Collect stats from sub-buckets. | |||
| // Do that by iterating over all element headers | |||
| // looking for the ones with the bucketLeafFlag. | |||
| for i := uint16(0); i < p.count; i++ { | |||
| e := p.leafPageElement(i) | |||
| if (e.flags & bucketLeafFlag) != 0 { | |||
| // For any bucket element, open the element value | |||
| // and recursively call Stats on the contained bucket. | |||
| subStats.Add(b.openBucket(e.value()).Stats()) | |||
| } | |||
| } | |||
| } | |||
| } else if (p.flags & branchPageFlag) != 0 { | |||
| s.BranchPageN++ | |||
| lastElement := p.branchPageElement(p.count - 1) | |||
| // used totals the used bytes for the page | |||
| // Add header and all element headers. | |||
| used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) | |||
| // Add size of all keys and values. | |||
| // Again, use the fact that the last element's position equals the | |||
| // total of the key and value sizes of all previous elements. | |||
| used += int(lastElement.pos + lastElement.ksize) | |||
| s.BranchInuse += used | |||
| s.BranchOverflowN += int(p.overflow) | |||
| } | |||
| // Keep track of maximum page depth. | |||
| if depth+1 > s.Depth { | |||
| s.Depth = (depth + 1) | |||
| } | |||
| }) | |||
| // Alloc stats can be computed from page counts and pageSize. | |||
| s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize | |||
| s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize | |||
| // Add the max depth of sub-buckets to get total nested depth. | |||
| s.Depth += subStats.Depth | |||
| // Add the stats for all sub-buckets | |||
| s.Add(subStats) | |||
| return s | |||
| } | |||
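| // Illustrative caller-side sketch (not part of the vendored source): stats | |||
| // are read inside a transaction; the bucket name is hypothetical: | |||
| // | |||
| //	err := db.View(func(tx *bolt.Tx) error { | |||
| //		s := tx.Bucket([]byte("users")).Stats() | |||
| //		fmt.Printf("%d keys in %d buckets, depth %d\n", s.KeyN, s.BucketN, s.Depth) | |||
| //		return nil | |||
| //	}) | |||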
| // forEachPage iterates over every page in a bucket, including inline pages. | |||
| func (b *Bucket) forEachPage(fn func(*page, int)) { | |||
| // If we have an inline page then just use that. | |||
| if b.page != nil { | |||
| fn(b.page, 0) | |||
| return | |||
| } | |||
| // Otherwise traverse the page hierarchy. | |||
| b.tx.forEachPage(b.root, 0, fn) | |||
| } | |||
| // forEachPageNode iterates over every page (or node) in a bucket. | |||
| // This also includes inline pages. | |||
| func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { | |||
| // If we have an inline page or root node then just use that. | |||
| if b.page != nil { | |||
| fn(b.page, nil, 0) | |||
| return | |||
| } | |||
| b._forEachPageNode(b.root, 0, fn) | |||
| } | |||
| func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { | |||
| var p, n = b.pageNode(pgid) | |||
| // Execute function. | |||
| fn(p, n, depth) | |||
| // Recursively loop over children. | |||
| if p != nil { | |||
| if (p.flags & branchPageFlag) != 0 { | |||
| for i := 0; i < int(p.count); i++ { | |||
| elem := p.branchPageElement(uint16(i)) | |||
| b._forEachPageNode(elem.pgid, depth+1, fn) | |||
| } | |||
| } | |||
| } else { | |||
| if !n.isLeaf { | |||
| for _, inode := range n.inodes { | |||
| b._forEachPageNode(inode.pgid, depth+1, fn) | |||
| } | |||
| } | |||
| } | |||
| } | |||
| // spill writes all the nodes for this bucket to dirty pages. | |||
| func (b *Bucket) spill() error { | |||
| // Spill all child buckets first. | |||
| for name, child := range b.buckets { | |||
| // If the child bucket is small enough and it has no child buckets then | |||
| // write it inline into the parent bucket's page. Otherwise spill it | |||
| // like a normal bucket and make the parent value a pointer to the page. | |||
| var value []byte | |||
| if child.inlineable() { | |||
| child.free() | |||
| value = child.write() | |||
| } else { | |||
| if err := child.spill(); err != nil { | |||
| return err | |||
| } | |||
| // Update the child bucket header in this bucket. | |||
| value = make([]byte, unsafe.Sizeof(bucket{})) | |||
| var bucket = (*bucket)(unsafe.Pointer(&value[0])) | |||
| *bucket = *child.bucket | |||
| } | |||
| // Skip writing the bucket if there are no materialized nodes. | |||
| if child.rootNode == nil { | |||
| continue | |||
| } | |||
| // Update parent node. | |||
| var c = b.Cursor() | |||
| k, _, flags := c.seek([]byte(name)) | |||
| if !bytes.Equal([]byte(name), k) { | |||
| panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) | |||
| } | |||
| if flags&bucketLeafFlag == 0 { | |||
| panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) | |||
| } | |||
| c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) | |||
| } | |||
| // Ignore if there's not a materialized root node. | |||
| if b.rootNode == nil { | |||
| return nil | |||
| } | |||
| // Spill nodes. | |||
| if err := b.rootNode.spill(); err != nil { | |||
| return err | |||
| } | |||
| b.rootNode = b.rootNode.root() | |||
| // Update the root node for this bucket. | |||
| if b.rootNode.pgid >= b.tx.meta.pgid { | |||
| panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) | |||
| } | |||
| b.root = b.rootNode.pgid | |||
| return nil | |||
| } | |||
| // inlineable returns true if a bucket is small enough to be written inline | |||
| // and if it contains no subbuckets. Otherwise returns false. | |||
| func (b *Bucket) inlineable() bool { | |||
| var n = b.rootNode | |||
| // Bucket must only contain a single leaf node. | |||
| if n == nil || !n.isLeaf { | |||
| return false | |||
| } | |||
| // Bucket is not inlineable if it contains subbuckets or if it goes beyond | |||
| // our threshold for inline bucket size. | |||
| var size = pageHeaderSize | |||
| for _, inode := range n.inodes { | |||
| size += leafPageElementSize + len(inode.key) + len(inode.value) | |||
| if inode.flags&bucketLeafFlag != 0 { | |||
| return false | |||
| } else if size > b.maxInlineBucketSize() { | |||
| return false | |||
| } | |||
| } | |||
| return true | |||
| } | |||
| // Returns the maximum total size of a bucket to make it a candidate for inlining. | |||
| func (b *Bucket) maxInlineBucketSize() int { | |||
| return b.tx.db.pageSize / 4 | |||
| } | |||
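| // Illustrative note (not part of the vendored source): with the common 4KB | |||
| // page size this threshold is 4096/4 = 1024 bytes, so a bucket whose | |||
| // header, element headers, keys, and values together exceed about 1KB is | |||
| // spilled to its own page instead of being stored inline. | |||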
| // write allocates and writes a bucket to a byte slice. | |||
| func (b *Bucket) write() []byte { | |||
| // Allocate the appropriate size. | |||
| var n = b.rootNode | |||
| var value = make([]byte, bucketHeaderSize+n.size()) | |||
| // Write a bucket header. | |||
| var bucket = (*bucket)(unsafe.Pointer(&value[0])) | |||
| *bucket = *b.bucket | |||
| // Convert byte slice to a fake page and write the root node. | |||
| var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) | |||
| n.write(p) | |||
| return value | |||
| } | |||
| // rebalance attempts to balance all nodes. | |||
| func (b *Bucket) rebalance() { | |||
| for _, n := range b.nodes { | |||
| n.rebalance() | |||
| } | |||
| for _, child := range b.buckets { | |||
| child.rebalance() | |||
| } | |||
| } | |||
| // node creates a node from a page and associates it with a given parent. | |||
| func (b *Bucket) node(pgid pgid, parent *node) *node { | |||
| _assert(b.nodes != nil, "nodes map expected") | |||
| // Retrieve node if it's already been created. | |||
| if n := b.nodes[pgid]; n != nil { | |||
| return n | |||
| } | |||
| // Otherwise create a node and cache it. | |||
| n := &node{bucket: b, parent: parent} | |||
| if parent == nil { | |||
| b.rootNode = n | |||
| } else { | |||
| parent.children = append(parent.children, n) | |||
| } | |||
| // Use the inline page if this is an inline bucket. | |||
| var p = b.page | |||
| if p == nil { | |||
| p = b.tx.page(pgid) | |||
| } | |||
| // Read the page into the node and cache it. | |||
| n.read(p) | |||
| b.nodes[pgid] = n | |||
| // Update statistics. | |||
| b.tx.stats.NodeCount++ | |||
| return n | |||
| } | |||
| // free recursively frees all pages in the bucket. | |||
| func (b *Bucket) free() { | |||
| if b.root == 0 { | |||
| return | |||
| } | |||
| var tx = b.tx | |||
| b.forEachPageNode(func(p *page, n *node, _ int) { | |||
| if p != nil { | |||
| tx.db.freelist.free(tx.meta.txid, p) | |||
| } else { | |||
| n.free() | |||
| } | |||
| }) | |||
| b.root = 0 | |||
| } | |||
| // dereference removes all references to the old mmap. | |||
| func (b *Bucket) dereference() { | |||
| if b.rootNode != nil { | |||
| b.rootNode.root().dereference() | |||
| } | |||
| for _, child := range b.buckets { | |||
| child.dereference() | |||
| } | |||
| } | |||
| // pageNode returns the in-memory node, if it exists. | |||
| // Otherwise returns the underlying page. | |||
| func (b *Bucket) pageNode(id pgid) (*page, *node) { | |||
| // Inline buckets have a fake page embedded in their value so treat them | |||
| // differently. We'll return the rootNode (if available) or the fake page. | |||
| if b.root == 0 { | |||
| if id != 0 { | |||
| panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) | |||
| } | |||
| if b.rootNode != nil { | |||
| return nil, b.rootNode | |||
| } | |||
| return b.page, nil | |||
| } | |||
| // Check the node cache for non-inline buckets. | |||
| if b.nodes != nil { | |||
| if n := b.nodes[id]; n != nil { | |||
| return nil, n | |||
| } | |||
| } | |||
| // Finally lookup the page from the transaction if no node is materialized. | |||
| return b.tx.page(id), nil | |||
| } | |||
| // BucketStats records statistics about resources used by a bucket. | |||
| type BucketStats struct { | |||
| // Page count statistics. | |||
| BranchPageN int // number of logical branch pages | |||
| BranchOverflowN int // number of physical branch overflow pages | |||
| LeafPageN int // number of logical leaf pages | |||
| LeafOverflowN int // number of physical leaf overflow pages | |||
| // Tree statistics. | |||
| KeyN int // number of key/value pairs | |||
| Depth int // number of levels in B+tree | |||
| // Page size utilization. | |||
| BranchAlloc int // bytes allocated for physical branch pages | |||
| BranchInuse int // bytes actually used for branch data | |||
| LeafAlloc int // bytes allocated for physical leaf pages | |||
| LeafInuse int // bytes actually used for leaf data | |||
| // Bucket statistics | |||
| BucketN int // total number of buckets including the top bucket | |||
| InlineBucketN int // total number of inlined buckets | |||
| InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) | |||
| } | |||
| func (s *BucketStats) Add(other BucketStats) { | |||
| s.BranchPageN += other.BranchPageN | |||
| s.BranchOverflowN += other.BranchOverflowN | |||
| s.LeafPageN += other.LeafPageN | |||
| s.LeafOverflowN += other.LeafOverflowN | |||
| s.KeyN += other.KeyN | |||
| if s.Depth < other.Depth { | |||
| s.Depth = other.Depth | |||
| } | |||
| s.BranchAlloc += other.BranchAlloc | |||
| s.BranchInuse += other.BranchInuse | |||
| s.LeafAlloc += other.LeafAlloc | |||
| s.LeafInuse += other.LeafInuse | |||
| s.BucketN += other.BucketN | |||
| s.InlineBucketN += other.InlineBucketN | |||
| s.InlineBucketInuse += other.InlineBucketInuse | |||
| } | |||
| // cloneBytes returns a copy of a given slice. | |||
| func cloneBytes(v []byte) []byte { | |||
| var clone = make([]byte, len(v)) | |||
| copy(clone, v) | |||
| return clone | |||
| } | |||
| @@ -0,0 +1,400 @@ | |||
| package bolt | |||
| import ( | |||
| "bytes" | |||
| "fmt" | |||
| "sort" | |||
| ) | |||
| // Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. | |||
| // Cursors see nested buckets with value == nil. | |||
| // Cursors can be obtained from a transaction and are valid as long as the transaction is open. | |||
| // | |||
| // Keys and values returned from the cursor are only valid for the life of the transaction. | |||
| // | |||
| // Changing data while traversing with a cursor may cause it to be invalidated | |||
| // and return unexpected keys and/or values. You must reposition your cursor | |||
| // after mutating data. | |||
| type Cursor struct { | |||
| bucket *Bucket | |||
| stack []elemRef | |||
| } | |||
| // Bucket returns the bucket that this cursor was created from. | |||
| func (c *Cursor) Bucket() *Bucket { | |||
| return c.bucket | |||
| } | |||
| // First moves the cursor to the first item in the bucket and returns its key and value. | |||
| // If the bucket is empty then a nil key and value are returned. | |||
| // The returned key and value are only valid for the life of the transaction. | |||
| func (c *Cursor) First() (key []byte, value []byte) { | |||
| _assert(c.bucket.tx.db != nil, "tx closed") | |||
| c.stack = c.stack[:0] | |||
| p, n := c.bucket.pageNode(c.bucket.root) | |||
| c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) | |||
| c.first() | |||
| // If we land on an empty page then move to the next value. | |||
| // https://github.com/boltdb/bolt/issues/450 | |||
| if c.stack[len(c.stack)-1].count() == 0 { | |||
| c.next() | |||
| } | |||
| k, v, flags := c.keyValue() | |||
| if (flags & uint32(bucketLeafFlag)) != 0 { | |||
| return k, nil | |||
| } | |||
| return k, v | |||
| } | |||
| // Last moves the cursor to the last item in the bucket and returns its key and value. | |||
| // If the bucket is empty then a nil key and value are returned. | |||
| // The returned key and value are only valid for the life of the transaction. | |||
| func (c *Cursor) Last() (key []byte, value []byte) { | |||
| _assert(c.bucket.tx.db != nil, "tx closed") | |||
| c.stack = c.stack[:0] | |||
| p, n := c.bucket.pageNode(c.bucket.root) | |||
| ref := elemRef{page: p, node: n} | |||
| ref.index = ref.count() - 1 | |||
| c.stack = append(c.stack, ref) | |||
| c.last() | |||
| k, v, flags := c.keyValue() | |||
| if (flags & uint32(bucketLeafFlag)) != 0 { | |||
| return k, nil | |||
| } | |||
| return k, v | |||
| } | |||
| // Next moves the cursor to the next item in the bucket and returns its key and value. | |||
| // If the cursor is at the end of the bucket then a nil key and value are returned. | |||
| // The returned key and value are only valid for the life of the transaction. | |||
| func (c *Cursor) Next() (key []byte, value []byte) { | |||
| _assert(c.bucket.tx.db != nil, "tx closed") | |||
| k, v, flags := c.next() | |||
| if (flags & uint32(bucketLeafFlag)) != 0 { | |||
| return k, nil | |||
| } | |||
| return k, v | |||
| } | |||
| // Prev moves the cursor to the previous item in the bucket and returns its key and value. | |||
| // If the cursor is at the beginning of the bucket then a nil key and value are returned. | |||
| // The returned key and value are only valid for the life of the transaction. | |||
| func (c *Cursor) Prev() (key []byte, value []byte) { | |||
| _assert(c.bucket.tx.db != nil, "tx closed") | |||
| // Attempt to move back one element until we're successful. | |||
| // Move up the stack as we hit the beginning of each page in our stack. | |||
| for i := len(c.stack) - 1; i >= 0; i-- { | |||
| elem := &c.stack[i] | |||
| if elem.index > 0 { | |||
| elem.index-- | |||
| break | |||
| } | |||
| c.stack = c.stack[:i] | |||
| } | |||
| // If we've hit the end then return nil. | |||
| if len(c.stack) == 0 { | |||
| return nil, nil | |||
| } | |||
| // Move down the stack to find the last element of the last leaf under this branch. | |||
| c.last() | |||
| k, v, flags := c.keyValue() | |||
| if (flags & uint32(bucketLeafFlag)) != 0 { | |||
| return k, nil | |||
| } | |||
| return k, v | |||
| } | |||
| // Seek moves the cursor to a given key and returns it. | |||
| // If the key does not exist then the next key is used. If no keys | |||
| // follow, a nil key is returned. | |||
| // The returned key and value are only valid for the life of the transaction. | |||
| func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { | |||
| k, v, flags := c.seek(seek) | |||
| // If we ended up after the last element of a page then move to the next one. | |||
| if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { | |||
| k, v, flags = c.next() | |||
| } | |||
| if k == nil { | |||
| return nil, nil | |||
| } else if (flags & uint32(bucketLeafFlag)) != 0 { | |||
| return k, nil | |||
| } | |||
| return k, v | |||
| } | |||
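| // Illustrative caller-side sketch (not part of the vendored source): Seek | |||
| // is the building block for prefix scans. Bucket name and prefix are | |||
| // hypothetical: | |||
| // | |||
| //	err := db.View(func(tx *bolt.Tx) error { | |||
| //		c := tx.Bucket([]byte("users")).Cursor() | |||
| //		prefix := []byte("alice:") | |||
| //		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { | |||
| //			fmt.Printf("%q = %q\n", k, v) | |||
| //		} | |||
| //		return nil | |||
| //	}) | |||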
| // Delete removes the current key/value under the cursor from the bucket. | |||
| // Delete fails if current key/value is a bucket or if the transaction is not writable. | |||
| func (c *Cursor) Delete() error { | |||
| if c.bucket.tx.db == nil { | |||
| return ErrTxClosed | |||
| } else if !c.bucket.Writable() { | |||
| return ErrTxNotWritable | |||
| } | |||
| key, _, flags := c.keyValue() | |||
| // Return an error if current value is a bucket. | |||
| if (flags & bucketLeafFlag) != 0 { | |||
| return ErrIncompatibleValue | |||
| } | |||
| c.node().del(key) | |||
| return nil | |||
| } | |||
| // seek moves the cursor to a given key and returns it. | |||
| // If the key does not exist then the next key is used. | |||
| func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { | |||
| _assert(c.bucket.tx.db != nil, "tx closed") | |||
| // Start from root page/node and traverse to correct page. | |||
| c.stack = c.stack[:0] | |||
| c.search(seek, c.bucket.root) | |||
| ref := &c.stack[len(c.stack)-1] | |||
| // If the cursor is pointing to the end of page/node then return nil. | |||
| if ref.index >= ref.count() { | |||
| return nil, nil, 0 | |||
| } | |||
| // If this is a bucket then return a nil value. | |||
| return c.keyValue() | |||
| } | |||
| // first moves the cursor to the first leaf element under the last page in the stack. | |||
| func (c *Cursor) first() { | |||
| for { | |||
| // Exit when we hit a leaf page. | |||
| var ref = &c.stack[len(c.stack)-1] | |||
| if ref.isLeaf() { | |||
| break | |||
| } | |||
| // Keep adding pages pointing to the first element to the stack. | |||
| var pgid pgid | |||
| if ref.node != nil { | |||
| pgid = ref.node.inodes[ref.index].pgid | |||
| } else { | |||
| pgid = ref.page.branchPageElement(uint16(ref.index)).pgid | |||
| } | |||
| p, n := c.bucket.pageNode(pgid) | |||
| c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) | |||
| } | |||
| } | |||
| // last moves the cursor to the last leaf element under the last page in the stack. | |||
| func (c *Cursor) last() { | |||
| for { | |||
| // Exit when we hit a leaf page. | |||
| ref := &c.stack[len(c.stack)-1] | |||
| if ref.isLeaf() { | |||
| break | |||
| } | |||
| // Keep adding pages pointing to the last element in the stack. | |||
| var pgid pgid | |||
| if ref.node != nil { | |||
| pgid = ref.node.inodes[ref.index].pgid | |||
| } else { | |||
| pgid = ref.page.branchPageElement(uint16(ref.index)).pgid | |||
| } | |||
| p, n := c.bucket.pageNode(pgid) | |||
| var nextRef = elemRef{page: p, node: n} | |||
| nextRef.index = nextRef.count() - 1 | |||
| c.stack = append(c.stack, nextRef) | |||
| } | |||
| } | |||
| // next moves to the next leaf element and returns the key and value. | |||
| // If the cursor is at the last leaf element then it stays there and returns nil. | |||
| func (c *Cursor) next() (key []byte, value []byte, flags uint32) { | |||
| for { | |||
| // Attempt to move over one element until we're successful. | |||
| // Move up the stack as we hit the end of each page in our stack. | |||
| var i int | |||
| for i = len(c.stack) - 1; i >= 0; i-- { | |||
| elem := &c.stack[i] | |||
| if elem.index < elem.count()-1 { | |||
| elem.index++ | |||
| break | |||
| } | |||
| } | |||
| // If we've hit the root page then stop and return. This will leave the | |||
| // cursor on the last element of the last page. | |||
| if i == -1 { | |||
| return nil, nil, 0 | |||
| } | |||
| // Otherwise start from where we left off in the stack and find the | |||
| // first element of the first leaf page. | |||
| c.stack = c.stack[:i+1] | |||
| c.first() | |||
| // If this is an empty page then restart and move back up the stack. | |||
| // https://github.com/boltdb/bolt/issues/450 | |||
| if c.stack[len(c.stack)-1].count() == 0 { | |||
| continue | |||
| } | |||
| return c.keyValue() | |||
| } | |||
| } | |||
| // search recursively performs a binary search against a given page/node until it finds a given key. | |||
| func (c *Cursor) search(key []byte, pgid pgid) { | |||
| p, n := c.bucket.pageNode(pgid) | |||
| if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { | |||
| panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) | |||
| } | |||
| e := elemRef{page: p, node: n} | |||
| c.stack = append(c.stack, e) | |||
| // If we're on a leaf page/node then find the specific node. | |||
| if e.isLeaf() { | |||
| c.nsearch(key) | |||
| return | |||
| } | |||
| if n != nil { | |||
| c.searchNode(key, n) | |||
| return | |||
| } | |||
| c.searchPage(key, p) | |||
| } | |||
| func (c *Cursor) searchNode(key []byte, n *node) { | |||
| var exact bool | |||
| index := sort.Search(len(n.inodes), func(i int) bool { | |||
| // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. | |||
| // sort.Search() finds the lowest index where f() != -1 but we need the highest index. | |||
| ret := bytes.Compare(n.inodes[i].key, key) | |||
| if ret == 0 { | |||
| exact = true | |||
| } | |||
| return ret != -1 | |||
| }) | |||
| if !exact && index > 0 { | |||
| index-- | |||
| } | |||
| c.stack[len(c.stack)-1].index = index | |||
| // Recursively search to the next page. | |||
| c.search(key, n.inodes[index].pgid) | |||
| } | |||
| func (c *Cursor) searchPage(key []byte, p *page) { | |||
| // Binary search for the correct range. | |||
| inodes := p.branchPageElements() | |||
| var exact bool | |||
| index := sort.Search(int(p.count), func(i int) bool { | |||
| // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. | |||
| // sort.Search() finds the lowest index where f() != -1 but we need the highest index. | |||
| ret := bytes.Compare(inodes[i].key(), key) | |||
| if ret == 0 { | |||
| exact = true | |||
| } | |||
| return ret != -1 | |||
| }) | |||
| if !exact && index > 0 { | |||
| index-- | |||
| } | |||
| c.stack[len(c.stack)-1].index = index | |||
| // Recursively search to the next page. | |||
| c.search(key, inodes[index].pgid) | |||
| } | |||
| // nsearch searches the leaf node on the top of the stack for a key. | |||
| func (c *Cursor) nsearch(key []byte) { | |||
| e := &c.stack[len(c.stack)-1] | |||
| p, n := e.page, e.node | |||
| // If we have a node then search its inodes. | |||
| if n != nil { | |||
| index := sort.Search(len(n.inodes), func(i int) bool { | |||
| return bytes.Compare(n.inodes[i].key, key) != -1 | |||
| }) | |||
| e.index = index | |||
| return | |||
| } | |||
| // If we have a page then search its leaf elements. | |||
| inodes := p.leafPageElements() | |||
| index := sort.Search(int(p.count), func(i int) bool { | |||
| return bytes.Compare(inodes[i].key(), key) != -1 | |||
| }) | |||
| e.index = index | |||
| } | |||
| // keyValue returns the key and value of the current leaf element. | |||
| func (c *Cursor) keyValue() ([]byte, []byte, uint32) { | |||
| ref := &c.stack[len(c.stack)-1] | |||
| if ref.count() == 0 || ref.index >= ref.count() { | |||
| return nil, nil, 0 | |||
| } | |||
| // Retrieve value from node. | |||
| if ref.node != nil { | |||
| inode := &ref.node.inodes[ref.index] | |||
| return inode.key, inode.value, inode.flags | |||
| } | |||
| // Or retrieve value from page. | |||
| elem := ref.page.leafPageElement(uint16(ref.index)) | |||
| return elem.key(), elem.value(), elem.flags | |||
| } | |||
| // node returns the node that the cursor is currently positioned on. | |||
| func (c *Cursor) node() *node { | |||
| _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") | |||
| // If the top of the stack is a leaf node then just return it. | |||
| if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { | |||
| return ref.node | |||
| } | |||
| // Start from root and traverse down the hierarchy. | |||
| var n = c.stack[0].node | |||
| if n == nil { | |||
| n = c.bucket.node(c.stack[0].page.id, nil) | |||
| } | |||
| for _, ref := range c.stack[:len(c.stack)-1] { | |||
| _assert(!n.isLeaf, "expected branch node") | |||
| n = n.childAt(int(ref.index)) | |||
| } | |||
| _assert(n.isLeaf, "expected leaf node") | |||
| return n | |||
| } | |||
| // elemRef represents a reference to an element on a given page/node. | |||
| type elemRef struct { | |||
| page *page | |||
| node *node | |||
| index int | |||
| } | |||
| // isLeaf returns whether the ref is pointing at a leaf page/node. | |||
| func (r *elemRef) isLeaf() bool { | |||
| if r.node != nil { | |||
| return r.node.isLeaf | |||
| } | |||
| return (r.page.flags & leafPageFlag) != 0 | |||
| } | |||
| // count returns the number of inodes or page elements. | |||
| func (r *elemRef) count() int { | |||
| if r.node != nil { | |||
| return len(r.node.inodes) | |||
| } | |||
| return int(r.page.count) | |||
| } | |||
| @@ -0,0 +1,993 @@ | |||
| package bolt | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "hash/fnv" | |||
| "log" | |||
| "os" | |||
| "runtime" | |||
| "runtime/debug" | |||
| "strings" | |||
| "sync" | |||
| "time" | |||
| "unsafe" | |||
| ) | |||
| // The largest step that can be taken when remapping the mmap. | |||
| const maxMmapStep = 1 << 30 // 1GB | |||
| // The data file format version. | |||
| const version = 2 | |||
| // Represents a marker value to indicate that a file is a Bolt DB. | |||
| const magic uint32 = 0xED0CDAED | |||
| // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when | |||
| // syncing changes to a file. This is required as some operating systems, | |||
| // such as OpenBSD, do not have a unified buffer cache (UBC) and writes | |||
| // must be synchronized using the msync(2) syscall. | |||
| const IgnoreNoSync = runtime.GOOS == "openbsd" | |||
| // Default values if not set in a DB instance. | |||
| const ( | |||
| DefaultMaxBatchSize int = 1000 | |||
| DefaultMaxBatchDelay = 10 * time.Millisecond | |||
| DefaultAllocSize = 16 * 1024 * 1024 | |||
| ) | |||
| // DB represents a collection of buckets persisted to a file on disk. | |||
| // All data access is performed through transactions which can be obtained through the DB. | |||
| // All the functions on DB will return an ErrDatabaseNotOpen if accessed before Open() is called. | |||
| type DB struct { | |||
| // When enabled, the database will perform a Check() after every commit. | |||
| // A panic is issued if the database is in an inconsistent state. This | |||
| // flag has a large performance impact so it should only be used for | |||
| // debugging purposes. | |||
| StrictMode bool | |||
| // Setting the NoSync flag will cause the database to skip fsync() | |||
| // calls after each commit. This can be useful when bulk loading data | |||
| // into a database and you can restart the bulk load in the event of | |||
| // a system failure or database corruption. Do not set this flag for | |||
| // normal use. | |||
| // | |||
| // If the package global IgnoreNoSync constant is true, this value is | |||
| // ignored. See the comment on that constant for more details. | |||
| // | |||
| // THIS IS UNSAFE. PLEASE USE WITH CAUTION. | |||
| NoSync bool | |||
| // When true, skips the truncate call when growing the database. | |||
| // Setting this to true is only safe on non-ext3/ext4 systems. | |||
| // Skipping truncation avoids preallocation of hard drive space and | |||
| // bypasses a truncate() and fsync() syscall on remapping. | |||
| // | |||
| // https://github.com/boltdb/bolt/issues/284 | |||
| NoGrowSync bool | |||
| // If you want to read the entire database fast, you can set MmapFlags to | |||
| // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. | |||
| MmapFlags int | |||
| // MaxBatchSize is the maximum size of a batch. Default value is | |||
| // copied from DefaultMaxBatchSize in Open. | |||
| // | |||
| // If <=0, disables batching. | |||
| // | |||
| // Do not change concurrently with calls to Batch. | |||
| MaxBatchSize int | |||
| // MaxBatchDelay is the maximum delay before a batch starts. | |||
| // Default value is copied from DefaultMaxBatchDelay in Open. | |||
| // | |||
| // If <=0, effectively disables batching. | |||
| // | |||
| // Do not change concurrently with calls to Batch. | |||
| MaxBatchDelay time.Duration | |||
| // AllocSize is the amount of space allocated when the database | |||
| // needs to create new pages. This is done to amortize the cost | |||
| // of truncate() and fsync() when growing the data file. | |||
| AllocSize int | |||
| path string | |||
| file *os.File | |||
| lockfile *os.File // windows only | |||
| dataref []byte // mmap'ed readonly, write throws SEGV | |||
| data *[maxMapSize]byte | |||
| datasz int | |||
| filesz int // current on disk file size | |||
| meta0 *meta | |||
| meta1 *meta | |||
| pageSize int | |||
| opened bool | |||
| rwtx *Tx | |||
| txs []*Tx | |||
| freelist *freelist | |||
| stats Stats | |||
| batchMu sync.Mutex | |||
| batch *batch | |||
| rwlock sync.Mutex // Allows only one writer at a time. | |||
| metalock sync.Mutex // Protects meta page access. | |||
| mmaplock sync.RWMutex // Protects mmap access during remapping. | |||
| statlock sync.RWMutex // Protects stats access. | |||
| ops struct { | |||
| writeAt func(b []byte, off int64) (n int, err error) | |||
| } | |||
| // Read only mode. | |||
| // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. | |||
| readOnly bool | |||
| } | |||
| // Path returns the path to the currently open database file. | |||
| func (db *DB) Path() string { | |||
| return db.path | |||
| } | |||
| // GoString returns the Go string representation of the database. | |||
| func (db *DB) GoString() string { | |||
| return fmt.Sprintf("bolt.DB{path:%q}", db.path) | |||
| } | |||
| // String returns the string representation of the database. | |||
| func (db *DB) String() string { | |||
| return fmt.Sprintf("DB<%q>", db.path) | |||
| } | |||
| // Open creates and opens a database at the given path. | |||
| // If the file does not exist then it will be created automatically. | |||
| // Passing in nil options will cause Bolt to open the database with the default options. | |||
| func Open(path string, mode os.FileMode, options *Options) (*DB, error) { | |||
| var db = &DB{opened: true} | |||
| // Set default options if no options are provided. | |||
| if options == nil { | |||
| options = DefaultOptions | |||
| } | |||
| db.NoGrowSync = options.NoGrowSync | |||
| db.MmapFlags = options.MmapFlags | |||
| // Set default values for later DB operations. | |||
| db.MaxBatchSize = DefaultMaxBatchSize | |||
| db.MaxBatchDelay = DefaultMaxBatchDelay | |||
| db.AllocSize = DefaultAllocSize | |||
| flag := os.O_RDWR | |||
| if options.ReadOnly { | |||
| flag = os.O_RDONLY | |||
| db.readOnly = true | |||
| } | |||
| // Open data file and separate sync handler for metadata writes. | |||
| db.path = path | |||
| var err error | |||
| if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { | |||
| _ = db.close() | |||
| return nil, err | |||
| } | |||
| // Lock file so that other processes using Bolt in read-write mode cannot | |||
| // use the database at the same time. This would cause corruption since | |||
| // the two processes would write meta pages and free pages separately. | |||
| // The database file is locked exclusively (only one process can grab the lock) | |||
| // if !options.ReadOnly. | |||
| // The database file is locked using the shared lock (more than one process may | |||
| // hold a lock at the same time) otherwise (options.ReadOnly is set). | |||
| if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { | |||
| _ = db.close() | |||
| return nil, err | |||
| } | |||
| // Default values for test hooks | |||
| db.ops.writeAt = db.file.WriteAt | |||
| // Initialize the database if it doesn't exist. | |||
| if info, err := db.file.Stat(); err != nil { | |||
| return nil, err | |||
| } else if info.Size() == 0 { | |||
| // Initialize new files with meta pages. | |||
| if err := db.init(); err != nil { | |||
| return nil, err | |||
| } | |||
| } else { | |||
| // Read the first meta page to determine the page size. | |||
| var buf [0x1000]byte | |||
| if _, err := db.file.ReadAt(buf[:], 0); err == nil { | |||
| m := db.pageInBuffer(buf[:], 0).meta() | |||
| if err := m.validate(); err != nil { | |||
| return nil, err | |||
| } | |||
| db.pageSize = int(m.pageSize) | |||
| } | |||
| } | |||
| // Memory map the data file. | |||
| if err := db.mmap(options.InitialMmapSize); err != nil { | |||
| _ = db.close() | |||
| return nil, err | |||
| } | |||
| // Read in the freelist. | |||
| db.freelist = newFreelist() | |||
| db.freelist.read(db.page(db.meta().freelist)) | |||
| // Mark the database as opened and return. | |||
| return db, nil | |||
| } | |||
| // mmap opens the underlying memory-mapped file and initializes the meta references. | |||
| // minsz is the minimum size that the new mmap can be. | |||
| func (db *DB) mmap(minsz int) error { | |||
| db.mmaplock.Lock() | |||
| defer db.mmaplock.Unlock() | |||
| info, err := db.file.Stat() | |||
| if err != nil { | |||
| return fmt.Errorf("mmap stat error: %s", err) | |||
| } else if int(info.Size()) < db.pageSize*2 { | |||
| return fmt.Errorf("file size too small") | |||
| } | |||
| // Ensure the size is at least the minimum size. | |||
| var size = int(info.Size()) | |||
| if size < minsz { | |||
| size = minsz | |||
| } | |||
| size, err = db.mmapSize(size) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| // Dereference all mmap references before unmapping. | |||
| if db.rwtx != nil { | |||
| db.rwtx.root.dereference() | |||
| } | |||
| // Unmap existing data before continuing. | |||
| if err := db.munmap(); err != nil { | |||
| return err | |||
| } | |||
| // Memory-map the data file as a byte slice. | |||
| if err := mmap(db, size); err != nil { | |||
| return err | |||
| } | |||
| // Save references to the meta pages. | |||
| db.meta0 = db.page(0).meta() | |||
| db.meta1 = db.page(1).meta() | |||
| // Validate the meta pages. | |||
| if err := db.meta0.validate(); err != nil { | |||
| return err | |||
| } | |||
| if err := db.meta1.validate(); err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| // munmap unmaps the data file from memory. | |||
| func (db *DB) munmap() error { | |||
| if err := munmap(db); err != nil { | |||
| return fmt.Errorf("unmap error: " + err.Error()) | |||
| } | |||
| return nil | |||
| } | |||
| // mmapSize determines the appropriate size for the mmap given the current size | |||
| // of the database. The minimum size is 32KB and doubles until it reaches 1GB. | |||
| // Returns an error if the new mmap size is greater than the max allowed. | |||
| func (db *DB) mmapSize(size int) (int, error) { | |||
| // Double the size from 32KB until 1GB. | |||
| for i := uint(15); i <= 30; i++ { | |||
| if size <= 1<<i { | |||
| return 1 << i, nil | |||
| } | |||
| } | |||
| // Verify the requested size is not above the maximum allowed. | |||
| if size > maxMapSize { | |||
| return 0, fmt.Errorf("mmap too large") | |||
| } | |||
| // If larger than 1GB then grow by 1GB at a time. | |||
| sz := int64(size) | |||
| if remainder := sz % int64(maxMmapStep); remainder > 0 { | |||
| sz += int64(maxMmapStep) - remainder | |||
| } | |||
| // Ensure that the mmap size is a multiple of the page size. | |||
| // This should always be true since we're incrementing in MBs. | |||
| pageSize := int64(db.pageSize) | |||
| if (sz % pageSize) != 0 { | |||
| sz = ((sz / pageSize) + 1) * pageSize | |||
| } | |||
| // If we've exceeded the max size then only grow up to the max size. | |||
| if sz > maxMapSize { | |||
| sz = maxMapSize | |||
| } | |||
| return int(sz), nil | |||
| } | |||
| // init creates a new database file and initializes its meta pages. | |||
| func (db *DB) init() error { | |||
| // Set the page size to the OS page size. | |||
| db.pageSize = os.Getpagesize() | |||
| // Create two meta pages on a buffer. | |||
| buf := make([]byte, db.pageSize*4) | |||
| for i := 0; i < 2; i++ { | |||
| p := db.pageInBuffer(buf[:], pgid(i)) | |||
| p.id = pgid(i) | |||
| p.flags = metaPageFlag | |||
| // Initialize the meta page. | |||
| m := p.meta() | |||
| m.magic = magic | |||
| m.version = version | |||
| m.pageSize = uint32(db.pageSize) | |||
| m.freelist = 2 | |||
| m.root = bucket{root: 3} | |||
| m.pgid = 4 | |||
| m.txid = txid(i) | |||
| } | |||
| // Write an empty freelist at page 3. | |||
| p := db.pageInBuffer(buf[:], pgid(2)) | |||
| p.id = pgid(2) | |||
| p.flags = freelistPageFlag | |||
| p.count = 0 | |||
| // Write an empty leaf page at page 4. | |||
| p = db.pageInBuffer(buf[:], pgid(3)) | |||
| p.id = pgid(3) | |||
| p.flags = leafPageFlag | |||
| p.count = 0 | |||
| // Write the buffer to our data file. | |||
| if _, err := db.ops.writeAt(buf, 0); err != nil { | |||
| return err | |||
| } | |||
| if err := fdatasync(db); err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
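| // Illustrative note (not part of the vendored source): init lays out a | |||
| // fresh file as four pages: | |||
| // | |||
| //	page 0: meta0          (txid 0) | |||
| //	page 1: meta1          (txid 1) | |||
| //	page 2: empty freelist (meta.freelist = 2) | |||
| //	page 3: empty leaf     (the root bucket, meta.root.root = 3) | |||
| // | |||
| // meta.pgid = 4 then marks the end of the file, the high water mark used | |||
| // when allocating new pages. | |||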
| // Close releases all database resources. | |||
| // All transactions must be closed before closing the database. | |||
| func (db *DB) Close() error { | |||
| db.rwlock.Lock() | |||
| defer db.rwlock.Unlock() | |||
| db.metalock.Lock() | |||
| defer db.metalock.Unlock() | |||
| db.mmaplock.RLock() | |||
| defer db.mmaplock.RUnlock() | |||
| return db.close() | |||
| } | |||
| func (db *DB) close() error { | |||
| if !db.opened { | |||
| return nil | |||
| } | |||
| db.opened = false | |||
| db.freelist = nil | |||
| db.path = "" | |||
| // Clear ops. | |||
| db.ops.writeAt = nil | |||
| // Close the mmap. | |||
| if err := db.munmap(); err != nil { | |||
| return err | |||
| } | |||
| // Close file handles. | |||
| if db.file != nil { | |||
| // No need to unlock read-only file. | |||
| if !db.readOnly { | |||
| // Unlock the file. | |||
| if err := funlock(db); err != nil { | |||
| log.Printf("bolt.Close(): funlock error: %s", err) | |||
| } | |||
| } | |||
| // Close the file descriptor. | |||
| if err := db.file.Close(); err != nil { | |||
| return fmt.Errorf("db file close: %s", err) | |||
| } | |||
| db.file = nil | |||
| } | |||
| return nil | |||
| } | |||
| // Begin starts a new transaction. | |||
| // Multiple read-only transactions can be used concurrently but only one | |||
| // write transaction can be used at a time. Starting multiple write transactions | |||
| // will cause the calls to block and be serialized until the current write | |||
| // transaction finishes. | |||
| // | |||
| // Transactions should not be dependent on one another. Opening a read | |||
| // transaction and a write transaction in the same goroutine can cause the | |||
| // writer to deadlock because the database periodically needs to re-mmap itself | |||
| // as it grows and it cannot do that while a read transaction is open. | |||
| // | |||
| // If a long-running read transaction (for example, a snapshot transaction) is | |||
| // needed, you might want to set Options.InitialMmapSize to a large enough value | |||
| // to avoid potential blocking of the write transaction. | |||
| // | |||
| // IMPORTANT: You must close read-only transactions after you are finished or | |||
| // else the database will not reclaim old pages. | |||
| func (db *DB) Begin(writable bool) (*Tx, error) { | |||
| if writable { | |||
| return db.beginRWTx() | |||
| } | |||
| return db.beginTx() | |||
| } | |||
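| // Illustrative caller-side sketch (not part of the vendored source): a | |||
| // manually managed transaction must end in exactly one Commit or Rollback; | |||
| // deferring Rollback is safe because it simply returns ErrTxClosed once | |||
| // Commit has succeeded. Bucket and key are hypothetical: | |||
| // | |||
| //	tx, err := db.Begin(true) | |||
| //	if err != nil { | |||
| //		return err | |||
| //	} | |||
| //	defer tx.Rollback() | |||
| //	b, err := tx.CreateBucketIfNotExists([]byte("users")) | |||
| //	if err != nil { | |||
| //		return err | |||
| //	} | |||
| //	if err := b.Put([]byte("k"), []byte("v")); err != nil { | |||
| //		return err | |||
| //	} | |||
| //	return tx.Commit() | |||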
| func (db *DB) beginTx() (*Tx, error) { | |||
| // Lock the meta pages while we initialize the transaction. We obtain | |||
| // the meta lock before the mmap lock because that's the order that the | |||
| // write transaction will obtain them. | |||
| db.metalock.Lock() | |||
| // Obtain a read-only lock on the mmap. When the mmap is remapped it will | |||
| // obtain a write lock so all transactions must finish before it can be | |||
| // remapped. | |||
| db.mmaplock.RLock() | |||
| // Exit if the database is not open yet. | |||
| if !db.opened { | |||
| db.mmaplock.RUnlock() | |||
| db.metalock.Unlock() | |||
| return nil, ErrDatabaseNotOpen | |||
| } | |||
| // Create a transaction associated with the database. | |||
| t := &Tx{} | |||
| t.init(db) | |||
| // Keep track of transaction until it closes. | |||
| db.txs = append(db.txs, t) | |||
| n := len(db.txs) | |||
| // Unlock the meta pages. | |||
| db.metalock.Unlock() | |||
| // Update the transaction stats. | |||
| db.statlock.Lock() | |||
| db.stats.TxN++ | |||
| db.stats.OpenTxN = n | |||
| db.statlock.Unlock() | |||
| return t, nil | |||
| } | |||
| func (db *DB) beginRWTx() (*Tx, error) { | |||
| // If the database was opened with Options.ReadOnly, return an error. | |||
| if db.readOnly { | |||
| return nil, ErrDatabaseReadOnly | |||
| } | |||
| // Obtain writer lock. This is released by the transaction when it closes. | |||
| // This enforces only one writer transaction at a time. | |||
| db.rwlock.Lock() | |||
| // Once we have the writer lock then we can lock the meta pages so that | |||
| // we can set up the transaction. | |||
| db.metalock.Lock() | |||
| defer db.metalock.Unlock() | |||
| // Exit if the database is not open yet. | |||
| if !db.opened { | |||
| db.rwlock.Unlock() | |||
| return nil, ErrDatabaseNotOpen | |||
| } | |||
| // Create a transaction associated with the database. | |||
| t := &Tx{writable: true} | |||
| t.init(db) | |||
| db.rwtx = t | |||
| // Free any pages associated with closed read-only transactions. | |||
| var minid txid = 0xFFFFFFFFFFFFFFFF | |||
| for _, t := range db.txs { | |||
| if t.meta.txid < minid { | |||
| minid = t.meta.txid | |||
| } | |||
| } | |||
| if minid > 0 { | |||
| db.freelist.release(minid - 1) | |||
| } | |||
| return t, nil | |||
| } | |||
| // removeTx removes a transaction from the database. | |||
| func (db *DB) removeTx(tx *Tx) { | |||
| // Release the read lock on the mmap. | |||
| db.mmaplock.RUnlock() | |||
| // Use the meta lock to restrict access to the DB object. | |||
| db.metalock.Lock() | |||
| // Remove the transaction. | |||
| for i, t := range db.txs { | |||
| if t == tx { | |||
| db.txs = append(db.txs[:i], db.txs[i+1:]...) | |||
| break | |||
| } | |||
| } | |||
| n := len(db.txs) | |||
| // Unlock the meta pages. | |||
| db.metalock.Unlock() | |||
| // Merge statistics. | |||
| db.statlock.Lock() | |||
| db.stats.OpenTxN = n | |||
| db.stats.TxStats.add(&tx.stats) | |||
| db.statlock.Unlock() | |||
| } | |||
| // Update executes a function within the context of a read-write managed transaction. | |||
| // If no error is returned from the function then the transaction is committed. | |||
| // If an error is returned then the entire transaction is rolled back. | |||
| // Any error that is returned from the function or returned from the commit is | |||
| // returned from the Update() method. | |||
| // | |||
| // Attempting to manually commit or rollback within the function will cause a panic. | |||
| func (db *DB) Update(fn func(*Tx) error) error { | |||
| t, err := db.Begin(true) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| // Make sure the transaction rolls back in the event of a panic. | |||
| defer func() { | |||
| if t.db != nil { | |||
| t.rollback() | |||
| } | |||
| }() | |||
| // Mark as a managed tx so that the inner function cannot manually commit. | |||
| t.managed = true | |||
| // If an error is returned from the function then rollback and return error. | |||
| err = fn(t) | |||
| t.managed = false | |||
| if err != nil { | |||
| _ = t.Rollback() | |||
| return err | |||
| } | |||
| return t.Commit() | |||
| } | |||
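| // An illustrative sketch (not part of bolt): a typical Update call that | |||
| // creates a bucket and stores a key. The bucket and key names are | |||
| // assumptions for the example. | |||
| // | |||
| //    err := db.Update(func(tx *bolt.Tx) error { | |||
| //        b, err := tx.CreateBucketIfNotExists([]byte("widgets")) | |||
| //        if err != nil { | |||
| //            return err | |||
| //        } | |||
| //        return b.Put([]byte("answer"), []byte("42")) | |||
| //    }) | |||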
| // View executes a function within the context of a managed read-only transaction. | |||
| // Any error that is returned from the function is returned from the View() method. | |||
| // | |||
| // Attempting to manually rollback within the function will cause a panic. | |||
| func (db *DB) View(fn func(*Tx) error) error { | |||
| t, err := db.Begin(false) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| // Make sure the transaction rolls back in the event of a panic. | |||
| defer func() { | |||
| if t.db != nil { | |||
| t.rollback() | |||
| } | |||
| }() | |||
| // Mark as a managed tx so that the inner function cannot manually rollback. | |||
| t.managed = true | |||
| // If an error is returned from the function then pass it through. | |||
| err = fn(t) | |||
| t.managed = false | |||
| if err != nil { | |||
| _ = t.Rollback() | |||
| return err | |||
| } | |||
| if err := t.Rollback(); err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
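| // An illustrative sketch (not part of bolt): reading the value back in a | |||
| // read-only transaction; the names mirror the hypothetical Update example | |||
| // above. | |||
| // | |||
| //    err := db.View(func(tx *bolt.Tx) error { | |||
| //        if b := tx.Bucket([]byte("widgets")); b != nil { | |||
| //            fmt.Printf("answer=%s\n", b.Get([]byte("answer"))) | |||
| //        } | |||
| //        return nil | |||
| //    }) | |||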
| // Batch calls fn as part of a batch. It behaves similar to Update, | |||
| // except: | |||
| // | |||
| // 1. concurrent Batch calls can be combined into a single Bolt | |||
| // transaction. | |||
| // | |||
| // 2. the function passed to Batch may be called multiple times, | |||
| // regardless of whether it returns error or not. | |||
| // | |||
| // This means that Batch function side effects must be idempotent and | |||
| // take permanent effect only after a successful return is seen in | |||
| // the caller. | |||
| // | |||
| // The maximum batch size and delay can be adjusted with DB.MaxBatchSize | |||
| // and DB.MaxBatchDelay, respectively. | |||
| // | |||
| // Batch is only useful when there are multiple goroutines calling it. | |||
| func (db *DB) Batch(fn func(*Tx) error) error { | |||
| errCh := make(chan error, 1) | |||
| db.batchMu.Lock() | |||
| if db.batch == nil || len(db.batch.calls) >= db.MaxBatchSize { | |||
| // There is no existing batch, or the existing batch is full; start a new one. | |||
| db.batch = &batch{ | |||
| db: db, | |||
| } | |||
| db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) | |||
| } | |||
| db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) | |||
| if len(db.batch.calls) >= db.MaxBatchSize { | |||
| // wake up batch, it's ready to run | |||
| go db.batch.trigger() | |||
| } | |||
| db.batchMu.Unlock() | |||
| err := <-errCh | |||
| if err == trySolo { | |||
| err = db.Update(fn) | |||
| } | |||
| return err | |||
| } | |||
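| // An illustrative sketch (not part of bolt): Batch pays off only when many | |||
| // goroutines write concurrently, and fn must stay idempotent because it may | |||
| // run more than once. The bucket and key naming is assumed for the example. | |||
| // | |||
| //    for i := 0; i < 10; i++ { | |||
| //        go func(i int) { | |||
| //            _ = db.Batch(func(tx *bolt.Tx) error { | |||
| //                b, err := tx.CreateBucketIfNotExists([]byte("events")) | |||
| //                if err != nil { | |||
| //                    return err | |||
| //                } | |||
| //                return b.Put([]byte(fmt.Sprintf("k%d", i)), []byte("v")) | |||
| //            }) | |||
| //        }(i) | |||
| //    } | |||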
| type call struct { | |||
| fn func(*Tx) error | |||
| err chan<- error | |||
| } | |||
| type batch struct { | |||
| db *DB | |||
| timer *time.Timer | |||
| start sync.Once | |||
| calls []call | |||
| } | |||
| // trigger runs the batch if it hasn't already been run. | |||
| func (b *batch) trigger() { | |||
| b.start.Do(b.run) | |||
| } | |||
| // run performs the transactions in the batch and communicates results | |||
| // back to DB.Batch. | |||
| func (b *batch) run() { | |||
| b.db.batchMu.Lock() | |||
| b.timer.Stop() | |||
| // Make sure no new work is added to this batch, but don't break | |||
| // other batches. | |||
| if b.db.batch == b { | |||
| b.db.batch = nil | |||
| } | |||
| b.db.batchMu.Unlock() | |||
| retry: | |||
| for len(b.calls) > 0 { | |||
| var failIdx = -1 | |||
| err := b.db.Update(func(tx *Tx) error { | |||
| for i, c := range b.calls { | |||
| if err := safelyCall(c.fn, tx); err != nil { | |||
| failIdx = i | |||
| return err | |||
| } | |||
| } | |||
| return nil | |||
| }) | |||
| if failIdx >= 0 { | |||
| // Take the failing transaction out of the batch. It's safe to | |||
| // shorten b.calls here because db.batch no longer points to us | |||
| // and no new work can be added to this batch. | |||
| c := b.calls[failIdx] | |||
| b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] | |||
| // tell the submitter to re-run it solo and continue with the rest of the batch | |||
| c.err <- trySolo | |||
| continue retry | |||
| } | |||
| // pass success, or bolt internal errors, to all callers | |||
| for _, c := range b.calls { | |||
| if c.err != nil { | |||
| c.err <- err | |||
| } | |||
| } | |||
| break retry | |||
| } | |||
| } | |||
| // trySolo is a special sentinel error value used for signaling that a | |||
| // transaction function should be re-run. It should never be seen by | |||
| // callers. | |||
| var trySolo = errors.New("batch function returned an error and should be re-run solo") | |||
| type panicked struct { | |||
| reason interface{} | |||
| } | |||
| func (p panicked) Error() string { | |||
| if err, ok := p.reason.(error); ok { | |||
| return err.Error() | |||
| } | |||
| return fmt.Sprintf("panic: %v", p.reason) | |||
| } | |||
| func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { | |||
| defer func() { | |||
| if p := recover(); p != nil { | |||
| err = panicked{p} | |||
| } | |||
| }() | |||
| return fn(tx) | |||
| } | |||
| // Sync executes fdatasync() against the database file handle. | |||
| // | |||
| // This is not necessary under normal operation; however, if you use NoSync | |||
| // then it allows you to force the database file to sync against the disk. | |||
| func (db *DB) Sync() error { return fdatasync(db) } | |||
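| // An illustrative sketch (not part of bolt): with db.NoSync set during a | |||
| // bulk load, a manual Sync afterwards makes the data durable. | |||
| // | |||
| //    db.NoSync = true | |||
| //    // ... run the bulk of the Update calls ... | |||
| //    if err := db.Sync(); err != nil { | |||
| //        log.Fatal(err) | |||
| //    } | |||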
| // Stats retrieves ongoing performance stats for the database. | |||
| // This is only updated when a transaction closes. | |||
| func (db *DB) Stats() Stats { | |||
| db.statlock.RLock() | |||
| defer db.statlock.RUnlock() | |||
| return db.stats | |||
| } | |||
| // Info returns information for internal access to the raw data bytes from | |||
| // the C cursor. Use carefully, or not at all. | |||
| func (db *DB) Info() *Info { | |||
| return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} | |||
| } | |||
| // page retrieves a page reference from the mmap based on the current page size. | |||
| func (db *DB) page(id pgid) *page { | |||
| pos := id * pgid(db.pageSize) | |||
| return (*page)(unsafe.Pointer(&db.data[pos])) | |||
| } | |||
| // pageInBuffer retrieves a page reference from a given byte array based on the current page size. | |||
| func (db *DB) pageInBuffer(b []byte, id pgid) *page { | |||
| return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) | |||
| } | |||
| // meta retrieves the current meta page reference. | |||
| func (db *DB) meta() *meta { | |||
| if db.meta0.txid > db.meta1.txid { | |||
| return db.meta0 | |||
| } | |||
| return db.meta1 | |||
| } | |||
| // allocate returns a contiguous block of memory starting at a given page. | |||
| func (db *DB) allocate(count int) (*page, error) { | |||
| // Allocate a temporary buffer for the page. | |||
| buf := make([]byte, count*db.pageSize) | |||
| p := (*page)(unsafe.Pointer(&buf[0])) | |||
| p.overflow = uint32(count - 1) | |||
| // Use pages from the freelist if they are available. | |||
| if p.id = db.freelist.allocate(count); p.id != 0 { | |||
| return p, nil | |||
| } | |||
| // Resize mmap() if we're at the end. | |||
| p.id = db.rwtx.meta.pgid | |||
| var minsz = int((p.id+pgid(count))+1) * db.pageSize | |||
| if minsz >= db.datasz { | |||
| if err := db.mmap(minsz); err != nil { | |||
| return nil, fmt.Errorf("mmap allocate error: %s", err) | |||
| } | |||
| } | |||
| // Move the page id high water mark. | |||
| db.rwtx.meta.pgid += pgid(count) | |||
| return p, nil | |||
| } | |||
| // grow grows the size of the database to the given sz. | |||
| func (db *DB) grow(sz int) error { | |||
| // Ignore if the new size is less than or equal to the current file size. | |||
| if sz <= db.filesz { | |||
| return nil | |||
| } | |||
| // If the data is smaller than the alloc size then only allocate what's needed. | |||
| // Once it goes over the allocation size then allocate in chunks. | |||
| if db.datasz < db.AllocSize { | |||
| sz = db.datasz | |||
| } else { | |||
| sz += db.AllocSize | |||
| } | |||
| // Truncate and fsync to ensure file size metadata is flushed. | |||
| // https://github.com/boltdb/bolt/issues/284 | |||
| if !db.NoGrowSync && !db.readOnly { | |||
| if runtime.GOOS != "windows" { | |||
| if err := db.file.Truncate(int64(sz)); err != nil { | |||
| return fmt.Errorf("file resize error: %s", err) | |||
| } | |||
| } | |||
| if err := db.file.Sync(); err != nil { | |||
| return fmt.Errorf("file sync error: %s", err) | |||
| } | |||
| } | |||
| db.filesz = sz | |||
| return nil | |||
| } | |||
| // IsReadOnly returns whether the database was opened in read-only mode. | |||
| func (db *DB) IsReadOnly() bool { | |||
| return db.readOnly | |||
| } | |||
| // Options represents the options that can be set when opening a database. | |||
| type Options struct { | |||
| // Timeout is the amount of time to wait to obtain a file lock. | |||
| // When set to zero it will wait indefinitely. This option is only | |||
| // available on Darwin and Linux. | |||
| Timeout time.Duration | |||
| // Sets the DB.NoGrowSync flag before memory mapping the file. | |||
| NoGrowSync bool | |||
| // Open database in read-only mode. Uses flock(..., LOCK_SH|LOCK_NB) to | |||
| // grab a shared lock (UNIX). | |||
| ReadOnly bool | |||
| // Sets the DB.MmapFlags flag before memory mapping the file. | |||
| MmapFlags int | |||
| // InitialMmapSize is the initial mmap size of the database | |||
| // in bytes. Read transactions won't block write transactions | |||
| // if the InitialMmapSize is large enough to hold the database mmap | |||
| // size. (See DB.Begin for more information.) | |||
| // | |||
| // If <=0, the initial map size is 0. | |||
| // If InitialMmapSize is smaller than the previous database size, | |||
| // it has no effect. | |||
| InitialMmapSize int | |||
| } | |||
| // DefaultOptions represents the options used if nil options are passed into Open(). | |||
| // No timeout is used which will cause Bolt to wait indefinitely for a lock. | |||
| var DefaultOptions = &Options{ | |||
| Timeout: 0, | |||
| NoGrowSync: false, | |||
| } | |||
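| // An illustrative sketch (not part of bolt): opening a database with | |||
| // explicit options; the path and timeout values are assumptions. | |||
| // | |||
| //    db, err := bolt.Open("my.db", 0600, &bolt.Options{ | |||
| //        Timeout:  time.Second, | |||
| //        ReadOnly: true, | |||
| //    }) | |||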
| // Stats represents statistics about the database. | |||
| type Stats struct { | |||
| // Freelist stats | |||
| FreePageN int // total number of free pages on the freelist | |||
| PendingPageN int // total number of pending pages on the freelist | |||
| FreeAlloc int // total bytes allocated in free pages | |||
| FreelistInuse int // total bytes used by the freelist | |||
| // Transaction stats | |||
| TxN int // total number of started read transactions | |||
| OpenTxN int // number of currently open read transactions | |||
| TxStats TxStats // global, ongoing stats. | |||
| } | |||
| // Sub calculates and returns the difference between two sets of database stats. | |||
| // This is useful when obtaining stats at two different points in time and | |||
| // you need the performance counters that occurred within that time span. | |||
| func (s *Stats) Sub(other *Stats) Stats { | |||
| if other == nil { | |||
| return *s | |||
| } | |||
| var diff Stats | |||
| diff.FreePageN = s.FreePageN | |||
| diff.PendingPageN = s.PendingPageN | |||
| diff.FreeAlloc = s.FreeAlloc | |||
| diff.FreelistInuse = s.FreelistInuse | |||
| diff.TxN = s.TxN - other.TxN | |||
| diff.TxStats = s.TxStats.Sub(&other.TxStats) | |||
| return diff | |||
| } | |||
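| // An illustrative sketch (not part of bolt): sample the stats at two points | |||
| // in time and diff them to get the counters for just that window. | |||
| // | |||
| //    prev := db.Stats() | |||
| //    time.Sleep(10 * time.Second) | |||
| //    cur := db.Stats() | |||
| //    diff := cur.Sub(&prev) | |||
| //    fmt.Println(diff.TxN) | |||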
| func (s *Stats) add(other *Stats) { | |||
| s.TxStats.add(&other.TxStats) | |||
| } | |||
| type Info struct { | |||
| Data uintptr | |||
| PageSize int | |||
| } | |||
| type meta struct { | |||
| magic uint32 | |||
| version uint32 | |||
| pageSize uint32 | |||
| flags uint32 | |||
| root bucket | |||
| freelist pgid | |||
| pgid pgid | |||
| txid txid | |||
| checksum uint64 | |||
| } | |||
| // validate checks the marker bytes and version of the meta page to ensure it matches this binary. | |||
| func (m *meta) validate() error { | |||
| if m.checksum != 0 && m.checksum != m.sum64() { | |||
| return ErrChecksum | |||
| } else if m.magic != magic { | |||
| return ErrInvalid | |||
| } else if m.version != version { | |||
| return ErrVersionMismatch | |||
| } | |||
| return nil | |||
| } | |||
| // copy copies one meta object to another. | |||
| func (m *meta) copy(dest *meta) { | |||
| *dest = *m | |||
| } | |||
| // write writes the meta onto a page. | |||
| func (m *meta) write(p *page) { | |||
| if m.root.root >= m.pgid { | |||
| panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) | |||
| } else if m.freelist >= m.pgid { | |||
| panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) | |||
| } | |||
| // Page id is either going to be 0 or 1 which we can determine by the transaction ID. | |||
| p.id = pgid(m.txid % 2) | |||
| p.flags |= metaPageFlag | |||
| // Calculate the checksum. | |||
| m.checksum = m.sum64() | |||
| m.copy(p.meta()) | |||
| } | |||
| // sum64 generates the checksum for the meta. | |||
| func (m *meta) sum64() uint64 { | |||
| var h = fnv.New64a() | |||
| _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) | |||
| return h.Sum64() | |||
| } | |||
| // _assert will panic with a given formatted message if the given condition is false. | |||
| func _assert(condition bool, msg string, v ...interface{}) { | |||
| if !condition { | |||
| panic(fmt.Sprintf("assertion failed: "+msg, v...)) | |||
| } | |||
| } | |||
| func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } | |||
| func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } | |||
| func printstack() { | |||
| stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") | |||
| fmt.Fprintln(os.Stderr, stack) | |||
| } | |||
| @@ -0,0 +1,44 @@ | |||
| /* | |||
| Package bolt implements a low-level key/value store in pure Go. It supports | |||
| fully serializable transactions, ACID semantics, and lock-free MVCC with | |||
| multiple readers and a single writer. Bolt can be used for projects that | |||
| want a simple data store without the need to add large dependencies such as | |||
| Postgres or MySQL. | |||
| Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is | |||
| optimized for fast read access and does not require recovery in the event of a | |||
| system crash. Transactions which have not finished committing will simply be | |||
| rolled back in the event of a crash. | |||
| The design of Bolt is based on Howard Chu's LMDB database project. | |||
| Bolt currently works on Windows, Mac OS X, and Linux. | |||
| Basics | |||
| There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is | |||
| a collection of buckets and is represented by a single file on disk. A bucket is | |||
| a collection of unique keys that are associated with values. | |||
| Transactions provide either read-only or read-write access to the database. | |||
| Read-only transactions can retrieve key/value pairs and can use Cursors to | |||
| iterate over the dataset sequentially. Read-write transactions can create and | |||
| delete buckets and can insert and remove keys. Only one read-write transaction | |||
| is allowed at a time. | |||
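| A minimal open/update round trip, as an illustrative sketch (the file and | |||
| bucket names are assumptions, and error handling is abbreviated): | |||
|     db, err := bolt.Open("bolt.db", 0600, nil) | |||
|     if err != nil { | |||
|         log.Fatal(err) | |||
|     } | |||
|     defer db.Close() | |||
|     err = db.Update(func(tx *bolt.Tx) error { | |||
|         _, err := tx.CreateBucketIfNotExists([]byte("widgets")) | |||
|         return err | |||
|     }) | |||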
| Caveats | |||
| The database uses a read-only, memory-mapped data file to ensure that | |||
| applications cannot corrupt the database, however, this means that keys and | |||
| values returned from Bolt cannot be changed. Writing to a read-only byte slice | |||
| will cause Go to panic. | |||
| Keys and values retrieved from the database are only valid for the life of | |||
| the transaction. When used outside the transaction, these byte slices can | |||
| point to different data or can point to invalid memory which will cause a panic. | |||
| */ | |||
| package bolt | |||
| @@ -0,0 +1,70 @@ | |||
| package bolt | |||
| import "errors" | |||
| // These errors can be returned when opening or calling methods on a DB. | |||
| var ( | |||
| // ErrDatabaseNotOpen is returned when a DB instance is accessed before it | |||
| // is opened or after it is closed. | |||
| ErrDatabaseNotOpen = errors.New("database not open") | |||
| // ErrDatabaseOpen is returned when opening a database that is | |||
| // already open. | |||
| ErrDatabaseOpen = errors.New("database already open") | |||
| // ErrInvalid is returned when a data file is not a Bolt-formatted database. | |||
| ErrInvalid = errors.New("invalid database") | |||
| // ErrVersionMismatch is returned when the data file was created with a | |||
| // different version of Bolt. | |||
| ErrVersionMismatch = errors.New("version mismatch") | |||
| // ErrChecksum is returned when either meta page checksum does not match. | |||
| ErrChecksum = errors.New("checksum error") | |||
| // ErrTimeout is returned when a database cannot obtain an exclusive lock | |||
| // on the data file after the timeout passed to Open(). | |||
| ErrTimeout = errors.New("timeout") | |||
| ) | |||
| // These errors can occur when beginning or committing a Tx. | |||
| var ( | |||
| // ErrTxNotWritable is returned when performing a write operation on a | |||
| // read-only transaction. | |||
| ErrTxNotWritable = errors.New("tx not writable") | |||
| // ErrTxClosed is returned when committing or rolling back a transaction | |||
| // that has already been committed or rolled back. | |||
| ErrTxClosed = errors.New("tx closed") | |||
| // ErrDatabaseReadOnly is returned when a mutating transaction is started on a | |||
| // read-only database. | |||
| ErrDatabaseReadOnly = errors.New("database is in read-only mode") | |||
| ) | |||
| // These errors can occur when putting or deleting a value or a bucket. | |||
| var ( | |||
| // ErrBucketNotFound is returned when trying to access a bucket that has | |||
| // not been created yet. | |||
| ErrBucketNotFound = errors.New("bucket not found") | |||
| // ErrBucketExists is returned when creating a bucket that already exists. | |||
| ErrBucketExists = errors.New("bucket already exists") | |||
| // ErrBucketNameRequired is returned when creating a bucket with a blank name. | |||
| ErrBucketNameRequired = errors.New("bucket name required") | |||
| // ErrKeyRequired is returned when inserting a zero-length key. | |||
| ErrKeyRequired = errors.New("key required") | |||
| // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. | |||
| ErrKeyTooLarge = errors.New("key too large") | |||
| // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. | |||
| ErrValueTooLarge = errors.New("value too large") | |||
| // ErrIncompatibleValue is returned when trying to create or delete a bucket | |||
| // on an existing non-bucket key or when trying to create or delete a | |||
| // non-bucket key on an existing bucket key. | |||
| ErrIncompatibleValue = errors.New("incompatible value") | |||
| ) | |||
| @@ -0,0 +1,242 @@ | |||
| package bolt | |||
| import ( | |||
| "fmt" | |||
| "sort" | |||
| "unsafe" | |||
| ) | |||
| // freelist represents a list of all pages that are available for allocation. | |||
| // It also tracks pages that have been freed but are still in use by open transactions. | |||
| type freelist struct { | |||
| ids []pgid // all free and available free page ids. | |||
| pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. | |||
| cache map[pgid]bool // fast lookup of all free and pending page ids. | |||
| } | |||
| // newFreelist returns an empty, initialized freelist. | |||
| func newFreelist() *freelist { | |||
| return &freelist{ | |||
| pending: make(map[txid][]pgid), | |||
| cache: make(map[pgid]bool), | |||
| } | |||
| } | |||
| // size returns the size of the page after serialization. | |||
| func (f *freelist) size() int { | |||
| return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) | |||
| } | |||
| // count returns count of pages on the freelist | |||
| func (f *freelist) count() int { | |||
| return f.free_count() + f.pending_count() | |||
| } | |||
| // free_count returns count of free pages | |||
| func (f *freelist) free_count() int { | |||
| return len(f.ids) | |||
| } | |||
| // pending_count returns count of pending pages | |||
| func (f *freelist) pending_count() int { | |||
| var count int | |||
| for _, list := range f.pending { | |||
| count += len(list) | |||
| } | |||
| return count | |||
| } | |||
| // all returns a list of all free ids and all pending ids in one sorted list. | |||
| func (f *freelist) all() []pgid { | |||
| m := make(pgids, 0) | |||
| for _, list := range f.pending { | |||
| m = append(m, list...) | |||
| } | |||
| sort.Sort(m) | |||
| return pgids(f.ids).merge(m) | |||
| } | |||
| // allocate returns the starting page id of a contiguous list of pages of a given size. | |||
| // If a contiguous block cannot be found then 0 is returned. | |||
| func (f *freelist) allocate(n int) pgid { | |||
| if len(f.ids) == 0 { | |||
| return 0 | |||
| } | |||
| var initial, previd pgid | |||
| for i, id := range f.ids { | |||
| if id <= 1 { | |||
| panic(fmt.Sprintf("invalid page allocation: %d", id)) | |||
| } | |||
| // Reset initial page if this is not contiguous. | |||
| if previd == 0 || id-previd != 1 { | |||
| initial = id | |||
| } | |||
| // If we found a contiguous block then remove it and return it. | |||
| if (id-initial)+1 == pgid(n) { | |||
| // If we're allocating off the beginning then take the fast path | |||
| // and just adjust the existing slice. This will use extra memory | |||
| // temporarily but the append() in free() will realloc the slice | |||
| // as is necessary. | |||
| if (i + 1) == n { | |||
| f.ids = f.ids[i+1:] | |||
| } else { | |||
| copy(f.ids[i-n+1:], f.ids[i+1:]) | |||
| f.ids = f.ids[:len(f.ids)-n] | |||
| } | |||
| // Remove from the free cache. | |||
| for i := pgid(0); i < pgid(n); i++ { | |||
| delete(f.cache, initial+i) | |||
| } | |||
| return initial | |||
| } | |||
| previd = id | |||
| } | |||
| return 0 | |||
| } | |||
| // free releases a page and its overflow for a given transaction id. | |||
| // If the page is already free then a panic will occur. | |||
| func (f *freelist) free(txid txid, p *page) { | |||
| if p.id <= 1 { | |||
| panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) | |||
| } | |||
| // Free page and all its overflow pages. | |||
| var ids = f.pending[txid] | |||
| for id := p.id; id <= p.id+pgid(p.overflow); id++ { | |||
| // Verify that page is not already free. | |||
| if f.cache[id] { | |||
| panic(fmt.Sprintf("page %d already freed", id)) | |||
| } | |||
| // Add to the freelist and cache. | |||
| ids = append(ids, id) | |||
| f.cache[id] = true | |||
| } | |||
| f.pending[txid] = ids | |||
| } | |||
| // release moves all page ids for a transaction id (or older) to the freelist. | |||
| func (f *freelist) release(txid txid) { | |||
| m := make(pgids, 0) | |||
| for tid, ids := range f.pending { | |||
| if tid <= txid { | |||
| // Move transaction's pending pages to the available freelist. | |||
| // Don't remove from the cache since the page is still free. | |||
| m = append(m, ids...) | |||
| delete(f.pending, tid) | |||
| } | |||
| } | |||
| sort.Sort(m) | |||
| f.ids = pgids(f.ids).merge(m) | |||
| } | |||
| // rollback removes the pages from a given pending tx. | |||
| func (f *freelist) rollback(txid txid) { | |||
| // Remove page ids from cache. | |||
| for _, id := range f.pending[txid] { | |||
| delete(f.cache, id) | |||
| } | |||
| // Remove pages from pending list. | |||
| delete(f.pending, txid) | |||
| } | |||
| // freed returns whether a given page is in the free list. | |||
| func (f *freelist) freed(pgid pgid) bool { | |||
| return f.cache[pgid] | |||
| } | |||
| // read initializes the freelist from a freelist page. | |||
| func (f *freelist) read(p *page) { | |||
| // If the page.count is at the max uint16 value (64k) then it's considered | |||
| // an overflow and the size of the freelist is stored as the first element. | |||
| idx, count := 0, int(p.count) | |||
| if count == 0xFFFF { | |||
| idx = 1 | |||
| count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) | |||
| } | |||
| // Copy the list of page ids from the freelist. | |||
| ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count] | |||
| f.ids = make([]pgid, len(ids)) | |||
| copy(f.ids, ids) | |||
| // Make sure they're sorted. | |||
| sort.Sort(pgids(f.ids)) | |||
| // Rebuild the page cache. | |||
| f.reindex() | |||
| } | |||
| // write writes the page ids onto a freelist page. All free and pending ids are | |||
| // saved to disk since in the event of a program crash, all pending ids will | |||
| // become free. | |||
| func (f *freelist) write(p *page) error { | |||
| // Combine the old free pgids and pgids waiting on an open transaction. | |||
| ids := f.all() | |||
| // Update the header flag. | |||
| p.flags |= freelistPageFlag | |||
| // The page.count can only hold up to 64k elements so if we overflow that | |||
| // number then we handle it by putting the size in the first element. | |||
| if len(ids) < 0xFFFF { | |||
| p.count = uint16(len(ids)) | |||
| copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) | |||
| } else { | |||
| p.count = 0xFFFF | |||
| ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) | |||
| copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) | |||
| } | |||
| return nil | |||
| } | |||
| // reload reads the freelist from a page and filters out pending items. | |||
| func (f *freelist) reload(p *page) { | |||
| f.read(p) | |||
| // Build a cache of only pending pages. | |||
| pcache := make(map[pgid]bool) | |||
| for _, pendingIDs := range f.pending { | |||
| for _, pendingID := range pendingIDs { | |||
| pcache[pendingID] = true | |||
| } | |||
| } | |||
| // Check each page in the freelist and build a new available freelist | |||
| // with any pages not in the pending lists. | |||
| var a []pgid | |||
| for _, id := range f.ids { | |||
| if !pcache[id] { | |||
| a = append(a, id) | |||
| } | |||
| } | |||
| f.ids = a | |||
| // Once the available list is rebuilt then rebuild the free cache so that | |||
| // it includes the available and pending free pages. | |||
| f.reindex() | |||
| } | |||
| // reindex rebuilds the free cache based on available and pending free lists. | |||
| func (f *freelist) reindex() { | |||
| f.cache = make(map[pgid]bool) | |||
| for _, id := range f.ids { | |||
| f.cache[id] = true | |||
| } | |||
| for _, pendingIDs := range f.pending { | |||
| for _, pendingID := range pendingIDs { | |||
| f.cache[pendingID] = true | |||
| } | |||
| } | |||
| } | |||
| @@ -0,0 +1,599 @@ | |||
| package bolt | |||
| import ( | |||
| "bytes" | |||
| "fmt" | |||
| "sort" | |||
| "unsafe" | |||
| ) | |||
| // node represents an in-memory, deserialized page. | |||
| type node struct { | |||
| bucket *Bucket | |||
| isLeaf bool | |||
| unbalanced bool | |||
| spilled bool | |||
| key []byte | |||
| pgid pgid | |||
| parent *node | |||
| children nodes | |||
| inodes inodes | |||
| } | |||
| // root returns the top-level node this node is attached to. | |||
| func (n *node) root() *node { | |||
| if n.parent == nil { | |||
| return n | |||
| } | |||
| return n.parent.root() | |||
| } | |||
| // minKeys returns the minimum number of inodes this node should have. | |||
| func (n *node) minKeys() int { | |||
| if n.isLeaf { | |||
| return 1 | |||
| } | |||
| return 2 | |||
| } | |||
| // size returns the size of the node after serialization. | |||
| func (n *node) size() int { | |||
| sz, elsz := pageHeaderSize, n.pageElementSize() | |||
| for i := 0; i < len(n.inodes); i++ { | |||
| item := &n.inodes[i] | |||
| sz += elsz + len(item.key) + len(item.value) | |||
| } | |||
| return sz | |||
| } | |||
| // sizeLessThan returns true if the node is less than a given size. | |||
| // This is an optimization to avoid calculating a large node when we only need | |||
| // to know if it fits inside a certain page size. | |||
| func (n *node) sizeLessThan(v int) bool { | |||
| sz, elsz := pageHeaderSize, n.pageElementSize() | |||
| for i := 0; i < len(n.inodes); i++ { | |||
| item := &n.inodes[i] | |||
| sz += elsz + len(item.key) + len(item.value) | |||
| if sz >= v { | |||
| return false | |||
| } | |||
| } | |||
| return true | |||
| } | |||
| // pageElementSize returns the size of each page element based on the type of node. | |||
| func (n *node) pageElementSize() int { | |||
| if n.isLeaf { | |||
| return leafPageElementSize | |||
| } | |||
| return branchPageElementSize | |||
| } | |||
| // childAt returns the child node at a given index. | |||
| func (n *node) childAt(index int) *node { | |||
| if n.isLeaf { | |||
| panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) | |||
| } | |||
| return n.bucket.node(n.inodes[index].pgid, n) | |||
| } | |||
| // childIndex returns the index of a given child node. | |||
| func (n *node) childIndex(child *node) int { | |||
| index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) | |||
| return index | |||
| } | |||
| // numChildren returns the number of children. | |||
| func (n *node) numChildren() int { | |||
| return len(n.inodes) | |||
| } | |||
| // nextSibling returns the next node with the same parent. | |||
| func (n *node) nextSibling() *node { | |||
| if n.parent == nil { | |||
| return nil | |||
| } | |||
| index := n.parent.childIndex(n) | |||
| if index >= n.parent.numChildren()-1 { | |||
| return nil | |||
| } | |||
| return n.parent.childAt(index + 1) | |||
| } | |||
| // prevSibling returns the previous node with the same parent. | |||
| func (n *node) prevSibling() *node { | |||
| if n.parent == nil { | |||
| return nil | |||
| } | |||
| index := n.parent.childIndex(n) | |||
| if index == 0 { | |||
| return nil | |||
| } | |||
| return n.parent.childAt(index - 1) | |||
| } | |||
| // put inserts a key/value. | |||
| func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { | |||
| if pgid >= n.bucket.tx.meta.pgid { | |||
| panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) | |||
| } else if len(oldKey) <= 0 { | |||
| panic("put: zero-length old key") | |||
| } else if len(newKey) <= 0 { | |||
| panic("put: zero-length new key") | |||
| } | |||
| // Find insertion index. | |||
| index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) | |||
| // Add capacity and shift nodes if we don't have an exact match and need to insert. | |||
| exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) | |||
| if !exact { | |||
| n.inodes = append(n.inodes, inode{}) | |||
| copy(n.inodes[index+1:], n.inodes[index:]) | |||
| } | |||
| inode := &n.inodes[index] | |||
| inode.flags = flags | |||
| inode.key = newKey | |||
| inode.value = value | |||
| inode.pgid = pgid | |||
| _assert(len(inode.key) > 0, "put: zero-length inode key") | |||
| } | |||
| // del removes a key from the node. | |||
| func (n *node) del(key []byte) { | |||
| // Find index of key. | |||
| index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) | |||
| // Exit if the key isn't found. | |||
| if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { | |||
| return | |||
| } | |||
| // Delete inode from the node. | |||
| n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) | |||
| // Mark the node as needing rebalancing. | |||
| n.unbalanced = true | |||
| } | |||
| // read initializes the node from a page. | |||
| func (n *node) read(p *page) { | |||
| n.pgid = p.id | |||
| n.isLeaf = ((p.flags & leafPageFlag) != 0) | |||
| n.inodes = make(inodes, int(p.count)) | |||
| for i := 0; i < int(p.count); i++ { | |||
| inode := &n.inodes[i] | |||
| if n.isLeaf { | |||
| elem := p.leafPageElement(uint16(i)) | |||
| inode.flags = elem.flags | |||
| inode.key = elem.key() | |||
| inode.value = elem.value() | |||
| } else { | |||
| elem := p.branchPageElement(uint16(i)) | |||
| inode.pgid = elem.pgid | |||
| inode.key = elem.key() | |||
| } | |||
| _assert(len(inode.key) > 0, "read: zero-length inode key") | |||
| } | |||
| // Save first key so we can find the node in the parent when we spill. | |||
| if len(n.inodes) > 0 { | |||
| n.key = n.inodes[0].key | |||
| _assert(len(n.key) > 0, "read: zero-length node key") | |||
| } else { | |||
| n.key = nil | |||
| } | |||
| } | |||
| // write writes the items onto one or more pages. | |||
| func (n *node) write(p *page) { | |||
| // Initialize page. | |||
| if n.isLeaf { | |||
| p.flags |= leafPageFlag | |||
| } else { | |||
| p.flags |= branchPageFlag | |||
| } | |||
| if len(n.inodes) >= 0xFFFF { | |||
| panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) | |||
| } | |||
| p.count = uint16(len(n.inodes)) | |||
| // Loop over each item and write it to the page. | |||
| b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] | |||
| for i, item := range n.inodes { | |||
| _assert(len(item.key) > 0, "write: zero-length inode key") | |||
| // Write the page element. | |||
| if n.isLeaf { | |||
| elem := p.leafPageElement(uint16(i)) | |||
| elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) | |||
| elem.flags = item.flags | |||
| elem.ksize = uint32(len(item.key)) | |||
| elem.vsize = uint32(len(item.value)) | |||
| } else { | |||
| elem := p.branchPageElement(uint16(i)) | |||
| elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) | |||
| elem.ksize = uint32(len(item.key)) | |||
| elem.pgid = item.pgid | |||
| _assert(elem.pgid != p.id, "write: circular dependency occurred") | |||
| } | |||
| // If the length of key+value is larger than the max allocation size | |||
| // then we need to reallocate the byte array pointer. | |||
| // | |||
| // See: https://github.com/boltdb/bolt/pull/335 | |||
| klen, vlen := len(item.key), len(item.value) | |||
| if len(b) < klen+vlen { | |||
| b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] | |||
| } | |||
| // Write data for the element to the end of the page. | |||
| copy(b[0:], item.key) | |||
| b = b[klen:] | |||
| copy(b[0:], item.value) | |||
| b = b[vlen:] | |||
| } | |||
| // DEBUG ONLY: n.dump() | |||
| } | |||
| // split breaks up a node into multiple smaller nodes, if appropriate. | |||
| // This should only be called from the spill() function. | |||
| func (n *node) split(pageSize int) []*node { | |||
| var nodes []*node | |||
| node := n | |||
| for { | |||
| // Split node into two. | |||
| a, b := node.splitTwo(pageSize) | |||
| nodes = append(nodes, a) | |||
| // If we can't split then exit the loop. | |||
| if b == nil { | |||
| break | |||
| } | |||
| // Set node to b so it gets split on the next iteration. | |||
| node = b | |||
| } | |||
| return nodes | |||
| } | |||
| // splitTwo breaks up a node into two smaller nodes, if appropriate. | |||
| // This should only be called from the split() function. | |||
| func (n *node) splitTwo(pageSize int) (*node, *node) { | |||
| // Ignore the split if the page doesn't have at least enough nodes for | |||
| // two pages or if the nodes can fit in a single page. | |||
| if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { | |||
| return n, nil | |||
| } | |||
| // Determine the threshold before starting a new node. | |||
| var fillPercent = n.bucket.FillPercent | |||
| if fillPercent < minFillPercent { | |||
| fillPercent = minFillPercent | |||
| } else if fillPercent > maxFillPercent { | |||
| fillPercent = maxFillPercent | |||
| } | |||
| threshold := int(float64(pageSize) * fillPercent) | |||
| // Determine split position and sizes of the two pages. | |||
| splitIndex, _ := n.splitIndex(threshold) | |||
| // Split node into two separate nodes. | |||
| // If there's no parent then we'll need to create one. | |||
| if n.parent == nil { | |||
| n.parent = &node{bucket: n.bucket, children: []*node{n}} | |||
| } | |||
| // Create a new node and add it to the parent. | |||
| next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} | |||
| n.parent.children = append(n.parent.children, next) | |||
| // Split inodes across two nodes. | |||
| next.inodes = n.inodes[splitIndex:] | |||
| n.inodes = n.inodes[:splitIndex] | |||
| // Update the statistics. | |||
| n.bucket.tx.stats.Split++ | |||
| return n, next | |||
| } | |||
| // splitIndex finds the position where a page will fill a given threshold. | |||
| // It returns the index as well as the size of the first page. | |||
| // It should only be called from split(). | |||
| func (n *node) splitIndex(threshold int) (index, sz int) { | |||
| sz = pageHeaderSize | |||
| // Loop until we only have the minimum number of keys required for the second page. | |||
| for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { | |||
| index = i | |||
| inode := n.inodes[i] | |||
| elsize := n.pageElementSize() + len(inode.key) + len(inode.value) | |||
| // If we have at least the minimum number of keys and adding another | |||
| // node would put us over the threshold then exit and return. | |||
| if i >= minKeysPerPage && sz+elsize > threshold { | |||
| break | |||
| } | |||
| // Add the element size to the total size. | |||
| sz += elsize | |||
| } | |||
| return | |||
| } | |||
| // spill writes the nodes to dirty pages and splits nodes as it goes. | |||
| // Returns an error if dirty pages cannot be allocated. | |||
| func (n *node) spill() error { | |||
| var tx = n.bucket.tx | |||
| if n.spilled { | |||
| return nil | |||
| } | |||
| // Spill child nodes first. Child nodes can materialize sibling nodes in | |||
| // the case of split-merge so we cannot use a range loop. We have to check | |||
| // the children size on every loop iteration. | |||
| sort.Sort(n.children) | |||
| for i := 0; i < len(n.children); i++ { | |||
| if err := n.children[i].spill(); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| // We no longer need the child list because it's only used for spill tracking. | |||
| n.children = nil | |||
| // Split nodes into appropriate sizes. The first node will always be n. | |||
| var nodes = n.split(tx.db.pageSize) | |||
| for _, node := range nodes { | |||
| // Add node's page to the freelist if it's not new. | |||
| if node.pgid > 0 { | |||
| tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) | |||
| node.pgid = 0 | |||
| } | |||
| // Allocate contiguous space for the node. | |||
| p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| // Write the node. | |||
| if p.id >= tx.meta.pgid { | |||
| panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) | |||
| } | |||
| node.pgid = p.id | |||
| node.write(p) | |||
| node.spilled = true | |||
| // Insert into parent inodes. | |||
| if node.parent != nil { | |||
| var key = node.key | |||
| if key == nil { | |||
| key = node.inodes[0].key | |||
| } | |||
| node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) | |||
| node.key = node.inodes[0].key | |||
| _assert(len(node.key) > 0, "spill: zero-length node key") | |||
| } | |||
| // Update the statistics. | |||
| tx.stats.Spill++ | |||
| } | |||
| // If the root node split and created a new root then we need to spill that | |||
| // as well. We'll clear out the children to make sure it doesn't try to respill. | |||
| if n.parent != nil && n.parent.pgid == 0 { | |||
| n.children = nil | |||
| return n.parent.spill() | |||
| } | |||
| return nil | |||
| } | |||
| // rebalance attempts to combine the node with sibling nodes if the node fill | |||
| // size is below a threshold or if there are not enough keys. | |||
| func (n *node) rebalance() { | |||
| if !n.unbalanced { | |||
| return | |||
| } | |||
| n.unbalanced = false | |||
| // Update statistics. | |||
| n.bucket.tx.stats.Rebalance++ | |||
| // Ignore if node is above threshold (25%) and has enough keys. | |||
| var threshold = n.bucket.tx.db.pageSize / 4 | |||
| if n.size() > threshold && len(n.inodes) > n.minKeys() { | |||
| return | |||
| } | |||
| // Root node has special handling. | |||
| if n.parent == nil { | |||
| // If root node is a branch and only has one node then collapse it. | |||
| if !n.isLeaf && len(n.inodes) == 1 { | |||
| // Move root's child up. | |||
| child := n.bucket.node(n.inodes[0].pgid, n) | |||
| n.isLeaf = child.isLeaf | |||
| n.inodes = child.inodes[:] | |||
| n.children = child.children | |||
| // Reparent all child nodes being moved. | |||
| for _, inode := range n.inodes { | |||
| if child, ok := n.bucket.nodes[inode.pgid]; ok { | |||
| child.parent = n | |||
| } | |||
| } | |||
| // Remove old child. | |||
| child.parent = nil | |||
| delete(n.bucket.nodes, child.pgid) | |||
| child.free() | |||
| } | |||
| return | |||
| } | |||
| // If node has no keys then just remove it. | |||
| if n.numChildren() == 0 { | |||
| n.parent.del(n.key) | |||
| n.parent.removeChild(n) | |||
| delete(n.bucket.nodes, n.pgid) | |||
| n.free() | |||
| n.parent.rebalance() | |||
| return | |||
| } | |||
| _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") | |||
| // Destination node is right sibling if idx == 0, otherwise left sibling. | |||
| var target *node | |||
| var useNextSibling = (n.parent.childIndex(n) == 0) | |||
| if useNextSibling { | |||
| target = n.nextSibling() | |||
| } else { | |||
| target = n.prevSibling() | |||
| } | |||
| // If both this node and the target node are too small then merge them. | |||
| if useNextSibling { | |||
| // Reparent all child nodes being moved. | |||
| for _, inode := range target.inodes { | |||
| if child, ok := n.bucket.nodes[inode.pgid]; ok { | |||
| child.parent.removeChild(child) | |||
| child.parent = n | |||
| child.parent.children = append(child.parent.children, child) | |||
| } | |||
| } | |||
| // Copy over inodes from target and remove target. | |||
| n.inodes = append(n.inodes, target.inodes...) | |||
| n.parent.del(target.key) | |||
| n.parent.removeChild(target) | |||
| delete(n.bucket.nodes, target.pgid) | |||
| target.free() | |||
| } else { | |||
| // Reparent all child nodes being moved. | |||
| for _, inode := range n.inodes { | |||
| if child, ok := n.bucket.nodes[inode.pgid]; ok { | |||
| child.parent.removeChild(child) | |||
| child.parent = target | |||
| child.parent.children = append(child.parent.children, child) | |||
| } | |||
| } | |||
| // Copy over inodes to target and remove node. | |||
| target.inodes = append(target.inodes, n.inodes...) | |||
| n.parent.del(n.key) | |||
| n.parent.removeChild(n) | |||
| delete(n.bucket.nodes, n.pgid) | |||
| n.free() | |||
| } | |||
| // Either this node or the target node was deleted from the parent so rebalance it. | |||
| n.parent.rebalance() | |||
| } | |||
| // removeChild removes a node from the list of in-memory children. | |||
| // This does not affect the inodes. | |||
| func (n *node) removeChild(target *node) { | |||
| for i, child := range n.children { | |||
| if child == target { | |||
| n.children = append(n.children[:i], n.children[i+1:]...) | |||
| return | |||
| } | |||
| } | |||
| } | |||
| // dereference causes the node to copy all its inode key/value references to heap memory. | |||
| // This is required when the mmap is reallocated so inodes are not pointing to stale data. | |||
| func (n *node) dereference() { | |||
| if n.key != nil { | |||
| key := make([]byte, len(n.key)) | |||
| copy(key, n.key) | |||
| n.key = key | |||
| _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") | |||
| } | |||
| for i := range n.inodes { | |||
| inode := &n.inodes[i] | |||
| key := make([]byte, len(inode.key)) | |||
| copy(key, inode.key) | |||
| inode.key = key | |||
| _assert(len(inode.key) > 0, "dereference: zero-length inode key") | |||
| value := make([]byte, len(inode.value)) | |||
| copy(value, inode.value) | |||
| inode.value = value | |||
| } | |||
| // Recursively dereference children. | |||
| for _, child := range n.children { | |||
| child.dereference() | |||
| } | |||
| // Update statistics. | |||
| n.bucket.tx.stats.NodeDeref++ | |||
| } | |||
| // free adds the node's underlying page to the freelist. | |||
| func (n *node) free() { | |||
| if n.pgid != 0 { | |||
| n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) | |||
| n.pgid = 0 | |||
| } | |||
| } | |||
| // dump writes the contents of the node to STDERR for debugging purposes. | |||
| /* | |||
| func (n *node) dump() { | |||
| // Write node header. | |||
| var typ = "branch" | |||
| if n.isLeaf { | |||
| typ = "leaf" | |||
| } | |||
| warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) | |||
| // Write out abbreviated version of each item. | |||
| for _, item := range n.inodes { | |||
| if n.isLeaf { | |||
| if item.flags&bucketLeafFlag != 0 { | |||
| bucket := (*bucket)(unsafe.Pointer(&item.value[0])) | |||
| warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) | |||
| } else { | |||
| warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) | |||
| } | |||
| } else { | |||
| warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) | |||
| } | |||
| } | |||
| warn("") | |||
| } | |||
| */ | |||
| type nodes []*node | |||
| func (s nodes) Len() int { return len(s) } | |||
| func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } | |||
| func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } | |||
| // inode represents an internal node inside of a node. | |||
| // It can be used to point to elements in a page or point | |||
| // to an element which hasn't been added to a page yet. | |||
| type inode struct { | |||
| flags uint32 | |||
| pgid pgid | |||
| key []byte | |||
| value []byte | |||
| } | |||
| type inodes []inode | |||
| @@ -0,0 +1,172 @@ | |||
| package bolt | |||
| import ( | |||
| "fmt" | |||
| "os" | |||
| "sort" | |||
| "unsafe" | |||
| ) | |||
| const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) | |||
| const minKeysPerPage = 2 | |||
| const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) | |||
| const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) | |||
| const ( | |||
| branchPageFlag = 0x01 | |||
| leafPageFlag = 0x02 | |||
| metaPageFlag = 0x04 | |||
| freelistPageFlag = 0x10 | |||
| ) | |||
| const ( | |||
| bucketLeafFlag = 0x01 | |||
| ) | |||
| type pgid uint64 | |||
| type page struct { | |||
| id pgid | |||
| flags uint16 | |||
| count uint16 | |||
| overflow uint32 | |||
| ptr uintptr | |||
| } | |||
| // typ returns a human readable page type string used for debugging. | |||
| func (p *page) typ() string { | |||
| if (p.flags & branchPageFlag) != 0 { | |||
| return "branch" | |||
| } else if (p.flags & leafPageFlag) != 0 { | |||
| return "leaf" | |||
| } else if (p.flags & metaPageFlag) != 0 { | |||
| return "meta" | |||
| } else if (p.flags & freelistPageFlag) != 0 { | |||
| return "freelist" | |||
| } | |||
| return fmt.Sprintf("unknown<%02x>", p.flags) | |||
| } | |||
| // meta returns a pointer to the metadata section of the page. | |||
| func (p *page) meta() *meta { | |||
| return (*meta)(unsafe.Pointer(&p.ptr)) | |||
| } | |||
| // leafPageElement retrieves the leaf node by index | |||
| func (p *page) leafPageElement(index uint16) *leafPageElement { | |||
| n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] | |||
| return n | |||
| } | |||
| // leafPageElements retrieves a list of leaf nodes. | |||
| func (p *page) leafPageElements() []leafPageElement { | |||
| return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] | |||
| } | |||
| // branchPageElement retrieves the branch node by index | |||
| func (p *page) branchPageElement(index uint16) *branchPageElement { | |||
| return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] | |||
| } | |||
| // branchPageElements retrieves a list of branch nodes. | |||
| func (p *page) branchPageElements() []branchPageElement { | |||
| return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] | |||
| } | |||
| // hexdump writes n bytes of the page to STDERR as hex output. | |||
| func (p *page) hexdump(n int) { | |||
| buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] | |||
| fmt.Fprintf(os.Stderr, "%x\n", buf) | |||
| } | |||
| type pages []*page | |||
| func (s pages) Len() int { return len(s) } | |||
| func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } | |||
| func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } | |||
| // branchPageElement represents a node on a branch page. | |||
| type branchPageElement struct { | |||
| pos uint32 | |||
| ksize uint32 | |||
| pgid pgid | |||
| } | |||
| // key returns a byte slice of the node key. | |||
| func (n *branchPageElement) key() []byte { | |||
| buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) | |||
| return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] | |||
| } | |||
| // leafPageElement represents a node on a leaf page. | |||
| type leafPageElement struct { | |||
| flags uint32 | |||
| pos uint32 | |||
| ksize uint32 | |||
| vsize uint32 | |||
| } | |||
| // key returns a byte slice of the node key. | |||
| func (n *leafPageElement) key() []byte { | |||
| buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) | |||
| return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] | |||
| } | |||
| // value returns a byte slice of the node value. | |||
| func (n *leafPageElement) value() []byte { | |||
| buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) | |||
| return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] | |||
| } | |||
| // PageInfo represents human readable information about a page. | |||
| type PageInfo struct { | |||
| ID int | |||
| Type string | |||
| Count int | |||
| OverflowCount int | |||
| } | |||
| type pgids []pgid | |||
| func (s pgids) Len() int { return len(s) } | |||
| func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } | |||
| func (s pgids) Less(i, j int) bool { return s[i] < s[j] } | |||
| // merge returns the sorted union of a and b. | |||
| func (a pgids) merge(b pgids) pgids { | |||
| // Return the other slice if one is empty. | |||
| if len(a) == 0 { | |||
| return b | |||
| } else if len(b) == 0 { | |||
| return a | |||
| } | |||
| // Create a list to hold all elements from both lists. | |||
| merged := make(pgids, 0, len(a)+len(b)) | |||
| // Assign lead to the slice with a lower starting value, follow to the higher value. | |||
| lead, follow := a, b | |||
| if b[0] < a[0] { | |||
| lead, follow = b, a | |||
| } | |||
| // Continue while there are elements in the lead. | |||
| for len(lead) > 0 { | |||
| // Merge the largest prefix of lead whose values do not exceed follow[0]. | |||
| n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) | |||
| merged = append(merged, lead[:n]...) | |||
| if n >= len(lead) { | |||
| break | |||
| } | |||
| // Swap lead and follow. | |||
| lead, follow = follow, lead[n:] | |||
| } | |||
| // Append what's left in follow. | |||
| merged = append(merged, follow...) | |||
| return merged | |||
| } | |||
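| // Illustrative note (not part of bolt): merge is a plain sorted merge with | |||
| // no deduplication, e.g. pgids{1, 4, 7}.merge(pgids{2, 3, 9}) yields | |||
| // [1 2 3 4 7 9]. | |||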
| @@ -0,0 +1,666 @@ | |||
| package bolt | |||
| import ( | |||
| "fmt" | |||
| "io" | |||
| "os" | |||
| "sort" | |||
| "strings" | |||
| "time" | |||
| "unsafe" | |||
| ) | |||
| // txid represents the internal transaction identifier. | |||
| type txid uint64 | |||
| // Tx represents a read-only or read/write transaction on the database. | |||
| // Read-only transactions can be used for retrieving values for keys and creating cursors. | |||
| // Read/write transactions can create and remove buckets and create and remove keys. | |||
| // | |||
| // IMPORTANT: You must commit or rollback transactions when you are done with | |||
| // them. Pages cannot be reclaimed by the writer until no more transactions | |||
| // are using them. A long running read transaction can cause the database to | |||
| // quickly grow. | |||
| type Tx struct { | |||
| writable bool | |||
| managed bool | |||
| db *DB | |||
| meta *meta | |||
| root Bucket | |||
| pages map[pgid]*page | |||
| stats TxStats | |||
| commitHandlers []func() | |||
| // WriteFlag specifies the flag for write-related methods like WriteTo(). | |||
| // Tx opens the database file with the specified flag to copy the data. | |||
| // | |||
| // By default, the flag is unset, which works well for mostly in-memory | |||
| // workloads. For databases that are much larger than available RAM, | |||
| // set the flag to syscall.O_DIRECT to avoid trashing the page cache. | |||
| WriteFlag int | |||
| } | |||
| // init initializes the transaction. | |||
| func (tx *Tx) init(db *DB) { | |||
| tx.db = db | |||
| tx.pages = nil | |||
| // Copy the meta page since it can be changed by the writer. | |||
| tx.meta = &meta{} | |||
| db.meta().copy(tx.meta) | |||
| // Copy over the root bucket. | |||
| tx.root = newBucket(tx) | |||
| tx.root.bucket = &bucket{} | |||
| *tx.root.bucket = tx.meta.root | |||
| // Increment the transaction id and add a page cache for writable transactions. | |||
| if tx.writable { | |||
| tx.pages = make(map[pgid]*page) | |||
| tx.meta.txid += txid(1) | |||
| } | |||
| } | |||
| // ID returns the transaction id. | |||
| func (tx *Tx) ID() int { | |||
| return int(tx.meta.txid) | |||
| } | |||
| // DB returns a reference to the database that created the transaction. | |||
| func (tx *Tx) DB() *DB { | |||
| return tx.db | |||
| } | |||
| // Size returns current database size in bytes as seen by this transaction. | |||
| func (tx *Tx) Size() int64 { | |||
| return int64(tx.meta.pgid) * int64(tx.db.pageSize) | |||
| } | |||
| // Writable returns whether the transaction can perform write operations. | |||
| func (tx *Tx) Writable() bool { | |||
| return tx.writable | |||
| } | |||
| // Cursor creates a cursor associated with the root bucket. | |||
| // All items in the cursor will return a nil value because all root bucket keys point to buckets. | |||
| // The cursor is only valid as long as the transaction is open. | |||
| // Do not use a cursor after the transaction is closed. | |||
| func (tx *Tx) Cursor() *Cursor { | |||
| return tx.root.Cursor() | |||
| } | |||
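| // An illustrative sketch (not part of bolt): scanning top-level bucket names | |||
| // with the root cursor; values are nil because root keys point to buckets. | |||
| // | |||
| //    c := tx.Cursor() | |||
| //    for k, _ := c.First(); k != nil; k, _ = c.Next() { | |||
| //        fmt.Printf("bucket: %s\n", k) | |||
| //    } | |||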
| // Stats retrieves a copy of the current transaction statistics. | |||
| func (tx *Tx) Stats() TxStats { | |||
| return tx.stats | |||
| } | |||
| // Bucket retrieves a bucket by name. | |||
| // Returns nil if the bucket does not exist. | |||
| // The bucket instance is only valid for the lifetime of the transaction. | |||
| func (tx *Tx) Bucket(name []byte) *Bucket { | |||
| return tx.root.Bucket(name) | |||
| } | |||
| // CreateBucket creates a new bucket. | |||
| // Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. | |||
| // The bucket instance is only valid for the lifetime of the transaction. | |||
| func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { | |||
| return tx.root.CreateBucket(name) | |||
| } | |||
| // CreateBucketIfNotExists creates a new bucket if it doesn't already exist. | |||
| // Returns an error if the bucket name is blank, or if the bucket name is too long. | |||
| // The bucket instance is only valid for the lifetime of the transaction. | |||
| func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { | |||
| return tx.root.CreateBucketIfNotExists(name) | |||
| } | |||
| // DeleteBucket deletes a bucket. | |||
| // Returns an error if the bucket cannot be found or if the key represents a non-bucket value. | |||
| func (tx *Tx) DeleteBucket(name []byte) error { | |||
| return tx.root.DeleteBucket(name) | |||
| } | |||
| // ForEach executes a function for each bucket in the root. | |||
| // If the provided function returns an error then the iteration is stopped and | |||
| // the error is returned to the caller. | |||
| func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { | |||
| return tx.root.ForEach(func(k, v []byte) error { | |||
| if err := fn(k, tx.root.Bucket(k)); err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| }) | |||
| } | |||
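| // An illustrative sketch (not part of bolt): counting keys per top-level | |||
| // bucket; b.Stats().KeyN is assumed from bolt's bucket statistics API, | |||
| // which is defined outside this file. | |||
| // | |||
| //    err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { | |||
| //        fmt.Printf("%s has %d keys\n", name, b.Stats().KeyN) | |||
| //        return nil | |||
| //    }) | |||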
| // OnCommit adds a handler function to be executed after the transaction successfully commits. | |||
| func (tx *Tx) OnCommit(fn func()) { | |||
| tx.commitHandlers = append(tx.commitHandlers, fn) | |||
| } | |||
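| // An illustrative sketch (not part of bolt): a handler registered via | |||
| // OnCommit runs only after a successful commit; cache.Invalidate is a | |||
| // hypothetical application hook. | |||
| // | |||
| //    tx, err := db.Begin(true) | |||
| //    if err != nil { | |||
| //        return err | |||
| //    } | |||
| //    tx.OnCommit(func() { cache.Invalidate() }) | |||
| //    // ... mutate buckets via tx ... | |||
| //    return tx.Commit() | |||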
| // Commit writes all changes to disk and updates the meta page. | |||
| // Returns an error if a disk write error occurs, or if Commit is | |||
| // called on a read-only transaction. | |||
| func (tx *Tx) Commit() error { | |||
| _assert(!tx.managed, "managed tx commit not allowed") | |||
| if tx.db == nil { | |||
| return ErrTxClosed | |||
| } else if !tx.writable { | |||
| return ErrTxNotWritable | |||
| } | |||
| // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. | |||
| // Rebalance nodes which have had deletions. | |||
| var startTime = time.Now() | |||
| tx.root.rebalance() | |||
| if tx.stats.Rebalance > 0 { | |||
| tx.stats.RebalanceTime += time.Since(startTime) | |||
| } | |||
| // spill data onto dirty pages. | |||
| startTime = time.Now() | |||
| if err := tx.root.spill(); err != nil { | |||
| tx.rollback() | |||
| return err | |||
| } | |||
| tx.stats.SpillTime += time.Since(startTime) | |||
| // Free the old root bucket. | |||
| tx.meta.root.root = tx.root.root | |||
| opgid := tx.meta.pgid | |||
| // Free the freelist and allocate new pages for it. This will overestimate | |||
| // the size of the freelist but not underestimate the size (which would be bad). | |||
| tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) | |||
| p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) | |||
| if err != nil { | |||
| tx.rollback() | |||
| return err | |||
| } | |||
| if err := tx.db.freelist.write(p); err != nil { | |||
| tx.rollback() | |||
| return err | |||
| } | |||
| tx.meta.freelist = p.id | |||
| // If the high water mark has moved up then attempt to grow the database. | |||
| if tx.meta.pgid > opgid { | |||
| if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { | |||
| tx.rollback() | |||
| return err | |||
| } | |||
| } | |||
| // Write dirty pages to disk. | |||
| startTime = time.Now() | |||
| if err := tx.write(); err != nil { | |||
| tx.rollback() | |||
| return err | |||
| } | |||
| // If strict mode is enabled then perform a consistency check. | |||
| // Only the first consistency error is reported in the panic. | |||
| if tx.db.StrictMode { | |||
| ch := tx.Check() | |||
| var errs []string | |||
| for { | |||
| err, ok := <-ch | |||
| if !ok { | |||
| break | |||
| } | |||
| errs = append(errs, err.Error()) | |||
| } | |||
| if len(errs) > 0 { | |||
| panic("check fail: " + strings.Join(errs, "\n")) | |||
| } | |||
| } | |||
| // Write meta to disk. | |||
| if err := tx.writeMeta(); err != nil { | |||
| tx.rollback() | |||
| return err | |||
| } | |||
| tx.stats.WriteTime += time.Since(startTime) | |||
| // Finalize the transaction. | |||
| tx.close() | |||
| // Execute commit handlers now that the locks have been removed. | |||
| for _, fn := range tx.commitHandlers { | |||
| fn() | |||
| } | |||
| return nil | |||
| } | |||
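Commit is usually invoked indirectly through db.Update, but the manual pattern it supports looks roughly like this (a sketch under the same assumptions; the deferred Rollback is harmless once Commit has succeeded, since the transaction is already closed):

func manualUpdate(db *bolt.DB) error {
	tx, err := db.Begin(true) // writable transaction
	if err != nil {
		return err
	}
	defer tx.Rollback() // no-op after a successful Commit

	b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
	if err != nil {
		return err
	}
	if err := b.Put([]byte("k"), []byte("v")); err != nil {
		return err
	}
	return tx.Commit()
}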
| // Rollback closes the transaction and ignores all previous updates. Read-only | |||
| // transactions must be rolled back and not committed. | |||
| func (tx *Tx) Rollback() error { | |||
| _assert(!tx.managed, "managed tx rollback not allowed") | |||
| if tx.db == nil { | |||
| return ErrTxClosed | |||
| } | |||
| tx.rollback() | |||
| return nil | |||
| } | |||
| func (tx *Tx) rollback() { | |||
| if tx.db == nil { | |||
| return | |||
| } | |||
| if tx.writable { | |||
| tx.db.freelist.rollback(tx.meta.txid) | |||
| tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) | |||
| } | |||
| tx.close() | |||
| } | |||
| func (tx *Tx) close() { | |||
| if tx.db == nil { | |||
| return | |||
| } | |||
| if tx.writable { | |||
| // Grab freelist stats. | |||
| var freelistFreeN = tx.db.freelist.free_count() | |||
| var freelistPendingN = tx.db.freelist.pending_count() | |||
| var freelistAlloc = tx.db.freelist.size() | |||
| // Remove transaction ref & writer lock. | |||
| tx.db.rwtx = nil | |||
| tx.db.rwlock.Unlock() | |||
| // Merge statistics. | |||
| tx.db.statlock.Lock() | |||
| tx.db.stats.FreePageN = freelistFreeN | |||
| tx.db.stats.PendingPageN = freelistPendingN | |||
| tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize | |||
| tx.db.stats.FreelistInuse = freelistAlloc | |||
| tx.db.stats.TxStats.add(&tx.stats) | |||
| tx.db.statlock.Unlock() | |||
| } else { | |||
| tx.db.removeTx(tx) | |||
| } | |||
| // Clear all references. | |||
| tx.db = nil | |||
| tx.meta = nil | |||
| tx.root = Bucket{tx: tx} | |||
| tx.pages = nil | |||
| } | |||
| // Copy writes the entire database to a writer. | |||
| // This function exists for backwards compatibility. Use WriteTo() instead. | |||
| func (tx *Tx) Copy(w io.Writer) error { | |||
| _, err := tx.WriteTo(w) | |||
| return err | |||
| } | |||
| // WriteTo writes the entire database to a writer. | |||
| // If err == nil then exactly tx.Size() bytes will be written into the writer. | |||
| func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { | |||
| // Attempt to open reader with WriteFlag | |||
| f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| defer func() { _ = f.Close() }() | |||
| // Generate a meta page. We use the same page data for both meta pages. | |||
| buf := make([]byte, tx.db.pageSize) | |||
| page := (*page)(unsafe.Pointer(&buf[0])) | |||
| page.flags = metaPageFlag | |||
| *page.meta() = *tx.meta | |||
| // Write meta 0. | |||
| page.id = 0 | |||
| page.meta().checksum = page.meta().sum64() | |||
| nn, err := w.Write(buf) | |||
| n += int64(nn) | |||
| if err != nil { | |||
| return n, fmt.Errorf("meta 0 copy: %s", err) | |||
| } | |||
| // Write meta 1 with a lower transaction id. | |||
| page.id = 1 | |||
| page.meta().txid -= 1 | |||
| page.meta().checksum = page.meta().sum64() | |||
| nn, err = w.Write(buf) | |||
| n += int64(nn) | |||
| if err != nil { | |||
| return n, fmt.Errorf("meta 1 copy: %s", err) | |||
| } | |||
| // Move past the meta pages in the file. | |||
| if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { | |||
| return n, fmt.Errorf("seek: %s", err) | |||
| } | |||
| // Copy data pages. | |||
| wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) | |||
| n += wn | |||
| if err != nil { | |||
| return n, err | |||
| } | |||
| return n, f.Close() | |||
| } | |||
| // CopyFile copies the entire database to file at the given path. | |||
| // A reader transaction is maintained during the copy so it is safe to continue | |||
| // using the database while a copy is in progress. | |||
| func (tx *Tx) CopyFile(path string, mode os.FileMode) error { | |||
| f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| err = tx.Copy(f) | |||
| if err != nil { | |||
| _ = f.Close() | |||
| return err | |||
| } | |||
| return f.Close() | |||
| } | |||
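Because CopyFile only needs a read transaction, an online backup is a one-liner inside db.View; a sketch under the same assumptions:

// backup copies the whole database to dest while other
// readers and writers keep running.
func backup(db *bolt.DB, dest string) error {
	return db.View(func(tx *bolt.Tx) error {
		return tx.CopyFile(dest, 0600)
	})
}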
| // Check performs several consistency checks on the database for this transaction. | |||
| // An error is returned if any inconsistency is found. | |||
| // | |||
| // It can be safely run concurrently on a writable transaction. However, this | |||
| // incurs a high cost for large databases and databases with a lot of subbuckets | |||
| // because of caching. This overhead can be removed if running on a read-only | |||
| // transaction, however, it is not safe to execute other writer transactions at | |||
| // the same time. | |||
| func (tx *Tx) Check() <-chan error { | |||
| ch := make(chan error) | |||
| go tx.check(ch) | |||
| return ch | |||
| } | |||
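Callers must drain the returned channel completely; the channel is unbuffered, so abandoning it would leave the checker goroutine blocked on a send. A sketch, same assumptions as above:

// checkDB returns the first inconsistency found, draining the
// channel fully so the checker goroutine can exit.
func checkDB(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		var first error
		for err := range tx.Check() {
			if first == nil {
				first = err
			}
		}
		return first
	})
}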
| func (tx *Tx) check(ch chan error) { | |||
| // Check if any pages are double freed. | |||
| freed := make(map[pgid]bool) | |||
| for _, id := range tx.db.freelist.all() { | |||
| if freed[id] { | |||
| ch <- fmt.Errorf("page %d: already freed", id) | |||
| } | |||
| freed[id] = true | |||
| } | |||
| // Track every reachable page. | |||
| reachable := make(map[pgid]*page) | |||
| reachable[0] = tx.page(0) // meta0 | |||
| reachable[1] = tx.page(1) // meta1 | |||
| for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { | |||
| reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) | |||
| } | |||
| // Recursively check buckets. | |||
| tx.checkBucket(&tx.root, reachable, freed, ch) | |||
| // Ensure all pages below high water mark are either reachable or freed. | |||
| for i := pgid(0); i < tx.meta.pgid; i++ { | |||
| _, isReachable := reachable[i] | |||
| if !isReachable && !freed[i] { | |||
| ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) | |||
| } | |||
| } | |||
| // Close the channel to signal completion. | |||
| close(ch) | |||
| } | |||
| func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { | |||
| // Ignore inline buckets. | |||
| if b.root == 0 { | |||
| return | |||
| } | |||
| // Check every page used by this bucket. | |||
| b.tx.forEachPage(b.root, 0, func(p *page, _ int) { | |||
| if p.id > tx.meta.pgid { | |||
| ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) | |||
| } | |||
| // Ensure each page is only referenced once. | |||
| for i := pgid(0); i <= pgid(p.overflow); i++ { | |||
| var id = p.id + i | |||
| if _, ok := reachable[id]; ok { | |||
| ch <- fmt.Errorf("page %d: multiple references", int(id)) | |||
| } | |||
| reachable[id] = p | |||
| } | |||
| // We should only encounter un-freed leaf and branch pages. | |||
| if freed[p.id] { | |||
| ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) | |||
| } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { | |||
| ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) | |||
| } | |||
| }) | |||
| // Check each bucket within this bucket. | |||
| _ = b.ForEach(func(k, v []byte) error { | |||
| if child := b.Bucket(k); child != nil { | |||
| tx.checkBucket(child, reachable, freed, ch) | |||
| } | |||
| return nil | |||
| }) | |||
| } | |||
| // allocate returns a contiguous block of memory starting at a given page. | |||
| func (tx *Tx) allocate(count int) (*page, error) { | |||
| p, err := tx.db.allocate(count) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| // Save to our page cache. | |||
| tx.pages[p.id] = p | |||
| // Update statistics. | |||
| tx.stats.PageCount++ | |||
| tx.stats.PageAlloc += count * tx.db.pageSize | |||
| return p, nil | |||
| } | |||
| // write writes any dirty pages to disk. | |||
| func (tx *Tx) write() error { | |||
| // Sort pages by id. | |||
| pages := make(pages, 0, len(tx.pages)) | |||
| for _, p := range tx.pages { | |||
| pages = append(pages, p) | |||
| } | |||
| sort.Sort(pages) | |||
| // Write pages to disk in order. | |||
| for _, p := range pages { | |||
| size := (int(p.overflow) + 1) * tx.db.pageSize | |||
| offset := int64(p.id) * int64(tx.db.pageSize) | |||
| // Write out page in "max allocation" sized chunks. | |||
| ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) | |||
| for { | |||
| // Limit our write to our max allocation size. | |||
| sz := size | |||
| if sz > maxAllocSize-1 { | |||
| sz = maxAllocSize - 1 | |||
| } | |||
| // Write chunk to disk. | |||
| buf := ptr[:sz] | |||
| if _, err := tx.db.ops.writeAt(buf, offset); err != nil { | |||
| return err | |||
| } | |||
| // Update statistics. | |||
| tx.stats.Write++ | |||
| // Exit inner for loop if we've written all the chunks. | |||
| size -= sz | |||
| if size == 0 { | |||
| break | |||
| } | |||
| // Otherwise move offset forward and move pointer to next chunk. | |||
| offset += int64(sz) | |||
| ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) | |||
| } | |||
| } | |||
| // Ignore file sync if flag is set on DB. | |||
| if !tx.db.NoSync || IgnoreNoSync { | |||
| if err := fdatasync(tx.db); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| // Clear out page cache. | |||
| tx.pages = make(map[pgid]*page) | |||
| return nil | |||
| } | |||
| // writeMeta writes the meta to the disk. | |||
| func (tx *Tx) writeMeta() error { | |||
| // Create a temporary buffer for the meta page. | |||
| buf := make([]byte, tx.db.pageSize) | |||
| p := tx.db.pageInBuffer(buf, 0) | |||
| tx.meta.write(p) | |||
| // Write the meta page to file. | |||
| if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { | |||
| return err | |||
| } | |||
| if !tx.db.NoSync || IgnoreNoSync { | |||
| if err := fdatasync(tx.db); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| // Update statistics. | |||
| tx.stats.Write++ | |||
| return nil | |||
| } | |||
| // page returns a reference to the page with a given id. | |||
| // If the page has been written to then a temporary buffered page is returned. | |||
| func (tx *Tx) page(id pgid) *page { | |||
| // Check the dirty pages first. | |||
| if tx.pages != nil { | |||
| if p, ok := tx.pages[id]; ok { | |||
| return p | |||
| } | |||
| } | |||
| // Otherwise return directly from the mmap. | |||
| return tx.db.page(id) | |||
| } | |||
| // forEachPage iterates over every page within a given page and executes a function. | |||
| func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) { | |||
| p := tx.page(pgid) | |||
| // Execute function. | |||
| fn(p, depth) | |||
| // Recursively loop over children. | |||
| if (p.flags & branchPageFlag) != 0 { | |||
| for i := 0; i < int(p.count); i++ { | |||
| elem := p.branchPageElement(uint16(i)) | |||
| tx.forEachPage(elem.pgid, depth+1, fn) | |||
| } | |||
| } | |||
| } | |||
| // Page returns page information for a given page number. | |||
| // This is only safe for concurrent use when used by a writable transaction. | |||
| func (tx *Tx) Page(id int) (*PageInfo, error) { | |||
| if tx.db == nil { | |||
| return nil, ErrTxClosed | |||
| } else if pgid(id) >= tx.meta.pgid { | |||
| return nil, nil | |||
| } | |||
| // Build the page info. | |||
| p := tx.db.page(pgid(id)) | |||
| info := &PageInfo{ | |||
| ID: id, | |||
| Count: int(p.count), | |||
| OverflowCount: int(p.overflow), | |||
| } | |||
| // Determine the type (or if it's free). | |||
| if tx.db.freelist.freed(pgid(id)) { | |||
| info.Type = "free" | |||
| } else { | |||
| info.Type = p.typ() | |||
| } | |||
| return info, nil | |||
| } | |||
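A small sketch of inspecting a page through this API (same assumptions; the writable transaction follows the concurrency note in the doc comment):

// pageType reports the type of the page with the given id,
// e.g. "free", "leaf", or "branch".
func pageType(db *bolt.DB, id int) (string, error) {
	var typ string
	err := db.Update(func(tx *bolt.Tx) error {
		info, err := tx.Page(id)
		if err != nil {
			return err
		}
		if info == nil {
			typ = "beyond high water mark"
			return nil
		}
		typ = info.Type
		return nil
	})
	return typ, err
}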
| // TxStats represents statistics about the actions performed by the transaction. | |||
| type TxStats struct { | |||
| // Page statistics. | |||
| PageCount int // number of page allocations | |||
| PageAlloc int // total bytes allocated | |||
| // Cursor statistics. | |||
| CursorCount int // number of cursors created | |||
| // Node statistics | |||
| NodeCount int // number of node allocations | |||
| NodeDeref int // number of node dereferences | |||
| // Rebalance statistics. | |||
| Rebalance int // number of node rebalances | |||
| RebalanceTime time.Duration // total time spent rebalancing | |||
| // Split/Spill statistics. | |||
| Split int // number of nodes split | |||
| Spill int // number of nodes spilled | |||
| SpillTime time.Duration // total time spent spilling | |||
| // Write statistics. | |||
| Write int // number of writes performed | |||
| WriteTime time.Duration // total time spent writing to disk | |||
| } | |||
| func (s *TxStats) add(other *TxStats) { | |||
| s.PageCount += other.PageCount | |||
| s.PageAlloc += other.PageAlloc | |||
| s.CursorCount += other.CursorCount | |||
| s.NodeCount += other.NodeCount | |||
| s.NodeDeref += other.NodeDeref | |||
| s.Rebalance += other.Rebalance | |||
| s.RebalanceTime += other.RebalanceTime | |||
| s.Split += other.Split | |||
| s.Spill += other.Spill | |||
| s.SpillTime += other.SpillTime | |||
| s.Write += other.Write | |||
| s.WriteTime += other.WriteTime | |||
| } | |||
| // Sub calculates and returns the difference between two sets of transaction stats. | |||
| // This is useful when obtaining stats at two different points in time and | |||
| // you need the performance counters that occurred within that time span. | |||
| func (s *TxStats) Sub(other *TxStats) TxStats { | |||
| var diff TxStats | |||
| diff.PageCount = s.PageCount - other.PageCount | |||
| diff.PageAlloc = s.PageAlloc - other.PageAlloc | |||
| diff.CursorCount = s.CursorCount - other.CursorCount | |||
| diff.NodeCount = s.NodeCount - other.NodeCount | |||
| diff.NodeDeref = s.NodeDeref - other.NodeDeref | |||
| diff.Rebalance = s.Rebalance - other.Rebalance | |||
| diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime | |||
| diff.Split = s.Split - other.Split | |||
| diff.Spill = s.Spill - other.Spill | |||
| diff.SpillTime = s.SpillTime - other.SpillTime | |||
| diff.Write = s.Write - other.Write | |||
| diff.WriteTime = s.WriteTime - other.WriteTime | |||
| return diff | |||
| } | |||
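The intended use of Sub is interval sampling: capture the aggregated stats twice and diff them. A sketch (imports time) assuming the surrounding package exposes db.Stats() with a TxStats field, as the bookkeeping in close() above suggests:

// txStatsOver reports the transaction activity during d.
func txStatsOver(db *bolt.DB, d time.Duration) bolt.TxStats {
	prev := db.Stats().TxStats // assumed accessor on *bolt.DB
	time.Sleep(d)
	cur := db.Stats().TxStats
	return cur.Sub(&prev)
}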
| @@ -0,0 +1,202 @@ | |||
| Apache License | |||
| Version 2.0, January 2004 | |||
| http://www.apache.org/licenses/ | |||
| TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
| 1. Definitions. | |||
| "License" shall mean the terms and conditions for use, reproduction, | |||
| and distribution as defined by Sections 1 through 9 of this document. | |||
| "Licensor" shall mean the copyright owner or entity authorized by | |||
| the copyright owner that is granting the License. | |||
| "Legal Entity" shall mean the union of the acting entity and all | |||
| other entities that control, are controlled by, or are under common | |||
| control with that entity. For the purposes of this definition, | |||
| "control" means (i) the power, direct or indirect, to cause the | |||
| direction or management of such entity, whether by contract or | |||
| otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
| outstanding shares, or (iii) beneficial ownership of such entity. | |||
| "You" (or "Your") shall mean an individual or Legal Entity | |||
| exercising permissions granted by this License. | |||
| "Source" form shall mean the preferred form for making modifications, | |||
| including but not limited to software source code, documentation | |||
| source, and configuration files. | |||
| "Object" form shall mean any form resulting from mechanical | |||
| transformation or translation of a Source form, including but | |||
| not limited to compiled object code, generated documentation, | |||
| and conversions to other media types. | |||
| "Work" shall mean the work of authorship, whether in Source or | |||
| Object form, made available under the License, as indicated by a | |||
| copyright notice that is included in or attached to the work | |||
| (an example is provided in the Appendix below). | |||
| "Derivative Works" shall mean any work, whether in Source or Object | |||
| form, that is based on (or derived from) the Work and for which the | |||
| editorial revisions, annotations, elaborations, or other modifications | |||
| represent, as a whole, an original work of authorship. For the purposes | |||
| of this License, Derivative Works shall not include works that remain | |||
| separable from, or merely link (or bind by name) to the interfaces of, | |||
| the Work and Derivative Works thereof. | |||
| "Contribution" shall mean any work of authorship, including | |||
| the original version of the Work and any modifications or additions | |||
| to that Work or Derivative Works thereof, that is intentionally | |||
| submitted to Licensor for inclusion in the Work by the copyright owner | |||
| or by an individual or Legal Entity authorized to submit on behalf of | |||
| the copyright owner. For the purposes of this definition, "submitted" | |||
| means any form of electronic, verbal, or written communication sent | |||
| to the Licensor or its representatives, including but not limited to | |||
| communication on electronic mailing lists, source code control systems, | |||
| and issue tracking systems that are managed by, or on behalf of, the | |||
| Licensor for the purpose of discussing and improving the Work, but | |||
| excluding communication that is conspicuously marked or otherwise | |||
| designated in writing by the copyright owner as "Not a Contribution." | |||
| "Contributor" shall mean Licensor and any individual or Legal Entity | |||
| on behalf of whom a Contribution has been received by Licensor and | |||
| subsequently incorporated within the Work. | |||
| 2. Grant of Copyright License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| copyright license to reproduce, prepare Derivative Works of, | |||
| publicly display, publicly perform, sublicense, and distribute the | |||
| Work and such Derivative Works in Source or Object form. | |||
| 3. Grant of Patent License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| (except as stated in this section) patent license to make, have made, | |||
| use, offer to sell, sell, import, and otherwise transfer the Work, | |||
| where such license applies only to those patent claims licensable | |||
| by such Contributor that are necessarily infringed by their | |||
| Contribution(s) alone or by combination of their Contribution(s) | |||
| with the Work to which such Contribution(s) was submitted. If You | |||
| institute patent litigation against any entity (including a | |||
| cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
| or a Contribution incorporated within the Work constitutes direct | |||
| or contributory patent infringement, then any patent licenses | |||
| granted to You under this License for that Work shall terminate | |||
| as of the date such litigation is filed. | |||
| 4. Redistribution. You may reproduce and distribute copies of the | |||
| Work or Derivative Works thereof in any medium, with or without | |||
| modifications, and in Source or Object form, provided that You | |||
| meet the following conditions: | |||
| (a) You must give any other recipients of the Work or | |||
| Derivative Works a copy of this License; and | |||
| (b) You must cause any modified files to carry prominent notices | |||
| stating that You changed the files; and | |||
| (c) You must retain, in the Source form of any Derivative Works | |||
| that You distribute, all copyright, patent, trademark, and | |||
| attribution notices from the Source form of the Work, | |||
| excluding those notices that do not pertain to any part of | |||
| the Derivative Works; and | |||
| (d) If the Work includes a "NOTICE" text file as part of its | |||
| distribution, then any Derivative Works that You distribute must | |||
| include a readable copy of the attribution notices contained | |||
| within such NOTICE file, excluding those notices that do not | |||
| pertain to any part of the Derivative Works, in at least one | |||
| of the following places: within a NOTICE text file distributed | |||
| as part of the Derivative Works; within the Source form or | |||
| documentation, if provided along with the Derivative Works; or, | |||
| within a display generated by the Derivative Works, if and | |||
| wherever such third-party notices normally appear. The contents | |||
| of the NOTICE file are for informational purposes only and | |||
| do not modify the License. You may add Your own attribution | |||
| notices within Derivative Works that You distribute, alongside | |||
| or as an addendum to the NOTICE text from the Work, provided | |||
| that such additional attribution notices cannot be construed | |||
| as modifying the License. | |||
| You may add Your own copyright statement to Your modifications and | |||
| may provide additional or different license terms and conditions | |||
| for use, reproduction, or distribution of Your modifications, or | |||
| for any such Derivative Works as a whole, provided Your use, | |||
| reproduction, and distribution of the Work otherwise complies with | |||
| the conditions stated in this License. | |||
| 5. Submission of Contributions. Unless You explicitly state otherwise, | |||
| any Contribution intentionally submitted for inclusion in the Work | |||
| by You to the Licensor shall be under the terms and conditions of | |||
| this License, without any additional terms or conditions. | |||
| Notwithstanding the above, nothing herein shall supersede or modify | |||
| the terms of any separate license agreement you may have executed | |||
| with Licensor regarding such Contributions. | |||
| 6. Trademarks. This License does not grant permission to use the trade | |||
| names, trademarks, service marks, or product names of the Licensor, | |||
| except as required for reasonable and customary use in describing the | |||
| origin of the Work and reproducing the content of the NOTICE file. | |||
| 7. Disclaimer of Warranty. Unless required by applicable law or | |||
| agreed to in writing, Licensor provides the Work (and each | |||
| Contributor provides its Contributions) on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
| implied, including, without limitation, any warranties or conditions | |||
| of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
| PARTICULAR PURPOSE. You are solely responsible for determining the | |||
| appropriateness of using or redistributing the Work and assume any | |||
| risks associated with Your exercise of permissions under this License. | |||
| 8. Limitation of Liability. In no event and under no legal theory, | |||
| whether in tort (including negligence), contract, or otherwise, | |||
| unless required by applicable law (such as deliberate and grossly | |||
| negligent acts) or agreed to in writing, shall any Contributor be | |||
| liable to You for damages, including any direct, indirect, special, | |||
| incidental, or consequential damages of any character arising as a | |||
| result of this License or out of the use or inability to use the | |||
| Work (including but not limited to damages for loss of goodwill, | |||
| work stoppage, computer failure or malfunction, or any and all | |||
| other commercial damages or losses), even if such Contributor | |||
| has been advised of the possibility of such damages. | |||
| 9. Accepting Warranty or Additional Liability. While redistributing | |||
| the Work or Derivative Works thereof, You may choose to offer, | |||
| and charge a fee for, acceptance of support, warranty, indemnity, | |||
| or other liability obligations and/or rights consistent with this | |||
| License. However, in accepting such obligations, You may act only | |||
| on Your own behalf and on Your sole responsibility, not on behalf | |||
| of any other Contributor, and only if You agree to indemnify, | |||
| defend, and hold each Contributor harmless for any liability | |||
| incurred by, or claims asserted against, such Contributor by reason | |||
| of your accepting any such warranty or additional liability. | |||
| END OF TERMS AND CONDITIONS | |||
| APPENDIX: How to apply the Apache License to your work. | |||
| To apply the Apache License to your work, attach the following | |||
| boilerplate notice, with the fields enclosed by brackets "[]" | |||
| replaced with your own identifying information. (Don't include | |||
| the brackets!) The text should be enclosed in the appropriate | |||
| comment syntax for the file format. We also recommend that a | |||
| file or class name and description of purpose be included on the | |||
| same "printed page" as the copyright notice for easier | |||
| identification within third-party archives. | |||
| Copyright [yyyy] [name of copyright owner] | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| @@ -0,0 +1,5 @@ | |||
| CoreOS Project | |||
| Copyright 2014 CoreOS, Inc | |||
| This product includes software developed at CoreOS, Inc. | |||
| (http://www.coreos.com/). | |||
| @@ -0,0 +1,162 @@ | |||
| // Copyright 2015 CoreOS, Inc. | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| // Package error describes errors in the etcd project. When any change happens, | |||
| // Documentation/errorcode.md needs to be updated correspondingly. | |||
| package error | |||
| import ( | |||
| "encoding/json" | |||
| "fmt" | |||
| "net/http" | |||
| ) | |||
| var errors = map[int]string{ | |||
| // command related errors | |||
| EcodeKeyNotFound: "Key not found", | |||
| EcodeTestFailed: "Compare failed", //test and set | |||
| EcodeNotFile: "Not a file", | |||
| ecodeNoMorePeer: "Reached the max number of peers in the cluster", | |||
| EcodeNotDir: "Not a directory", | |||
| EcodeNodeExist: "Key already exists", // create | |||
| ecodeKeyIsPreserved: "The prefix of given key is a keyword in etcd", | |||
| EcodeRootROnly: "Root is read only", | |||
| EcodeDirNotEmpty: "Directory not empty", | |||
| ecodeExistingPeerAddr: "Peer address has existed", | |||
| EcodeUnauthorized: "The request requires user authentication", | |||
| // Post form related errors | |||
| ecodeValueRequired: "Value is Required in POST form", | |||
| EcodePrevValueRequired: "PrevValue is Required in POST form", | |||
| EcodeTTLNaN: "The given TTL in POST form is not a number", | |||
| EcodeIndexNaN: "The given index in POST form is not a number", | |||
| ecodeValueOrTTLRequired: "Value or TTL is required in POST form", | |||
| ecodeTimeoutNaN: "The given timeout in POST form is not a number", | |||
| ecodeNameRequired: "Name is required in POST form", | |||
| ecodeIndexOrValueRequired: "Index or value is required", | |||
| ecodeIndexValueMutex: "Index and value cannot both be specified", | |||
| EcodeInvalidField: "Invalid field", | |||
| EcodeInvalidForm: "Invalid POST form", | |||
| EcodeRefreshValue: "Value provided on refresh", | |||
| EcodeRefreshTTLRequired: "A TTL must be provided on refresh", | |||
| // raft related errors | |||
| EcodeRaftInternal: "Raft Internal Error", | |||
| EcodeLeaderElect: "During Leader Election", | |||
| // etcd related errors | |||
| EcodeWatcherCleared: "watcher is cleared due to etcd recovery", | |||
| EcodeEventIndexCleared: "The event in requested index is outdated and cleared", | |||
| ecodeStandbyInternal: "Standby Internal Error", | |||
| ecodeInvalidActiveSize: "Invalid active size", | |||
| ecodeInvalidRemoveDelay: "Standby remove delay", | |||
| // client related errors | |||
| ecodeClientInternal: "Client Internal Error", | |||
| } | |||
| var errorStatus = map[int]int{ | |||
| EcodeKeyNotFound: http.StatusNotFound, | |||
| EcodeNotFile: http.StatusForbidden, | |||
| EcodeDirNotEmpty: http.StatusForbidden, | |||
| EcodeUnauthorized: http.StatusUnauthorized, | |||
| EcodeTestFailed: http.StatusPreconditionFailed, | |||
| EcodeNodeExist: http.StatusPreconditionFailed, | |||
| EcodeRaftInternal: http.StatusInternalServerError, | |||
| EcodeLeaderElect: http.StatusInternalServerError, | |||
| } | |||
| const ( | |||
| EcodeKeyNotFound = 100 | |||
| EcodeTestFailed = 101 | |||
| EcodeNotFile = 102 | |||
| ecodeNoMorePeer = 103 | |||
| EcodeNotDir = 104 | |||
| EcodeNodeExist = 105 | |||
| ecodeKeyIsPreserved = 106 | |||
| EcodeRootROnly = 107 | |||
| EcodeDirNotEmpty = 108 | |||
| ecodeExistingPeerAddr = 109 | |||
| EcodeUnauthorized = 110 | |||
| ecodeValueRequired = 200 | |||
| EcodePrevValueRequired = 201 | |||
| EcodeTTLNaN = 202 | |||
| EcodeIndexNaN = 203 | |||
| ecodeValueOrTTLRequired = 204 | |||
| ecodeTimeoutNaN = 205 | |||
| ecodeNameRequired = 206 | |||
| ecodeIndexOrValueRequired = 207 | |||
| ecodeIndexValueMutex = 208 | |||
| EcodeInvalidField = 209 | |||
| EcodeInvalidForm = 210 | |||
| EcodeRefreshValue = 211 | |||
| EcodeRefreshTTLRequired = 212 | |||
| EcodeRaftInternal = 300 | |||
| EcodeLeaderElect = 301 | |||
| EcodeWatcherCleared = 400 | |||
| EcodeEventIndexCleared = 401 | |||
| ecodeStandbyInternal = 402 | |||
| ecodeInvalidActiveSize = 403 | |||
| ecodeInvalidRemoveDelay = 404 | |||
| ecodeClientInternal = 500 | |||
| ) | |||
| type Error struct { | |||
| ErrorCode int `json:"errorCode"` | |||
| Message string `json:"message"` | |||
| Cause string `json:"cause,omitempty"` | |||
| Index uint64 `json:"index"` | |||
| } | |||
| func NewRequestError(errorCode int, cause string) *Error { | |||
| return NewError(errorCode, cause, 0) | |||
| } | |||
| func NewError(errorCode int, cause string, index uint64) *Error { | |||
| return &Error{ | |||
| ErrorCode: errorCode, | |||
| Message: errors[errorCode], | |||
| Cause: cause, | |||
| Index: index, | |||
| } | |||
| } | |||
| // Error implements the error interface. | |||
| func (e Error) Error() string { | |||
| return e.Message + " (" + e.Cause + ")" | |||
| } | |||
| func (e Error) toJsonString() string { | |||
| b, _ := json.Marshal(e) | |||
| return string(b) | |||
| } | |||
| func (e Error) StatusCode() int { | |||
| status, ok := errorStatus[e.ErrorCode] | |||
| if !ok { | |||
| status = http.StatusBadRequest | |||
| } | |||
| return status | |||
| } | |||
| func (e Error) WriteTo(w http.ResponseWriter) { | |||
| w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index)) | |||
| w.Header().Set("Content-Type", "application/json") | |||
| w.WriteHeader(e.StatusCode()) | |||
| fmt.Fprintln(w, e.toJsonString()) | |||
| } | |||
| @@ -0,0 +1,202 @@ | |||
| Apache License | |||
| Version 2.0, January 2004 | |||
| http://www.apache.org/licenses/ | |||
| TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
| 1. Definitions. | |||
| "License" shall mean the terms and conditions for use, reproduction, | |||
| and distribution as defined by Sections 1 through 9 of this document. | |||
| "Licensor" shall mean the copyright owner or entity authorized by | |||
| the copyright owner that is granting the License. | |||
| "Legal Entity" shall mean the union of the acting entity and all | |||
| other entities that control, are controlled by, or are under common | |||
| control with that entity. For the purposes of this definition, | |||
| "control" means (i) the power, direct or indirect, to cause the | |||
| direction or management of such entity, whether by contract or | |||
| otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
| outstanding shares, or (iii) beneficial ownership of such entity. | |||
| "You" (or "Your") shall mean an individual or Legal Entity | |||
| exercising permissions granted by this License. | |||
| "Source" form shall mean the preferred form for making modifications, | |||
| including but not limited to software source code, documentation | |||
| source, and configuration files. | |||
| "Object" form shall mean any form resulting from mechanical | |||
| transformation or translation of a Source form, including but | |||
| not limited to compiled object code, generated documentation, | |||
| and conversions to other media types. | |||
| "Work" shall mean the work of authorship, whether in Source or | |||
| Object form, made available under the License, as indicated by a | |||
| copyright notice that is included in or attached to the work | |||
| (an example is provided in the Appendix below). | |||
| "Derivative Works" shall mean any work, whether in Source or Object | |||
| form, that is based on (or derived from) the Work and for which the | |||
| editorial revisions, annotations, elaborations, or other modifications | |||
| represent, as a whole, an original work of authorship. For the purposes | |||
| of this License, Derivative Works shall not include works that remain | |||
| separable from, or merely link (or bind by name) to the interfaces of, | |||
| the Work and Derivative Works thereof. | |||
| "Contribution" shall mean any work of authorship, including | |||
| the original version of the Work and any modifications or additions | |||
| to that Work or Derivative Works thereof, that is intentionally | |||
| submitted to Licensor for inclusion in the Work by the copyright owner | |||
| or by an individual or Legal Entity authorized to submit on behalf of | |||
| the copyright owner. For the purposes of this definition, "submitted" | |||
| means any form of electronic, verbal, or written communication sent | |||
| to the Licensor or its representatives, including but not limited to | |||
| communication on electronic mailing lists, source code control systems, | |||
| and issue tracking systems that are managed by, or on behalf of, the | |||
| Licensor for the purpose of discussing and improving the Work, but | |||
| excluding communication that is conspicuously marked or otherwise | |||
| designated in writing by the copyright owner as "Not a Contribution." | |||
| "Contributor" shall mean Licensor and any individual or Legal Entity | |||
| on behalf of whom a Contribution has been received by Licensor and | |||
| subsequently incorporated within the Work. | |||
| 2. Grant of Copyright License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| copyright license to reproduce, prepare Derivative Works of, | |||
| publicly display, publicly perform, sublicense, and distribute the | |||
| Work and such Derivative Works in Source or Object form. | |||
| 3. Grant of Patent License. Subject to the terms and conditions of | |||
| this License, each Contributor hereby grants to You a perpetual, | |||
| worldwide, non-exclusive, no-charge, royalty-free, irrevocable | |||
| (except as stated in this section) patent license to make, have made, | |||
| use, offer to sell, sell, import, and otherwise transfer the Work, | |||
| where such license applies only to those patent claims licensable | |||
| by such Contributor that are necessarily infringed by their | |||
| Contribution(s) alone or by combination of their Contribution(s) | |||
| with the Work to which such Contribution(s) was submitted. If You | |||
| institute patent litigation against any entity (including a | |||
| cross-claim or counterclaim in a lawsuit) alleging that the Work | |||
| or a Contribution incorporated within the Work constitutes direct | |||
| or contributory patent infringement, then any patent licenses | |||
| granted to You under this License for that Work shall terminate | |||
| as of the date such litigation is filed. | |||
| 4. Redistribution. You may reproduce and distribute copies of the | |||
| Work or Derivative Works thereof in any medium, with or without | |||
| modifications, and in Source or Object form, provided that You | |||
| meet the following conditions: | |||
| (a) You must give any other recipients of the Work or | |||
| Derivative Works a copy of this License; and | |||
| (b) You must cause any modified files to carry prominent notices | |||
| stating that You changed the files; and | |||
| (c) You must retain, in the Source form of any Derivative Works | |||
| that You distribute, all copyright, patent, trademark, and | |||
| attribution notices from the Source form of the Work, | |||
| excluding those notices that do not pertain to any part of | |||
| the Derivative Works; and | |||
| (d) If the Work includes a "NOTICE" text file as part of its | |||
| distribution, then any Derivative Works that You distribute must | |||
| include a readable copy of the attribution notices contained | |||
| within such NOTICE file, excluding those notices that do not | |||
| pertain to any part of the Derivative Works, in at least one | |||
| of the following places: within a NOTICE text file distributed | |||
| as part of the Derivative Works; within the Source form or | |||
| documentation, if provided along with the Derivative Works; or, | |||
| within a display generated by the Derivative Works, if and | |||
| wherever such third-party notices normally appear. The contents | |||
| of the NOTICE file are for informational purposes only and | |||
| do not modify the License. You may add Your own attribution | |||
| notices within Derivative Works that You distribute, alongside | |||
| or as an addendum to the NOTICE text from the Work, provided | |||
| that such additional attribution notices cannot be construed | |||
| as modifying the License. | |||
| You may add Your own copyright statement to Your modifications and | |||
| may provide additional or different license terms and conditions | |||
| for use, reproduction, or distribution of Your modifications, or | |||
| for any such Derivative Works as a whole, provided Your use, | |||
| reproduction, and distribution of the Work otherwise complies with | |||
| the conditions stated in this License. | |||
| 5. Submission of Contributions. Unless You explicitly state otherwise, | |||
| any Contribution intentionally submitted for inclusion in the Work | |||
| by You to the Licensor shall be under the terms and conditions of | |||
| this License, without any additional terms or conditions. | |||
| Notwithstanding the above, nothing herein shall supersede or modify | |||
| the terms of any separate license agreement you may have executed | |||
| with Licensor regarding such Contributions. | |||
| 6. Trademarks. This License does not grant permission to use the trade | |||
| names, trademarks, service marks, or product names of the Licensor, | |||
| except as required for reasonable and customary use in describing the | |||
| origin of the Work and reproducing the content of the NOTICE file. | |||
| 7. Disclaimer of Warranty. Unless required by applicable law or | |||
| agreed to in writing, Licensor provides the Work (and each | |||
| Contributor provides its Contributions) on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | |||
| implied, including, without limitation, any warranties or conditions | |||
| of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | |||
| PARTICULAR PURPOSE. You are solely responsible for determining the | |||
| appropriateness of using or redistributing the Work and assume any | |||
| risks associated with Your exercise of permissions under this License. | |||
| 8. Limitation of Liability. In no event and under no legal theory, | |||
| whether in tort (including negligence), contract, or otherwise, | |||
| unless required by applicable law (such as deliberate and grossly | |||
| negligent acts) or agreed to in writing, shall any Contributor be | |||
| liable to You for damages, including any direct, indirect, special, | |||
| incidental, or consequential damages of any character arising as a | |||
| result of this License or out of the use or inability to use the | |||
| Work (including but not limited to damages for loss of goodwill, | |||
| work stoppage, computer failure or malfunction, or any and all | |||
| other commercial damages or losses), even if such Contributor | |||
| has been advised of the possibility of such damages. | |||
| 9. Accepting Warranty or Additional Liability. While redistributing | |||
| the Work or Derivative Works thereof, You may choose to offer, | |||
| and charge a fee for, acceptance of support, warranty, indemnity, | |||
| or other liability obligations and/or rights consistent with this | |||
| License. However, in accepting such obligations, You may act only | |||
| on Your own behalf and on Your sole responsibility, not on behalf | |||
| of any other Contributor, and only if You agree to indemnify, | |||
| defend, and hold each Contributor harmless for any liability | |||
| incurred by, or claims asserted against, such Contributor by reason | |||
| of your accepting any such warranty or additional liability. | |||
| END OF TERMS AND CONDITIONS | |||
| APPENDIX: How to apply the Apache License to your work. | |||
| To apply the Apache License to your work, attach the following | |||
| boilerplate notice, with the fields enclosed by brackets "[]" | |||
| replaced with your own identifying information. (Don't include | |||
| the brackets!) The text should be enclosed in the appropriate | |||
| comment syntax for the file format. We also recommend that a | |||
| file or class name and description of purpose be included on the | |||
| same "printed page" as the copyright notice for easier | |||
| identification within third-party archives. | |||
| Copyright [yyyy] [name of copyright owner] | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| @@ -0,0 +1,23 @@ | |||
| package etcd | |||
| // AddChildDir adds a new directory with a random etcd-generated key under the given path. | |||
| func (c *Client) AddChildDir(key string, ttl uint64) (*Response, error) { | |||
| raw, err := c.post(key, "", ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // AddChild adds a new file with a random etcd-generated key under the given path. | |||
| func (c *Client) AddChild(key string, value string, ttl uint64) (*Response, error) { | |||
| raw, err := c.post(key, value, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
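A sketch of using AddChild for in-order keys (a common queue idiom), assuming this is the go-etcd client vendored from github.com/coreos/go-etcd/etcd and that Response carries a Node with the generated Key, as defined elsewhere in the package:

// enqueue appends value under dir and returns the
// etcd-generated, ordered key.
func enqueue(c *etcd.Client, dir, value string) (string, error) {
	resp, err := c.AddChild(dir, value, 0) // ttl 0 = no expiry
	if err != nil {
		return "", err
	}
	return resp.Node.Key, nil
}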
| @@ -0,0 +1,476 @@ | |||
| package etcd | |||
| import ( | |||
| "crypto/tls" | |||
| "crypto/x509" | |||
| "encoding/json" | |||
| "errors" | |||
| "io" | |||
| "io/ioutil" | |||
| "math/rand" | |||
| "net" | |||
| "net/http" | |||
| "net/url" | |||
| "os" | |||
| "path" | |||
| "strings" | |||
| "time" | |||
| ) | |||
| // See SetConsistency for how to use these constants. | |||
| const ( | |||
| // Using strings rather than iota because the consistency level | |||
| // could be persisted to disk, so it'd be better to use | |||
| // human-readable values. | |||
| STRONG_CONSISTENCY = "STRONG" | |||
| WEAK_CONSISTENCY = "WEAK" | |||
| ) | |||
| const ( | |||
| defaultBufferSize = 10 | |||
| ) | |||
| func init() { | |||
| rand.Seed(int64(time.Now().Nanosecond())) | |||
| } | |||
| type Config struct { | |||
| CertFile string `json:"certFile"` | |||
| KeyFile string `json:"keyFile"` | |||
| CaCertFile []string `json:"caCertFiles"` | |||
| DialTimeout time.Duration `json:"timeout"` | |||
| Consistency string `json:"consistency"` | |||
| } | |||
| type credentials struct { | |||
| username string | |||
| password string | |||
| } | |||
| type Client struct { | |||
| config Config `json:"config"` | |||
| cluster *Cluster `json:"cluster"` | |||
| httpClient *http.Client | |||
| credentials *credentials | |||
| transport *http.Transport | |||
| persistence io.Writer | |||
| cURLch chan string | |||
| // CheckRetry can be used to control the policy for failed requests | |||
| // and modify the cluster if needed. | |||
| // The client calls it before sending requests again, and | |||
| // stops retrying if CheckRetry returns some error. The cases that | |||
| // this function needs to handle include no response and unexpected | |||
| // http status code of response. | |||
| // If CheckRetry is nil, client will call the default one | |||
| // `DefaultCheckRetry`. | |||
| // Argument cluster is the etcd.Cluster object that these requests have been made on. | |||
| // Argument numReqs is the number of http.Requests that have been made so far. | |||
| // Argument lastResp is the http.Responses from the last request. | |||
| // Argument err is the reason of the failure. | |||
| CheckRetry func(cluster *Cluster, numReqs int, | |||
| lastResp http.Response, err error) error | |||
| } | |||
| // NewClient creates a basic client that is configured to be used | |||
| // with the given machine list. | |||
| func NewClient(machines []string) *Client { | |||
| config := Config{ | |||
| // default timeout is one second | |||
| DialTimeout: time.Second, | |||
| Consistency: WEAK_CONSISTENCY, | |||
| } | |||
| client := &Client{ | |||
| cluster: NewCluster(machines), | |||
| config: config, | |||
| } | |||
| client.initHTTPClient() | |||
| client.saveConfig() | |||
| return client | |||
| } | |||
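Typical construction and use, as a complete sketch; Set and Get are assumed from the rest of this client package, and the import path is an assumption:

package main

import (
	"log"

	"github.com/coreos/go-etcd/etcd" // assumed import path
)

func main() {
	c := etcd.NewClient([]string{"http://127.0.0.1:4001"})
	if _, err := c.Set("/greeting", "hello", 0); err != nil {
		log.Fatal(err)
	}
	resp, err := c.Get("/greeting", false, false)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(resp.Node.Value)
}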
| // NewTLSClient creates a basic client with TLS configuration. | |||
| func NewTLSClient(machines []string, cert, key, caCert string) (*Client, error) { | |||
| // overwrite the default machine to use https | |||
| if len(machines) == 0 { | |||
| machines = []string{"https://127.0.0.1:4001"} | |||
| } | |||
| config := Config{ | |||
| // default timeout is one second | |||
| DialTimeout: time.Second, | |||
| Consistency: WEAK_CONSISTENCY, | |||
| CertFile: cert, | |||
| KeyFile: key, | |||
| CaCertFile: make([]string, 0), | |||
| } | |||
| client := &Client{ | |||
| cluster: NewCluster(machines), | |||
| config: config, | |||
| } | |||
| err := client.initHTTPSClient(cert, key) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| err = client.AddRootCA(caCert) | |||
| client.saveConfig() | |||
| return client, nil | |||
| } | |||
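The TLS variant takes certificate, key, and CA paths; in this fragment the file names are placeholders:

c, err := etcd.NewTLSClient(
	[]string{"https://127.0.0.1:4001"},
	"client.crt", "client.key", "ca.crt", // placeholder paths
)
if err != nil {
	log.Fatal(err)
}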
| // NewClientFromFile creates a client from a given file path. | |||
| // The given file is expected to use the JSON format. | |||
| func NewClientFromFile(fpath string) (*Client, error) { | |||
| fi, err := os.Open(fpath) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| defer func() { | |||
| if err := fi.Close(); err != nil { | |||
| panic(err) | |||
| } | |||
| }() | |||
| return NewClientFromReader(fi) | |||
| } | |||
| // NewClientFromReader creates a Client configured from a given reader. | |||
| // The configuration is expected to use the JSON format. | |||
| func NewClientFromReader(reader io.Reader) (*Client, error) { | |||
| c := new(Client) | |||
| b, err := ioutil.ReadAll(reader) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| err = json.Unmarshal(b, c) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if c.config.CertFile == "" { | |||
| c.initHTTPClient() | |||
| } else { | |||
| err = c.initHTTPSClient(c.config.CertFile, c.config.KeyFile) | |||
| } | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| for _, caCert := range c.config.CaCertFile { | |||
| if err := c.AddRootCA(caCert); err != nil { | |||
| return nil, err | |||
| } | |||
| } | |||
| return c, nil | |||
| } | |||
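Restoring a client from a configuration written earlier (for example via SetPersistence below) is then a one-liner; the path is a placeholder:

c, err := etcd.NewClientFromFile("/var/lib/myapp/etcd-client.json") // placeholder path
if err != nil {
	log.Fatal(err)
}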
| // SetTransport overrides the Client's HTTP Transport object. | |||
| func (c *Client) SetTransport(tr *http.Transport) { | |||
| c.httpClient.Transport = tr | |||
| c.transport = tr | |||
| } | |||
| func (c *Client) SetCredentials(username, password string) { | |||
| c.credentials = &credentials{username, password} | |||
| } | |||
| func (c *Client) Close() { | |||
| c.transport.DisableKeepAlives = true | |||
| c.transport.CloseIdleConnections() | |||
| } | |||
| // initHTTPClient initializes an HTTP client for the etcd client. | |||
| func (c *Client) initHTTPClient() { | |||
| c.transport = &http.Transport{ | |||
| Dial: c.DefaultDial, | |||
| TLSClientConfig: &tls.Config{ | |||
| InsecureSkipVerify: true, | |||
| }, | |||
| } | |||
| c.httpClient = &http.Client{Transport: c.transport} | |||
| } | |||
| // initHTTPSClient initializes an HTTPS client for the etcd client. | |||
| func (c *Client) initHTTPSClient(cert, key string) error { | |||
| if cert == "" || key == "" { | |||
| return errors.New("Require both cert and key path") | |||
| } | |||
| tlsCert, err := tls.LoadX509KeyPair(cert, key) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| tlsConfig := &tls.Config{ | |||
| Certificates: []tls.Certificate{tlsCert}, | |||
| InsecureSkipVerify: true, | |||
| } | |||
| c.transport = &http.Transport{ | |||
| TLSClientConfig: tlsConfig, | |||
| Dial: c.DefaultDial, | |||
| } | |||
| c.httpClient = &http.Client{Transport: c.transport} | |||
| return nil | |||
| } | |||
| // SetPersistence sets a writer to which the config will be | |||
| // written every time it's changed. | |||
| func (c *Client) SetPersistence(writer io.Writer) { | |||
| c.persistence = writer | |||
| } | |||
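A fragment wiring persistence to a file (placeholder path). Note that saveConfig below simply calls Write on the same writer for every change, so successive snapshots accumulate; a writer that seeks and truncates between writes may be preferable:

f, err := os.OpenFile("/var/lib/myapp/etcd-client.json", // placeholder path
	os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
	log.Fatal(err)
}
c.SetPersistence(f)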
| // SetConsistency changes the consistency level of the client. | |||
| // | |||
| // When consistency is set to STRONG_CONSISTENCY, all requests, | |||
| // including GET, are sent to the leader. This means that, assuming | |||
| // the absence of leader failures, GET requests are guaranteed to see | |||
| // the changes made by previous requests. | |||
| // | |||
| // When consistency is set to WEAK_CONSISTENCY, other requests | |||
| // are still sent to the leader, but GET requests are sent to a | |||
| // random server from the server pool. This reduces the read | |||
| // load on the leader, but it's not guaranteed that the GET requests | |||
| // will see changes made by previous requests (they might have not | |||
| // yet been committed on non-leader servers). | |||
| func (c *Client) SetConsistency(consistency string) error { | |||
| if !(consistency == STRONG_CONSISTENCY || consistency == WEAK_CONSISTENCY) { | |||
| return errors.New("The argument must be either STRONG_CONSISTENCY or WEAK_CONSISTENCY.") | |||
| } | |||
| c.config.Consistency = consistency | |||
| return nil | |||
| } | |||
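Choosing a level is a single call; any other string is rejected:

if err := c.SetConsistency(etcd.STRONG_CONSISTENCY); err != nil {
	log.Fatal(err)
}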
| // SetDialTimeout sets the DialTimeout value. | |||
| func (c *Client) SetDialTimeout(d time.Duration) { | |||
| c.config.DialTimeout = d | |||
| } | |||
| // AddRootCA adds a root CA cert for the etcd client | |||
| func (c *Client) AddRootCA(caCert string) error { | |||
| if c.httpClient == nil { | |||
| return errors.New("Client has not been initialized yet!") | |||
| } | |||
| certBytes, err := ioutil.ReadFile(caCert) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| tr, ok := c.httpClient.Transport.(*http.Transport) | |||
| if !ok { | |||
| panic("AddRootCA(): Transport type assert should not fail") | |||
| } | |||
| if tr.TLSClientConfig.RootCAs == nil { | |||
| caCertPool := x509.NewCertPool() | |||
| ok = caCertPool.AppendCertsFromPEM(certBytes) | |||
| if ok { | |||
| tr.TLSClientConfig.RootCAs = caCertPool | |||
| } | |||
| tr.TLSClientConfig.InsecureSkipVerify = false | |||
| } else { | |||
| ok = tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(certBytes) | |||
| } | |||
| if !ok { | |||
| err = errors.New("Unable to load caCert") | |||
| } | |||
| c.config.CaCertFile = append(c.config.CaCertFile, caCert) | |||
| c.saveConfig() | |||
| return err | |||
| } | |||
| // SetCluster updates cluster information using the given machine list. | |||
| func (c *Client) SetCluster(machines []string) bool { | |||
| success := c.internalSyncCluster(machines) | |||
| return success | |||
| } | |||
| func (c *Client) GetCluster() []string { | |||
| return c.cluster.Machines | |||
| } | |||
| // SyncCluster updates the cluster information using the internal machine list. | |||
| // If no members are found, the internal machine list is left untouched. | |||
| func (c *Client) SyncCluster() bool { | |||
| return c.internalSyncCluster(c.cluster.Machines) | |||
| } | |||
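A sketch of bootstrapping from a single seed machine and letting the client discover the rest of the cluster:

c := etcd.NewClient([]string{"http://10.0.0.1:4001"}) // placeholder seed
if !c.SyncCluster() {
	log.Fatal("could not reach any etcd machine")
}
log.Println("cluster:", c.GetCluster())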
| // internalSyncCluster syncs cluster information using the given machine list. | |||
| func (c *Client) internalSyncCluster(machines []string) bool { | |||
| // comma-separated list of machines in the cluster. | |||
| members := "" | |||
| for _, machine := range machines { | |||
| httpPath := c.createHttpPath(machine, path.Join(version, "members")) | |||
| resp, err := c.httpClient.Get(httpPath) | |||
| if err != nil { | |||
| // try another machine in the cluster | |||
| continue | |||
| } | |||
| if resp.StatusCode != http.StatusOK { // fall-back to old endpoint | |||
| httpPath := c.createHttpPath(machine, path.Join(version, "machines")) | |||
| resp, err := c.httpClient.Get(httpPath) | |||
| if err != nil { | |||
| // try another machine in the cluster | |||
| continue | |||
| } | |||
| b, err := ioutil.ReadAll(resp.Body) | |||
| resp.Body.Close() | |||
| if err != nil { | |||
| // try another machine in the cluster | |||
| continue | |||
| } | |||
| members = string(b) | |||
| } else { | |||
| b, err := ioutil.ReadAll(resp.Body) | |||
| resp.Body.Close() | |||
| if err != nil { | |||
| // try another machine in the cluster | |||
| continue | |||
| } | |||
| var mCollection memberCollection | |||
| if err := json.Unmarshal(b, &mCollection); err != nil { | |||
| // try another machine | |||
| continue | |||
| } | |||
| urls := make([]string, 0) | |||
| for _, m := range mCollection { | |||
| urls = append(urls, m.ClientURLs...) | |||
| } | |||
| members = strings.Join(urls, ",") | |||
| } | |||
| // We should never do an empty cluster update. | |||
| if members == "" { | |||
| continue | |||
| } | |||
| // update Machines List | |||
| c.cluster.updateFromStr(members) | |||
| logger.Debug("sync.machines ", c.cluster.Machines) | |||
| c.saveConfig() | |||
| return true | |||
| } | |||
| return false | |||
| } | |||
| // createHttpPath creates a complete HTTP URL. | |||
| // serverName should contain both the host name and a port number, if any. | |||
| func (c *Client) createHttpPath(serverName string, _path string) string { | |||
| u, err := url.Parse(serverName) | |||
| if err != nil { | |||
| panic(err) | |||
| } | |||
| u.Path = path.Join(u.Path, _path) | |||
| if u.Scheme == "" { | |||
| u.Scheme = "http" | |||
| } | |||
| return u.String() | |||
| } | |||
| // DefaultDial attempts to open a TCP connection to the provided address, explicitly | |||
| // enabling keep-alives with a one-second interval. | |||
| func (c *Client) DefaultDial(network, addr string) (net.Conn, error) { | |||
| dialer := net.Dialer{ | |||
| Timeout: c.config.DialTimeout, | |||
| KeepAlive: time.Second, | |||
| } | |||
| return dialer.Dial(network, addr) | |||
| } | |||
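| // OpenCURL starts buffering, for each request the client sends, an | |||
| // equivalent cURL command that can be read back with RecvCURL. | |||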
| func (c *Client) OpenCURL() { | |||
| c.cURLch = make(chan string, defaultBufferSize) | |||
| } | |||
| func (c *Client) CloseCURL() { | |||
| c.cURLch = nil | |||
| } | |||
| func (c *Client) sendCURL(command string) { | |||
| go func() { | |||
| select { | |||
| case c.cURLch <- command: | |||
| default: | |||
| } | |||
| }() | |||
| } | |||
| func (c *Client) RecvCURL() string { | |||
| return <-c.cURLch | |||
| } | |||
| // saveConfig saves the current config using c.persistence. | |||
| func (c *Client) saveConfig() error { | |||
| if c.persistence != nil { | |||
| b, err := json.Marshal(c) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| _, err = c.persistence.Write(b) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| // MarshalJSON implements the Marshaller interface | |||
| // as defined by the standard JSON package. | |||
| func (c *Client) MarshalJSON() ([]byte, error) { | |||
| b, err := json.Marshal(struct { | |||
| Config Config `json:"config"` | |||
| Cluster *Cluster `json:"cluster"` | |||
| }{ | |||
| Config: c.config, | |||
| Cluster: c.cluster, | |||
| }) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return b, nil | |||
| } | |||
| // UnmarshalJSON implements the Unmarshaller interface | |||
| // as defined by the standard JSON package. | |||
| func (c *Client) UnmarshalJSON(b []byte) error { | |||
| temp := struct { | |||
| Config Config `json:"config"` | |||
| Cluster *Cluster `json:"cluster"` | |||
| }{} | |||
| err := json.Unmarshal(b, &temp) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| c.cluster = temp.Cluster | |||
| c.config = temp.Config | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,54 @@ | |||
| package etcd | |||
| import ( | |||
| "math/rand" | |||
| "strings" | |||
| "sync" | |||
| ) | |||
| type Cluster struct { | |||
| Leader string `json:"leader"` | |||
| Machines []string `json:"machines"` | |||
| picked int | |||
| mu sync.RWMutex | |||
| } | |||
| func NewCluster(machines []string) *Cluster { | |||
| // if an empty slice was sent in then just assume HTTP 4001 on localhost | |||
| if len(machines) == 0 { | |||
| machines = []string{"http://127.0.0.1:4001"} | |||
| } | |||
| machines = shuffleStringSlice(machines) | |||
| logger.Debug("Shuffle cluster machines", machines) | |||
| // default leader and machines | |||
| return &Cluster{ | |||
| Leader: "", | |||
| Machines: machines, | |||
| picked: rand.Intn(len(machines)), | |||
| } | |||
| } | |||
| func (cl *Cluster) failure() { | |||
| cl.mu.Lock() | |||
| defer cl.mu.Unlock() | |||
| cl.picked = (cl.picked + 1) % len(cl.Machines) | |||
| } | |||
| func (cl *Cluster) pick() string { | |||
| cl.mu.Lock() | |||
| defer cl.mu.Unlock() | |||
| return cl.Machines[cl.picked] | |||
| } | |||
| func (cl *Cluster) updateFromStr(machines string) { | |||
| cl.mu.Lock() | |||
| defer cl.mu.Unlock() | |||
| cl.Machines = strings.Split(machines, ",") | |||
| for i := range cl.Machines { | |||
| cl.Machines[i] = strings.TrimSpace(cl.Machines[i]) | |||
| } | |||
| cl.Machines = shuffleStringSlice(cl.Machines) | |||
| cl.picked = rand.Intn(len(cl.Machines)) | |||
| } | |||
| @@ -0,0 +1,34 @@ | |||
| package etcd | |||
| import "fmt" | |||
| func (c *Client) CompareAndDelete(key string, prevValue string, prevIndex uint64) (*Response, error) { | |||
| raw, err := c.RawCompareAndDelete(key, prevValue, prevIndex) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| func (c *Client) RawCompareAndDelete(key string, prevValue string, prevIndex uint64) (*RawResponse, error) { | |||
| if prevValue == "" && prevIndex == 0 { | |||
| return nil, fmt.Errorf("You must give either prevValue or prevIndex.") | |||
| } | |||
| options := Options{} | |||
| if prevValue != "" { | |||
| options["prevValue"] = prevValue | |||
| } | |||
| if prevIndex != 0 { | |||
| options["prevIndex"] = prevIndex | |||
| } | |||
| raw, err := c.delete(key, options) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw, err | |||
| } | |||
| @@ -0,0 +1,36 @@ | |||
| package etcd | |||
| import "fmt" | |||
| func (c *Client) CompareAndSwap(key string, value string, ttl uint64, | |||
| prevValue string, prevIndex uint64) (*Response, error) { | |||
| raw, err := c.RawCompareAndSwap(key, value, ttl, prevValue, prevIndex) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| func (c *Client) RawCompareAndSwap(key string, value string, ttl uint64, | |||
| prevValue string, prevIndex uint64) (*RawResponse, error) { | |||
| if prevValue == "" && prevIndex == 0 { | |||
| return nil, fmt.Errorf("You must give either prevValue or prevIndex.") | |||
| } | |||
| options := Options{} | |||
| if prevValue != "" { | |||
| options["prevValue"] = prevValue | |||
| } | |||
| if prevIndex != 0 { | |||
| options["prevIndex"] = prevIndex | |||
| } | |||
| raw, err := c.put(key, value, ttl, options) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw, err | |||
| } | |||
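| A hedged usage sketch of the compare-and-swap primitive (the key and | |||
| values are illustrative; `c` and imports as in the client sketches above): | |||
| ```go | |||
| // Atomically flip a flag only if nobody changed it since we read it. | |||
| resp, err := c.Get("/flags/maintenance", false, false) | |||
| if err != nil { | |||
| 	log.Fatal(err) | |||
| } | |||
| if _, err := c.CompareAndSwap("/flags/maintenance", "on", 0, | |||
| 	resp.Node.Value, resp.Node.ModifiedIndex); err != nil { | |||
| 	log.Println("lost the race:", err) | |||
| } | |||
| ``` | |||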
| @@ -0,0 +1,55 @@ | |||
| package etcd | |||
| import ( | |||
| "fmt" | |||
| "io/ioutil" | |||
| "log" | |||
| "strings" | |||
| ) | |||
| var logger *etcdLogger | |||
| func SetLogger(l *log.Logger) { | |||
| logger = &etcdLogger{l} | |||
| } | |||
| func GetLogger() *log.Logger { | |||
| return logger.log | |||
| } | |||
| type etcdLogger struct { | |||
| log *log.Logger | |||
| } | |||
| func (p *etcdLogger) Debug(args ...interface{}) { | |||
| msg := "DEBUG: " + fmt.Sprint(args...) | |||
| p.log.Println(msg) | |||
| } | |||
| func (p *etcdLogger) Debugf(f string, args ...interface{}) { | |||
| msg := "DEBUG: " + fmt.Sprintf(f, args...) | |||
| // Append newline if necessary | |||
| if !strings.HasSuffix(msg, "\n") { | |||
| msg = msg + "\n" | |||
| } | |||
| p.log.Print(msg) | |||
| } | |||
| func (p *etcdLogger) Warning(args ...interface{}) { | |||
| msg := "WARNING: " + fmt.Sprint(args...) | |||
| p.log.Println(msg) | |||
| } | |||
| func (p *etcdLogger) Warningf(f string, args ...interface{}) { | |||
| msg := "WARNING: " + fmt.Sprintf(f, args...) | |||
| // Append newline if necessary | |||
| if !strings.HasSuffix(msg, "\n") { | |||
| msg = msg + "\n" | |||
| } | |||
| p.log.Print(msg) | |||
| } | |||
| func init() { | |||
| // Default logger uses the go default log. | |||
| SetLogger(log.New(ioutil.Discard, "go-etcd", log.LstdFlags)) | |||
| } | |||
| @@ -0,0 +1,40 @@ | |||
| package etcd | |||
| // Delete deletes the given key. | |||
| // | |||
| // When recursive is set to false, if the key points to a | |||
| // directory the method will fail. | |||
| // | |||
| // When recursive is set to true, if the key points to a file, | |||
| // the file will be deleted; if the key points to a directory, | |||
| // then everything under the directory (including all child directories) | |||
| // will be deleted. | |||
| func (c *Client) Delete(key string, recursive bool) (*Response, error) { | |||
| raw, err := c.RawDelete(key, recursive, false) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // DeleteDir deletes an empty directory or a key value pair | |||
| func (c *Client) DeleteDir(key string) (*Response, error) { | |||
| raw, err := c.RawDelete(key, false, true) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| func (c *Client) RawDelete(key string, recursive bool, dir bool) (*RawResponse, error) { | |||
| ops := Options{ | |||
| "recursive": recursive, | |||
| "dir": dir, | |||
| } | |||
| return c.delete(key, ops) | |||
| } | |||
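| For example (illustrative key, same assumed client as above): | |||
| ```go | |||
| // Remove a whole subtree; with recursive set to false this call | |||
| // would fail if "/config" were a directory. | |||
| if _, err := c.Delete("/config", true); err != nil { | |||
| 	log.Fatal(err) | |||
| } | |||
| ``` | |||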
| @@ -0,0 +1,49 @@ | |||
| package etcd | |||
| import ( | |||
| "encoding/json" | |||
| "fmt" | |||
| ) | |||
| const ( | |||
| ErrCodeEtcdNotReachable = 501 | |||
| ErrCodeUnhandledHTTPStatus = 502 | |||
| ) | |||
| var ( | |||
| errorMap = map[int]string{ | |||
| ErrCodeEtcdNotReachable: "All the given peers are not reachable", | |||
| } | |||
| ) | |||
| type EtcdError struct { | |||
| ErrorCode int `json:"errorCode"` | |||
| Message string `json:"message"` | |||
| Cause string `json:"cause,omitempty"` | |||
| Index uint64 `json:"index"` | |||
| } | |||
| func (e EtcdError) Error() string { | |||
| return fmt.Sprintf("%v: %v (%v) [%v]", e.ErrorCode, e.Message, e.Cause, e.Index) | |||
| } | |||
| func newError(errorCode int, cause string, index uint64) *EtcdError { | |||
| return &EtcdError{ | |||
| ErrorCode: errorCode, | |||
| Message: errorMap[errorCode], | |||
| Cause: cause, | |||
| Index: index, | |||
| } | |||
| } | |||
| func handleError(b []byte) error { | |||
| etcdErr := new(EtcdError) | |||
| err := json.Unmarshal(b, etcdErr) | |||
| if err != nil { | |||
| logger.Warningf("cannot unmarshal etcd error: %v", err) | |||
| return err | |||
| } | |||
| return etcdErr | |||
| } | |||
| @@ -0,0 +1,32 @@ | |||
| package etcd | |||
| // Get gets the file or directory associated with the given key. | |||
| // If the key points to a directory, files and directories under | |||
| // it will be returned in sorted or unsorted order, depending on | |||
| // the sort flag. | |||
| // If recursive is set to false, contents under child directories | |||
| // will not be returned. | |||
| // If recursive is set to true, all the contents will be returned. | |||
| func (c *Client) Get(key string, sort, recursive bool) (*Response, error) { | |||
| raw, err := c.RawGet(key, sort, recursive) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| func (c *Client) RawGet(key string, sort, recursive bool) (*RawResponse, error) { | |||
| var q bool | |||
| if c.config.Consistency == STRONG_CONSISTENCY { | |||
| q = true | |||
| } | |||
| ops := Options{ | |||
| "recursive": recursive, | |||
| "sorted": sort, | |||
| "quorum": q, | |||
| } | |||
| return c.get(key, ops) | |||
| } | |||
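| A usage sketch (illustrative key; whether the read is a quorum read | |||
| follows the client's consistency setting, as implemented above): | |||
| ```go | |||
| // List a directory, sorted and recursive. | |||
| resp, err := c.Get("/services", true, true) | |||
| if err != nil { | |||
| 	log.Fatal(err) | |||
| } | |||
| for _, n := range resp.Node.Nodes { | |||
| 	log.Println(n.Key, "=", n.Value) | |||
| } | |||
| ``` | |||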
| @@ -0,0 +1,30 @@ | |||
| package etcd | |||
| import "encoding/json" | |||
| type Member struct { | |||
| ID string `json:"id"` | |||
| Name string `json:"name"` | |||
| PeerURLs []string `json:"peerURLs"` | |||
| ClientURLs []string `json:"clientURLs"` | |||
| } | |||
| type memberCollection []Member | |||
| func (c *memberCollection) UnmarshalJSON(data []byte) error { | |||
| d := struct { | |||
| Members []Member | |||
| }{} | |||
| if err := json.Unmarshal(data, &d); err != nil { | |||
| return err | |||
| } | |||
| if d.Members == nil { | |||
| *c = make([]Member, 0) | |||
| return nil | |||
| } | |||
| *c = d.Members | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,72 @@ | |||
| package etcd | |||
| import ( | |||
| "fmt" | |||
| "net/url" | |||
| "reflect" | |||
| ) | |||
| type Options map[string]interface{} | |||
| // An internally-used data structure that represents a mapping | |||
| // between valid options and their kinds | |||
| type validOptions map[string]reflect.Kind | |||
| // Valid options for GET, PUT, POST, DELETE | |||
| // Using CAPITALIZED_UNDERSCORE to emphasize that these | |||
| // values are meant to be used as constants. | |||
| var ( | |||
| VALID_GET_OPTIONS = validOptions{ | |||
| "recursive": reflect.Bool, | |||
| "quorum": reflect.Bool, | |||
| "sorted": reflect.Bool, | |||
| "wait": reflect.Bool, | |||
| "waitIndex": reflect.Uint64, | |||
| } | |||
| VALID_PUT_OPTIONS = validOptions{ | |||
| "prevValue": reflect.String, | |||
| "prevIndex": reflect.Uint64, | |||
| "prevExist": reflect.Bool, | |||
| "dir": reflect.Bool, | |||
| } | |||
| VALID_POST_OPTIONS = validOptions{} | |||
| VALID_DELETE_OPTIONS = validOptions{ | |||
| "recursive": reflect.Bool, | |||
| "dir": reflect.Bool, | |||
| "prevValue": reflect.String, | |||
| "prevIndex": reflect.Uint64, | |||
| } | |||
| ) | |||
| // toParameters converts options to a string of HTTP query parameters | |||
| func (ops Options) toParameters(validOps validOptions) (string, error) { | |||
| p := "?" | |||
| values := url.Values{} | |||
| if ops == nil { | |||
| return "", nil | |||
| } | |||
| for k, v := range ops { | |||
| // Check if the given option is valid (that it exists) | |||
| kind := validOps[k] | |||
| if kind == reflect.Invalid { | |||
| return "", fmt.Errorf("Invalid option: %v", k) | |||
| } | |||
| // Check if the given option is of the valid type | |||
| t := reflect.TypeOf(v) | |||
| if kind != t.Kind() { | |||
| return "", fmt.Errorf("Option %s should be of %v kind, not of %v kind.", | |||
| k, kind, t.Kind()) | |||
| } | |||
| values.Set(k, fmt.Sprintf("%v", v)) | |||
| } | |||
| p += values.Encode() | |||
| return p, nil | |||
| } | |||
| @@ -0,0 +1,403 @@ | |||
| package etcd | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "io" | |||
| "io/ioutil" | |||
| "net/http" | |||
| "net/url" | |||
| "path" | |||
| "strings" | |||
| "sync" | |||
| "time" | |||
| ) | |||
| // Errors introduced by handling requests | |||
| var ( | |||
| ErrRequestCancelled = errors.New("sending request is cancelled") | |||
| ) | |||
| type RawRequest struct { | |||
| Method string | |||
| RelativePath string | |||
| Values url.Values | |||
| Cancel <-chan bool | |||
| } | |||
| // NewRawRequest returns a new RawRequest | |||
| func NewRawRequest(method, relativePath string, values url.Values, cancel <-chan bool) *RawRequest { | |||
| return &RawRequest{ | |||
| Method: method, | |||
| RelativePath: relativePath, | |||
| Values: values, | |||
| Cancel: cancel, | |||
| } | |||
| } | |||
| // getCancelable issues a cancelable GET request | |||
| func (c *Client) getCancelable(key string, options Options, | |||
| cancel <-chan bool) (*RawResponse, error) { | |||
| logger.Debugf("get %s [%s]", key, c.cluster.pick()) | |||
| p := keyToPath(key) | |||
| str, err := options.toParameters(VALID_GET_OPTIONS) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| p += str | |||
| req := NewRawRequest("GET", p, nil, cancel) | |||
| resp, err := c.SendRequest(req) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // get issues a GET request | |||
| func (c *Client) get(key string, options Options) (*RawResponse, error) { | |||
| return c.getCancelable(key, options, nil) | |||
| } | |||
| // put issues a PUT request | |||
| func (c *Client) put(key string, value string, ttl uint64, | |||
| options Options) (*RawResponse, error) { | |||
| logger.Debugf("put %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.pick()) | |||
| p := keyToPath(key) | |||
| str, err := options.toParameters(VALID_PUT_OPTIONS) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| p += str | |||
| req := NewRawRequest("PUT", p, buildValues(value, ttl), nil) | |||
| resp, err := c.SendRequest(req) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // post issues a POST request | |||
| func (c *Client) post(key string, value string, ttl uint64) (*RawResponse, error) { | |||
| logger.Debugf("post %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.pick()) | |||
| p := keyToPath(key) | |||
| req := NewRawRequest("POST", p, buildValues(value, ttl), nil) | |||
| resp, err := c.SendRequest(req) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // delete issues a DELETE request | |||
| func (c *Client) delete(key string, options Options) (*RawResponse, error) { | |||
| logger.Debugf("delete %s [%s]", key, c.cluster.pick()) | |||
| p := keyToPath(key) | |||
| str, err := options.toParameters(VALID_DELETE_OPTIONS) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| p += str | |||
| req := NewRawRequest("DELETE", p, nil, nil) | |||
| resp, err := c.SendRequest(req) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return resp, nil | |||
| } | |||
| // SendRequest sends an HTTP request and returns a Response as defined by etcd | |||
| func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) { | |||
| var req *http.Request | |||
| var resp *http.Response | |||
| var httpPath string | |||
| var err error | |||
| var respBody []byte | |||
| var numReqs = 1 | |||
| checkRetry := c.CheckRetry | |||
| if checkRetry == nil { | |||
| checkRetry = DefaultCheckRetry | |||
| } | |||
| cancelled := make(chan bool, 1) | |||
| reqLock := new(sync.Mutex) | |||
| if rr.Cancel != nil { | |||
| cancelRoutine := make(chan bool) | |||
| defer close(cancelRoutine) | |||
| go func() { | |||
| select { | |||
| case <-rr.Cancel: | |||
| cancelled <- true | |||
| logger.Debug("send.request is cancelled") | |||
| case <-cancelRoutine: | |||
| return | |||
| } | |||
| // Repeat canceling request until this thread is stopped | |||
| // because we have no idea about whether it succeeds. | |||
| for { | |||
| reqLock.Lock() | |||
| c.httpClient.Transport.(*http.Transport).CancelRequest(req) | |||
| reqLock.Unlock() | |||
| select { | |||
| case <-time.After(100 * time.Millisecond): | |||
| case <-cancelRoutine: | |||
| return | |||
| } | |||
| } | |||
| }() | |||
| } | |||
| // If we connect to a follower and consistency is required, retry until | |||
| // we connect to a leader | |||
| sleep := 25 * time.Millisecond | |||
| maxSleep := time.Second | |||
| for attempt := 0; ; attempt++ { | |||
| if attempt > 0 { | |||
| select { | |||
| case <-cancelled: | |||
| return nil, ErrRequestCancelled | |||
| case <-time.After(sleep): | |||
| sleep = sleep * 2 | |||
| if sleep > maxSleep { | |||
| sleep = maxSleep | |||
| } | |||
| } | |||
| } | |||
| logger.Debug("Connecting to etcd: attempt ", attempt+1, " for ", rr.RelativePath) | |||
| // get httpPath if not set | |||
| if httpPath == "" { | |||
| httpPath = c.getHttpPath(rr.RelativePath) | |||
| } | |||
| // Record an equivalent cURL command if the cURL channel is open | |||
| if c.cURLch != nil { | |||
| command := fmt.Sprintf("curl -X %s %s", rr.Method, httpPath) | |||
| for key, value := range rr.Values { | |||
| command += fmt.Sprintf(" -d %s=%s", key, value[0]) | |||
| } | |||
| if c.credentials != nil { | |||
| command += fmt.Sprintf(" -u %s", c.credentials.username) | |||
| } | |||
| c.sendCURL(command) | |||
| } | |||
| logger.Debug("send.request.to ", httpPath, " | method ", rr.Method) | |||
| req, err := func() (*http.Request, error) { | |||
| reqLock.Lock() | |||
| defer reqLock.Unlock() | |||
| if rr.Values == nil { | |||
| if req, err = http.NewRequest(rr.Method, httpPath, nil); err != nil { | |||
| return nil, err | |||
| } | |||
| } else { | |||
| body := strings.NewReader(rr.Values.Encode()) | |||
| if req, err = http.NewRequest(rr.Method, httpPath, body); err != nil { | |||
| return nil, err | |||
| } | |||
| req.Header.Set("Content-Type", | |||
| "application/x-www-form-urlencoded; param=value") | |||
| } | |||
| return req, nil | |||
| }() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if c.credentials != nil { | |||
| req.SetBasicAuth(c.credentials.username, c.credentials.password) | |||
| } | |||
| resp, err = c.httpClient.Do(req) | |||
| // clear previous httpPath | |||
| httpPath = "" | |||
| defer func() { | |||
| if resp != nil { | |||
| resp.Body.Close() | |||
| } | |||
| }() | |||
| // If the request was cancelled, return ErrRequestCancelled directly | |||
| select { | |||
| case <-cancelled: | |||
| return nil, ErrRequestCancelled | |||
| default: | |||
| } | |||
| numReqs++ | |||
| // network error, change a machine! | |||
| if err != nil { | |||
| logger.Debug("network error: ", err.Error()) | |||
| lastResp := http.Response{} | |||
| if checkErr := checkRetry(c.cluster, numReqs, lastResp, err); checkErr != nil { | |||
| return nil, checkErr | |||
| } | |||
| c.cluster.failure() | |||
| continue | |||
| } | |||
| // if there is no error, it should receive response | |||
| logger.Debug("recv.response.from ", httpPath) | |||
| if validHttpStatusCode[resp.StatusCode] { | |||
| // try to read byte code and break the loop | |||
| respBody, err = ioutil.ReadAll(resp.Body) | |||
| if err == nil { | |||
| logger.Debug("recv.success ", httpPath) | |||
| break | |||
| } | |||
| // a ReadAll error may be caused by a cancelled request | |||
| select { | |||
| case <-cancelled: | |||
| return nil, ErrRequestCancelled | |||
| default: | |||
| } | |||
| if err == io.ErrUnexpectedEOF { | |||
| // underlying connection was closed prematurely, probably by timeout | |||
| // TODO: empty body or unexpectedEOF can cause http.Transport to get hosed; | |||
| // this allows the client to detect that and take evasive action. Need | |||
| // to revisit once code.google.com/p/go/issues/detail?id=8648 gets fixed. | |||
| respBody = []byte{} | |||
| break | |||
| } | |||
| } | |||
| if resp.StatusCode == http.StatusTemporaryRedirect { | |||
| u, err := resp.Location() | |||
| if err != nil { | |||
| logger.Warning(err) | |||
| } else { | |||
| // set httpPath for following redirection | |||
| httpPath = u.String() | |||
| } | |||
| resp.Body.Close() | |||
| continue | |||
| } | |||
| if checkErr := checkRetry(c.cluster, numReqs, *resp, | |||
| errors.New("Unexpected HTTP status code")); checkErr != nil { | |||
| return nil, checkErr | |||
| } | |||
| resp.Body.Close() | |||
| } | |||
| r := &RawResponse{ | |||
| StatusCode: resp.StatusCode, | |||
| Body: respBody, | |||
| Header: resp.Header, | |||
| } | |||
| return r, nil | |||
| } | |||
| // DefaultCheckRetry defines the retry behaviour for bad HTTP requests. | |||
| // If we have retried 2 * (number of machines) times, stop retrying. | |||
| // If the status code is InternalServerError, sleep for 200ms. | |||
| func DefaultCheckRetry(cluster *Cluster, numReqs int, lastResp http.Response, | |||
| err error) error { | |||
| if numReqs > 2*len(cluster.Machines) { | |||
| errStr := fmt.Sprintf("failed to propose on members %v twice [last error: %v]", cluster.Machines, err) | |||
| return newError(ErrCodeEtcdNotReachable, errStr, 0) | |||
| } | |||
| if isEmptyResponse(lastResp) { | |||
| // always retry if it failed to get response from one machine | |||
| return nil | |||
| } | |||
| if !shouldRetry(lastResp) { | |||
| body := []byte("nil") | |||
| if lastResp.Body != nil { | |||
| if b, err := ioutil.ReadAll(lastResp.Body); err == nil { | |||
| body = b | |||
| } | |||
| } | |||
| errStr := fmt.Sprintf("unhandled http status [%s] with body [%s]", http.StatusText(lastResp.StatusCode), body) | |||
| return newError(ErrCodeUnhandledHTTPStatus, errStr, 0) | |||
| } | |||
| // sleep some time and expect leader election finish | |||
| time.Sleep(time.Millisecond * 200) | |||
| logger.Warning("bad response status code ", lastResp.StatusCode) | |||
| return nil | |||
| } | |||
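| Callers can swap in their own policy through the client's exported | |||
| CheckRetry field, which SendRequest consults above; a hedged sketch | |||
| (assumes "fmt" and "net/http" are imported): | |||
| ```go | |||
| // Give up after one pass over the cluster instead of two. | |||
| c.CheckRetry = func(cluster *etcd.Cluster, numReqs int, | |||
| 	lastResp http.Response, err error) error { | |||
| 	if numReqs > len(cluster.Machines) { | |||
| 		return fmt.Errorf("giving up after %d requests: %v", numReqs, err) | |||
| 	} | |||
| 	return etcd.DefaultCheckRetry(cluster, numReqs, lastResp, err) | |||
| } | |||
| ``` | |||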
| func isEmptyResponse(r http.Response) bool { return r.StatusCode == 0 } | |||
| // shouldRetry returns whether the response deserves a retry. | |||
| func shouldRetry(r http.Response) bool { | |||
| // TODO: only retry when the cluster is in leader election | |||
| // We cannot do it exactly because etcd doesn't support it well. | |||
| return r.StatusCode == http.StatusInternalServerError | |||
| } | |||
| func (c *Client) getHttpPath(s ...string) string { | |||
| fullPath := c.cluster.pick() + "/" + version | |||
| for _, seg := range s { | |||
| fullPath = fullPath + "/" + seg | |||
| } | |||
| return fullPath | |||
| } | |||
| // buildValues builds a url.Values map according to the given value and ttl | |||
| func buildValues(value string, ttl uint64) url.Values { | |||
| v := url.Values{} | |||
| if value != "" { | |||
| v.Set("value", value) | |||
| } | |||
| if ttl > 0 { | |||
| v.Set("ttl", fmt.Sprintf("%v", ttl)) | |||
| } | |||
| return v | |||
| } | |||
| // keyToPath converts a key string to an HTTP path, excluding the version | |||
| // prefix but including URL escaping, | |||
| // for example: key[foo] -> path[keys/foo] | |||
| // key[/%z] -> path[keys/%25z] | |||
| // key[/] -> path[keys/] | |||
| func keyToPath(key string) string { | |||
| // URL-escape our key, except for slashes | |||
| p := strings.Replace(url.QueryEscape(path.Join("keys", key)), "%2F", "/", -1) | |||
| // corner case: if the key is "/", "//", etc., | |||
| // path.Join will strip the trailing "/", | |||
| // so we need to add it back | |||
| if p == "keys" { | |||
| p = "keys/" | |||
| } | |||
| return p | |||
| } | |||
| @@ -0,0 +1,93 @@ | |||
| package etcd | |||
| //go:generate codecgen -d 1978 -o response.generated.go response.go | |||
| import ( | |||
| "net/http" | |||
| "strconv" | |||
| "time" | |||
| "github.com/ugorji/go/codec" | |||
| ) | |||
| const ( | |||
| rawResponse = iota | |||
| normalResponse | |||
| ) | |||
| type responseType int | |||
| type RawResponse struct { | |||
| StatusCode int | |||
| Body []byte | |||
| Header http.Header | |||
| } | |||
| var ( | |||
| validHttpStatusCode = map[int]bool{ | |||
| http.StatusCreated: true, | |||
| http.StatusOK: true, | |||
| http.StatusBadRequest: true, | |||
| http.StatusNotFound: true, | |||
| http.StatusPreconditionFailed: true, | |||
| http.StatusForbidden: true, | |||
| http.StatusUnauthorized: true, | |||
| } | |||
| ) | |||
| // Unmarshal parses RawResponse and stores the result in Response | |||
| func (rr *RawResponse) Unmarshal() (*Response, error) { | |||
| if rr.StatusCode != http.StatusOK && rr.StatusCode != http.StatusCreated { | |||
| return nil, handleError(rr.Body) | |||
| } | |||
| resp := new(Response) | |||
| err := codec.NewDecoderBytes(rr.Body, new(codec.JsonHandle)).Decode(resp) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| // attach index and term to response | |||
| resp.EtcdIndex, _ = strconv.ParseUint(rr.Header.Get("X-Etcd-Index"), 10, 64) | |||
| resp.RaftIndex, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Index"), 10, 64) | |||
| resp.RaftTerm, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Term"), 10, 64) | |||
| return resp, nil | |||
| } | |||
| type Response struct { | |||
| Action string `json:"action"` | |||
| Node *Node `json:"node"` | |||
| PrevNode *Node `json:"prevNode,omitempty"` | |||
| EtcdIndex uint64 `json:"etcdIndex"` | |||
| RaftIndex uint64 `json:"raftIndex"` | |||
| RaftTerm uint64 `json:"raftTerm"` | |||
| } | |||
| type Node struct { | |||
| Key string `json:"key,omitempty"` | |||
| Value string `json:"value,omitempty"` | |||
| Dir bool `json:"dir,omitempty"` | |||
| Expiration *time.Time `json:"expiration,omitempty"` | |||
| TTL int64 `json:"ttl,omitempty"` | |||
| Nodes Nodes `json:"nodes,omitempty"` | |||
| ModifiedIndex uint64 `json:"modifiedIndex,omitempty"` | |||
| CreatedIndex uint64 `json:"createdIndex,omitempty"` | |||
| } | |||
| type Nodes []*Node | |||
| // interfaces for sorting | |||
| func (ns Nodes) Len() int { | |||
| return len(ns) | |||
| } | |||
| func (ns Nodes) Less(i, j int) bool { | |||
| return ns[i].Key < ns[j].Key | |||
| } | |||
| func (ns Nodes) Swap(i, j int) { | |||
| ns[i], ns[j] = ns[j], ns[i] | |||
| } | |||
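| These methods let callers sort a directory listing with the standard | |||
| library; a sketch (assuming "sort" is imported and resp is a *Response): | |||
| ```go | |||
| sort.Sort(resp.Node.Nodes) // orders child nodes by key | |||
| ``` | |||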
| @@ -0,0 +1,137 @@ | |||
| package etcd | |||
| // Set sets the given key to the given value. | |||
| // It will create a new key value pair or replace the old one. | |||
| // It will not replace an existing directory. | |||
| func (c *Client) Set(key string, value string, ttl uint64) (*Response, error) { | |||
| raw, err := c.RawSet(key, value, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // SetDir sets the given key to a directory. | |||
| // It will create a new directory or replace the old key value pair by a directory. | |||
| // It will not replace an existing directory. | |||
| func (c *Client) SetDir(key string, ttl uint64) (*Response, error) { | |||
| raw, err := c.RawSetDir(key, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // CreateDir creates a directory. It succeeds only if | |||
| // the given key does not yet exist. | |||
| func (c *Client) CreateDir(key string, ttl uint64) (*Response, error) { | |||
| raw, err := c.RawCreateDir(key, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // UpdateDir updates the given directory. It succeeds only if the | |||
| // given key already exists. | |||
| func (c *Client) UpdateDir(key string, ttl uint64) (*Response, error) { | |||
| raw, err := c.RawUpdateDir(key, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // Create creates a file with the given value under the given key. It succeeds | |||
| // only if the given key does not yet exist. | |||
| func (c *Client) Create(key string, value string, ttl uint64) (*Response, error) { | |||
| raw, err := c.RawCreate(key, value, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // CreateInOrder creates a file with a key that's guaranteed to be higher than other | |||
| // keys in the given directory. It is useful for creating queues. | |||
| func (c *Client) CreateInOrder(dir string, value string, ttl uint64) (*Response, error) { | |||
| raw, err := c.RawCreateInOrder(dir, value, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| // Update updates the given key to the given value. It succeeds only if the | |||
| // given key already exists. | |||
| func (c *Client) Update(key string, value string, ttl uint64) (*Response, error) { | |||
| raw, err := c.RawUpdate(key, value, ttl) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| func (c *Client) RawUpdateDir(key string, ttl uint64) (*RawResponse, error) { | |||
| ops := Options{ | |||
| "prevExist": true, | |||
| "dir": true, | |||
| } | |||
| return c.put(key, "", ttl, ops) | |||
| } | |||
| func (c *Client) RawCreateDir(key string, ttl uint64) (*RawResponse, error) { | |||
| ops := Options{ | |||
| "prevExist": false, | |||
| "dir": true, | |||
| } | |||
| return c.put(key, "", ttl, ops) | |||
| } | |||
| func (c *Client) RawSet(key string, value string, ttl uint64) (*RawResponse, error) { | |||
| return c.put(key, value, ttl, nil) | |||
| } | |||
| func (c *Client) RawSetDir(key string, ttl uint64) (*RawResponse, error) { | |||
| ops := Options{ | |||
| "dir": true, | |||
| } | |||
| return c.put(key, "", ttl, ops) | |||
| } | |||
| func (c *Client) RawUpdate(key string, value string, ttl uint64) (*RawResponse, error) { | |||
| ops := Options{ | |||
| "prevExist": true, | |||
| } | |||
| return c.put(key, value, ttl, ops) | |||
| } | |||
| func (c *Client) RawCreate(key string, value string, ttl uint64) (*RawResponse, error) { | |||
| ops := Options{ | |||
| "prevExist": false, | |||
| } | |||
| return c.put(key, value, ttl, ops) | |||
| } | |||
| func (c *Client) RawCreateInOrder(dir string, value string, ttl uint64) (*RawResponse, error) { | |||
| return c.post(dir, value, ttl) | |||
| } | |||
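| A hedged end-to-end sketch of the create/update/set distinction | |||
| (illustrative key and values; client as above): | |||
| ```go | |||
| // Create fails if the key exists; Update fails if it does not; | |||
| // Set succeeds either way. A ttl of 0 means no expiration. | |||
| if _, err := c.Create("/greeting", "hello", 0); err != nil { | |||
| 	log.Fatal(err) | |||
| } | |||
| if _, err := c.Update("/greeting", "hi", 0); err != nil { | |||
| 	log.Fatal(err) | |||
| } | |||
| if _, err := c.Set("/greeting", "hey", 60); err != nil { // expires in 60s | |||
| 	log.Fatal(err) | |||
| } | |||
| ``` | |||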
| @@ -0,0 +1,19 @@ | |||
| package etcd | |||
| import ( | |||
| "math/rand" | |||
| ) | |||
| func shuffleStringSlice(cards []string) []string { | |||
| size := len(cards) | |||
| // No need to copy if nothing changed. | |||
| if size <= 1 { | |||
| return cards | |||
| } | |||
| shuffled := make([]string, size) | |||
| index := rand.Perm(size) | |||
| for i := range cards { | |||
| shuffled[index[i]] = cards[i] | |||
| } | |||
| return shuffled | |||
| } | |||
| @@ -0,0 +1,6 @@ | |||
| package etcd | |||
| const ( | |||
| version = "v2" | |||
| packageVersion = "v2.0.0+git" | |||
| ) | |||
| @@ -0,0 +1,103 @@ | |||
| package etcd | |||
| import ( | |||
| "errors" | |||
| ) | |||
| // Errors introduced by the Watch command. | |||
| var ( | |||
| ErrWatchStoppedByUser = errors.New("Watch stopped by the user via stop channel") | |||
| ) | |||
| // If recursive is set to true the watch returns the first change under the given | |||
| // prefix since the given index. | |||
| // | |||
| // If recursive is set to false the watch returns the first change to the given key | |||
| // since the given index. | |||
| // | |||
| // To watch for the latest change, set waitIndex = 0. | |||
| // | |||
| // If a receiver channel is given, the watch is long-term: Watch blocks | |||
| // sending to the channel; once a value is received from it, the watch | |||
| // moves on to the next change under the prefix. If a stop channel is | |||
| // given, the client can end the long-term watch by closing it. | |||
| func (c *Client) Watch(prefix string, waitIndex uint64, recursive bool, | |||
| receiver chan *Response, stop chan bool) (*Response, error) { | |||
| logger.Debugf("watch %s [%s]", prefix, c.cluster.Leader) | |||
| if receiver == nil { | |||
| raw, err := c.watchOnce(prefix, waitIndex, recursive, stop) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return raw.Unmarshal() | |||
| } | |||
| defer close(receiver) | |||
| for { | |||
| raw, err := c.watchOnce(prefix, waitIndex, recursive, stop) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| resp, err := raw.Unmarshal() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| waitIndex = resp.Node.ModifiedIndex + 1 | |||
| receiver <- resp | |||
| } | |||
| } | |||
| func (c *Client) RawWatch(prefix string, waitIndex uint64, recursive bool, | |||
| receiver chan *RawResponse, stop chan bool) (*RawResponse, error) { | |||
| logger.Debugf("rawWatch %s [%s]", prefix, c.cluster.Leader) | |||
| if receiver == nil { | |||
| return c.watchOnce(prefix, waitIndex, recursive, stop) | |||
| } | |||
| for { | |||
| raw, err := c.watchOnce(prefix, waitIndex, recursive, stop) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| resp, err := raw.Unmarshal() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| waitIndex = resp.Node.ModifiedIndex + 1 | |||
| receiver <- raw | |||
| } | |||
| } | |||
| // helper func | |||
| // return when there is change under the given prefix | |||
| func (c *Client) watchOnce(key string, waitIndex uint64, recursive bool, stop chan bool) (*RawResponse, error) { | |||
| options := Options{ | |||
| "wait": true, | |||
| } | |||
| if waitIndex > 0 { | |||
| options["waitIndex"] = waitIndex | |||
| } | |||
| if recursive { | |||
| options["recursive"] = true | |||
| } | |||
| resp, err := c.getCancelable(key, options, stop) | |||
| if err == ErrRequestCancelled { | |||
| return nil, ErrWatchStoppedByUser | |||
| } | |||
| return resp, err | |||
| } | |||
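| A hedged sketch of a long-term watch (illustrative prefix; closing the | |||
| stop channel ends the watch, and Watch closes the receiver on return): | |||
| ```go | |||
| receiver := make(chan *etcd.Response) | |||
| stop := make(chan bool) | |||
| go func() { | |||
| 	// Blocks until the watch is stopped or fails. | |||
| 	if _, err := c.Watch("/services", 0, true, receiver, stop); err != nil { | |||
| 		log.Println("watch ended:", err) | |||
| 	} | |||
| }() | |||
| for resp := range receiver { | |||
| 	log.Println(resp.Action, resp.Node.Key) | |||
| } | |||
| ``` | |||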
| @@ -0,0 +1,23 @@ | |||
| Copyright (c) 2014, Elazar Leibovich | |||
| All rights reserved. | |||
| Redistribution and use in source and binary forms, with or without | |||
| modification, are permitted provided that the following conditions are met: | |||
| * Redistributions of source code must retain the above copyright notice, this | |||
| list of conditions and the following disclaimer. | |||
| * Redistributions in binary form must reproduce the above copyright notice, | |||
| this list of conditions and the following disclaimer in the documentation | |||
| and/or other materials provided with the distribution. | |||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |||
| AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |||
| IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | |||
| DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE | |||
| FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |||
| DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | |||
| SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | |||
| CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | |||
| OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| @@ -0,0 +1,46 @@ | |||
| # go-bindata-assetfs | |||
| Serve embedded files from [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) with `net/http`. | |||
| [GoDoc](http://godoc.org/github.com/elazarl/go-bindata-assetfs) | |||
| ### Installation | |||
| Install with | |||
| $ go get github.com/jteeuwen/go-bindata/... | |||
| $ go get github.com/elazarl/go-bindata-assetfs/... | |||
| ### Creating embedded data | |||
| Usage is identical to [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata), | |||
| except that instead of running `go-bindata` you run `go-bindata-assetfs`. | |||
| The tool will create a `bindata_assetfs.go` file, which contains the embedded data. | |||
| A typical use case is | |||
| $ go-bindata-assetfs data/... | |||
| ### Using assetFS in your code | |||
| The generated file provides an `assetFS()` function that returns an `http.FileSystem` | |||
| wrapping the embedded files. What you usually want to do is: | |||
| http.Handle("/", http.FileServer(assetFS())) | |||
| This would run an HTTP server serving the embedded files. | |||
| ## Without running the binary tool | |||
| You can always just run the `go-bindata` tool, and then | |||
| use | |||
| import "github.com/elazarl/go-bindata-assetfs" | |||
| ... | |||
| http.Handle("/", | |||
| http.FileServer( | |||
| &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo, Prefix: "data"})) | |||
| to serve files embedded from the `data` directory. | |||
| @@ -0,0 +1,158 @@ | |||
| package assetfs | |||
| import ( | |||
| "bytes" | |||
| "errors" | |||
| "io" | |||
| "io/ioutil" | |||
| "net/http" | |||
| "os" | |||
| "path" | |||
| "path/filepath" | |||
| "time" | |||
| ) | |||
| var ( | |||
| defaultFileTimestamp = time.Now() | |||
| ) | |||
| // FakeFile implements os.FileInfo interface for a given path and size | |||
| type FakeFile struct { | |||
| // Path is the path of this file | |||
| Path string | |||
| // Dir marks whether the path is a directory | |||
| Dir bool | |||
| // Len is the length of the fake file, zero if it is a directory | |||
| Len int64 | |||
| // Timestamp is the ModTime of this file | |||
| Timestamp time.Time | |||
| } | |||
| func (f *FakeFile) Name() string { | |||
| _, name := filepath.Split(f.Path) | |||
| return name | |||
| } | |||
| func (f *FakeFile) Mode() os.FileMode { | |||
| mode := os.FileMode(0644) | |||
| if f.Dir { | |||
| return mode | os.ModeDir | |||
| } | |||
| return mode | |||
| } | |||
| func (f *FakeFile) ModTime() time.Time { | |||
| return f.Timestamp | |||
| } | |||
| func (f *FakeFile) Size() int64 { | |||
| return f.Len | |||
| } | |||
| func (f *FakeFile) IsDir() bool { | |||
| return f.Mode().IsDir() | |||
| } | |||
| func (f *FakeFile) Sys() interface{} { | |||
| return nil | |||
| } | |||
| // AssetFile implements http.File interface for a no-directory file with content | |||
| type AssetFile struct { | |||
| *bytes.Reader | |||
| io.Closer | |||
| FakeFile | |||
| } | |||
| func NewAssetFile(name string, content []byte, timestamp time.Time) *AssetFile { | |||
| if timestamp.IsZero() { | |||
| timestamp = defaultFileTimestamp | |||
| } | |||
| return &AssetFile{ | |||
| bytes.NewReader(content), | |||
| ioutil.NopCloser(nil), | |||
| FakeFile{name, false, int64(len(content)), timestamp}} | |||
| } | |||
| func (f *AssetFile) Readdir(count int) ([]os.FileInfo, error) { | |||
| return nil, errors.New("not a directory") | |||
| } | |||
| func (f *AssetFile) Size() int64 { | |||
| return f.FakeFile.Size() | |||
| } | |||
| func (f *AssetFile) Stat() (os.FileInfo, error) { | |||
| return f, nil | |||
| } | |||
| // AssetDirectory implements http.File interface for a directory | |||
| type AssetDirectory struct { | |||
| AssetFile | |||
| ChildrenRead int | |||
| Children []os.FileInfo | |||
| } | |||
| func NewAssetDirectory(name string, children []string, fs *AssetFS) *AssetDirectory { | |||
| fileinfos := make([]os.FileInfo, 0, len(children)) | |||
| for _, child := range children { | |||
| _, err := fs.AssetDir(filepath.Join(name, child)) | |||
| fileinfos = append(fileinfos, &FakeFile{child, err == nil, 0, time.Time{}}) | |||
| } | |||
| return &AssetDirectory{ | |||
| AssetFile{ | |||
| bytes.NewReader(nil), | |||
| ioutil.NopCloser(nil), | |||
| FakeFile{name, true, 0, time.Time{}}, | |||
| }, | |||
| 0, | |||
| fileinfos} | |||
| } | |||
| func (f *AssetDirectory) Readdir(count int) ([]os.FileInfo, error) { | |||
| if count <= 0 { | |||
| return f.Children, nil | |||
| } | |||
| if f.ChildrenRead+count > len(f.Children) { | |||
| count = len(f.Children) - f.ChildrenRead | |||
| } | |||
| rv := f.Children[f.ChildrenRead : f.ChildrenRead+count] | |||
| f.ChildrenRead += count | |||
| return rv, nil | |||
| } | |||
| func (f *AssetDirectory) Stat() (os.FileInfo, error) { | |||
| return f, nil | |||
| } | |||
| // AssetFS implements http.FileSystem, allowing | |||
| // embedded files to be served from net/http package. | |||
| type AssetFS struct { | |||
| // Asset should return content of file in path if exists | |||
| Asset func(path string) ([]byte, error) | |||
| // AssetDir should return list of files in the path | |||
| AssetDir func(path string) ([]string, error) | |||
| // AssetInfo should return the info of file in path if exists | |||
| AssetInfo func(path string) (os.FileInfo, error) | |||
| // Prefix would be prepended to http requests | |||
| Prefix string | |||
| } | |||
| func (fs *AssetFS) Open(name string) (http.File, error) { | |||
| name = path.Join(fs.Prefix, name) | |||
| if len(name) > 0 && name[0] == '/' { | |||
| name = name[1:] | |||
| } | |||
| if b, err := fs.Asset(name); err == nil { | |||
| timestamp := defaultFileTimestamp | |||
| if info, err := fs.AssetInfo(name); err == nil { | |||
| timestamp = info.ModTime() | |||
| } | |||
| return NewAssetFile(name, b, timestamp), nil | |||
| } | |||
| if children, err := fs.AssetDir(name); err == nil { | |||
| return NewAssetDirectory(name, children, fs), nil | |||
| } else { | |||
| return nil, err | |||
| } | |||
| } | |||
| @@ -0,0 +1,13 @@ | |||
| // Package assetfs allows packages to serve static content embedded | |||
| // with the go-bindata tool with the standard net/http package. | |||
| // | |||
| // See https://github.com/jteeuwen/go-bindata for more information | |||
| // about embedding binary data with go-bindata. | |||
| // | |||
| // Usage example, after running | |||
| // $ go-bindata data/... | |||
| // use: | |||
| // http.Handle("/", | |||
| // http.FileServer( | |||
| // &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"})) | |||
| package assetfs | |||
| @@ -0,0 +1,191 @@ | |||
| Apache License | |||
| Version 2.0, January 2004 | |||
| http://www.apache.org/licenses/ | |||
| TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | |||
| 1. Definitions. | |||
| "License" shall mean the terms and conditions for use, reproduction, and | |||
| distribution as defined by Sections 1 through 9 of this document. | |||
| "Licensor" shall mean the copyright owner or entity authorized by the copyright | |||
| owner that is granting the License. | |||
| "Legal Entity" shall mean the union of the acting entity and all other entities | |||
| that control, are controlled by, or are under common control with that entity. | |||
| For the purposes of this definition, "control" means (i) the power, direct or | |||
| indirect, to cause the direction or management of such entity, whether by | |||
| contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the | |||
| outstanding shares, or (iii) beneficial ownership of such entity. | |||
| "You" (or "Your") shall mean an individual or Legal Entity exercising | |||
| permissions granted by this License. | |||
| "Source" form shall mean the preferred form for making modifications, including | |||
| but not limited to software source code, documentation source, and configuration | |||
| files. | |||
| "Object" form shall mean any form resulting from mechanical transformation or | |||
| translation of a Source form, including but not limited to compiled object code, | |||
| generated documentation, and conversions to other media types. | |||
| "Work" shall mean the work of authorship, whether in Source or Object form, made | |||
| available under the License, as indicated by a copyright notice that is included | |||
| in or attached to the work (an example is provided in the Appendix below). | |||
| "Derivative Works" shall mean any work, whether in Source or Object form, that | |||
| is based on (or derived from) the Work and for which the editorial revisions, | |||
| annotations, elaborations, or other modifications represent, as a whole, an | |||
| original work of authorship. For the purposes of this License, Derivative Works | |||
| shall not include works that remain separable from, or merely link (or bind by | |||
| name) to the interfaces of, the Work and Derivative Works thereof. | |||
| "Contribution" shall mean any work of authorship, including the original version | |||
| of the Work and any modifications or additions to that Work or Derivative Works | |||
| thereof, that is intentionally submitted to Licensor for inclusion in the Work | |||
| by the copyright owner or by an individual or Legal Entity authorized to submit | |||
| on behalf of the copyright owner. For the purposes of this definition, | |||
| "submitted" means any form of electronic, verbal, or written communication sent | |||
| to the Licensor or its representatives, including but not limited to | |||
| communication on electronic mailing lists, source code control systems, and | |||
| issue tracking systems that are managed by, or on behalf of, the Licensor for | |||
| the purpose of discussing and improving the Work, but excluding communication | |||
| that is conspicuously marked or otherwise designated in writing by the copyright | |||
| owner as "Not a Contribution." | |||
| "Contributor" shall mean Licensor and any individual or Legal Entity on behalf | |||
| of whom a Contribution has been received by Licensor and subsequently | |||
| incorporated within the Work. | |||
| 2. Grant of Copyright License. | |||
| Subject to the terms and conditions of this License, each Contributor hereby | |||
| grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, | |||
| irrevocable copyright license to reproduce, prepare Derivative Works of, | |||
| publicly display, publicly perform, sublicense, and distribute the Work and such | |||
| Derivative Works in Source or Object form. | |||
| 3. Grant of Patent License. | |||
| Subject to the terms and conditions of this License, each Contributor hereby | |||
| grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, | |||
| irrevocable (except as stated in this section) patent license to make, have | |||
| made, use, offer to sell, sell, import, and otherwise transfer the Work, where | |||
| such license applies only to those patent claims licensable by such Contributor | |||
| that are necessarily infringed by their Contribution(s) alone or by combination | |||
| of their Contribution(s) with the Work to which such Contribution(s) was | |||
| submitted. If You institute patent litigation against any entity (including a | |||
| cross-claim or counterclaim in a lawsuit) alleging that the Work or a | |||
| Contribution incorporated within the Work constitutes direct or contributory | |||
| patent infringement, then any patent licenses granted to You under this License | |||
| for that Work shall terminate as of the date such litigation is filed. | |||
| 4. Redistribution. | |||
| You may reproduce and distribute copies of the Work or Derivative Works thereof | |||
| in any medium, with or without modifications, and in Source or Object form, | |||
| provided that You meet the following conditions: | |||
| You must give any other recipients of the Work or Derivative Works a copy of | |||
| this License; and | |||
| You must cause any modified files to carry prominent notices stating that You | |||
| changed the files; and | |||
| You must retain, in the Source form of any Derivative Works that You distribute, | |||
| all copyright, patent, trademark, and attribution notices from the Source form | |||
| of the Work, excluding those notices that do not pertain to any part of the | |||
| Derivative Works; and | |||
| If the Work includes a "NOTICE" text file as part of its distribution, then any | |||
| Derivative Works that You distribute must include a readable copy of the | |||
| attribution notices contained within such NOTICE file, excluding those notices | |||
| that do not pertain to any part of the Derivative Works, in at least one of the | |||
| following places: within a NOTICE text file distributed as part of the | |||
| Derivative Works; within the Source form or documentation, if provided along | |||
| with the Derivative Works; or, within a display generated by the Derivative | |||
| Works, if and wherever such third-party notices normally appear. The contents of | |||
| the NOTICE file are for informational purposes only and do not modify the | |||
| License. You may add Your own attribution notices within Derivative Works that | |||
| You distribute, alongside or as an addendum to the NOTICE text from the Work, | |||
| provided that such additional attribution notices cannot be construed as | |||
| modifying the License. | |||
| You may add Your own copyright statement to Your modifications and may provide | |||
| additional or different license terms and conditions for use, reproduction, or | |||
| distribution of Your modifications, or for any such Derivative Works as a whole, | |||
| provided Your use, reproduction, and distribution of the Work otherwise complies | |||
| with the conditions stated in this License. | |||
| 5. Submission of Contributions. | |||
| Unless You explicitly state otherwise, any Contribution intentionally submitted | |||
| for inclusion in the Work by You to the Licensor shall be under the terms and | |||
| conditions of this License, without any additional terms or conditions. | |||
| Notwithstanding the above, nothing herein shall supersede or modify the terms of | |||
| any separate license agreement you may have executed with Licensor regarding | |||
| such Contributions. | |||
| 6. Trademarks. | |||
| This License does not grant permission to use the trade names, trademarks, | |||
| service marks, or product names of the Licensor, except as required for | |||
| reasonable and customary use in describing the origin of the Work and | |||
| reproducing the content of the NOTICE file. | |||
| 7. Disclaimer of Warranty. | |||
| Unless required by applicable law or agreed to in writing, Licensor provides the | |||
| Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, | |||
| including, without limitation, any warranties or conditions of TITLE, | |||
| NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are | |||
| solely responsible for determining the appropriateness of using or | |||
| redistributing the Work and assume any risks associated with Your exercise of | |||
| permissions under this License. | |||
| 8. Limitation of Liability. | |||
| In no event and under no legal theory, whether in tort (including negligence), | |||
| contract, or otherwise, unless required by applicable law (such as deliberate | |||
| and grossly negligent acts) or agreed to in writing, shall any Contributor be | |||
| liable to You for damages, including any direct, indirect, special, incidental, | |||
| or consequential damages of any character arising as a result of this License or | |||
| out of the use or inability to use the Work (including but not limited to | |||
| damages for loss of goodwill, work stoppage, computer failure or malfunction, or | |||
| any and all other commercial damages or losses), even if such Contributor has | |||
| been advised of the possibility of such damages. | |||
| 9. Accepting Warranty or Additional Liability. | |||
| While redistributing the Work or Derivative Works thereof, You may choose to | |||
| offer, and charge a fee for, acceptance of support, warranty, indemnity, or | |||
| other liability obligations and/or rights consistent with this License. However, | |||
| in accepting such obligations, You may act only on Your own behalf and on Your | |||
| sole responsibility, not on behalf of any other Contributor, and only if You | |||
| agree to indemnify, defend, and hold each Contributor harmless for any liability | |||
| incurred by, or claims asserted against, such Contributor by reason of your | |||
| accepting any such warranty or additional liability. | |||
| END OF TERMS AND CONDITIONS | |||
| APPENDIX: How to apply the Apache License to your work | |||
| To apply the Apache License to your work, attach the following boilerplate | |||
| notice, with the fields enclosed by brackets "[]" replaced with your own | |||
| identifying information. (Don't include the brackets!) The text should be | |||
| enclosed in the appropriate comment syntax for the file format. We also | |||
| recommend that a file or class name and description of purpose be included on | |||
| the same "printed page" as the copyright notice for easier identification within | |||
| third-party archives. | |||
| Copyright [yyyy] [name of copyright owner] | |||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||
| you may not use this file except in compliance with the License. | |||
| You may obtain a copy of the License at | |||
| http://www.apache.org/licenses/LICENSE-2.0 | |||
| Unless required by applicable law or agreed to in writing, software | |||
| distributed under the License is distributed on an "AS IS" BASIS, | |||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| See the License for the specific language governing permissions and | |||
| limitations under the License. | |||
| @@ -0,0 +1,16 @@ | |||
| # bindata [](https://travis-ci.org/go-macaron/bindata) [](http://gocover.io/github.com/go-macaron/bindata) | |||
| Package bindata is a helper module that allows the use of in-memory static and template files for Macaron via [go-bindata](https://github.com/jteeuwen/go-bindata). | |||
| ### Installation | |||
| go get github.com/go-macaron/bindata | |||
| ## Getting Help | |||
| - [API Reference](https://gowalker.org/github.com/go-macaron/bindata) | |||
| - [Documentation](http://go-macaron.com/docs/middlewares/bindata) | |||
| ## License | |||
| This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text. | |||
| @@ -0,0 +1,105 @@ | |||
| // Copyright 2014 Dustin Webber | |||
| // Copyright 2015 The Macaron Authors | |||
| // | |||
| // Licensed under the Apache License, Version 2.0 (the "License"); | |||
| // you may not use this file except in compliance with the License. | |||
| // You may obtain a copy of the License at | |||
| // | |||
| // http://www.apache.org/licenses/LICENSE-2.0 | |||
| // | |||
| // Unless required by applicable law or agreed to in writing, software | |||
| // distributed under the License is distributed on an "AS IS" BASIS, | |||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
| // See the License for the specific language governing permissions and | |||
| // limitations under the License. | |||
| // Package bindata is a helper module that allows the use of in-memory static and template files for Macaron. | |||
| package bindata | |||
| import ( | |||
| "os" | |||
| "github.com/elazarl/go-bindata-assetfs" | |||
| "gopkg.in/macaron.v1" | |||
| ) | |||
| const _VERSION = "0.1.0" | |||
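| // Version returns the version of the bindata module. | |||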
| func Version() string { | |||
| return _VERSION | |||
| } | |||
| type ( | |||
| templateFileSystem struct { | |||
| files []macaron.TemplateFile | |||
| } | |||
| templateFile struct { | |||
| name string | |||
| data []byte | |||
| ext string | |||
| } | |||
| Options struct { | |||
| // Asset should return content of file in path if exists | |||
| Asset func(path string) ([]byte, error) | |||
| // AssetDir should return list of files in the path | |||
| AssetDir func(path string) ([]string, error) | |||
| // AssetInfo should return the info of file in path if exists | |||
| AssetInfo func(path string) (os.FileInfo, error) | |||
| // AssetNames should return list of all asset names | |||
| AssetNames func() []string | |||
| // Prefix would be prepended to http requests | |||
| Prefix string | |||
| } | |||
| ) | |||
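| // Static builds an assetfs.AssetFS from the given bindata accessors so that | |||
| // embedded assets can be served as an http.FileSystem. | |||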
| func Static(opt Options) *assetfs.AssetFS { | |||
| fs := &assetfs.AssetFS{ | |||
| Asset: opt.Asset, | |||
| AssetDir: opt.AssetDir, | |||
| AssetInfo: opt.AssetInfo, | |||
| Prefix: opt.Prefix, | |||
| } | |||
| return fs | |||
| } | |||
| func (templates templateFileSystem) ListFiles() []macaron.TemplateFile { | |||
| return templates.files | |||
| } | |||
| func (f *templateFile) Name() string { | |||
| return f.name | |||
| } | |||
| func (f *templateFile) Data() []byte { | |||
| return f.data | |||
| } | |||
| func (f *templateFile) Ext() string { | |||
| return f.ext | |||
| } | |||
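| // Templates collects every asset named by opt.AssetNames into an in-memory | |||
| // template file system, skipping assets that cannot be loaded. | |||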
| func Templates(opt Options) templateFileSystem { | |||
| fs := templateFileSystem{} | |||
| fs.files = make([]macaron.TemplateFile, 0, 10) | |||
| list := opt.AssetNames() | |||
| for _, key := range list { | |||
| ext := macaron.GetExt(key) | |||
| data, err := opt.Asset(key) | |||
| if err != nil { | |||
| continue | |||
| } | |||
| name := key[0 : len(key)-len(ext)] | |||
| fs.files = append(fs.files, &templateFile{name, data, ext}) | |||
| } | |||
| return fs | |||
| } | |||
| @@ -0,0 +1,24 @@ | |||
| tidb driver and dialect for github.com/go-xorm/xorm | |||
| ======== | |||
| Currently, almost all operations are supported for TiDB. | |||
| # How to use | |||
| It works just like the other xorm drivers, but you need to import these three packages: | |||
| ```Go | |||
| import ( | |||
| _ "github.com/pingcap/tidb" | |||
| _ "github.com/go-xorm/tidb" | |||
| "github.com/go-xorm/xorm" | |||
| ) | |||
| // The format of the DataSource name is store://uri/dbname | |||
| // for goleveldb as store | |||
| xorm.NewEngine("tidb", "goleveldb://./tidb/tidb") | |||
| // for memory as store | |||
| xorm.NewEngine("tidb", "memory://tidb/tidb") | |||
| // for boltdb as store | |||
| xorm.NewEngine("tidb", "boltdb://./tidb/tidb") | |||
| ``` | |||
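| Once created, the engine behaves like any other xorm engine. A small illustrative sketch (the `User` struct is invented for the example): | |||
| ```Go | |||
| package main | |||
| import ( | |||
| 	_ "github.com/go-xorm/tidb" | |||
| 	"github.com/go-xorm/xorm" | |||
| 	_ "github.com/pingcap/tidb" | |||
| ) | |||
| type User struct { | |||
| 	Id   int64 | |||
| 	Name string | |||
| } | |||
| func main() { | |||
| 	engine, err := xorm.NewEngine("tidb", "goleveldb://./tidb/tidb") | |||
| 	if err != nil { | |||
| 		panic(err) | |||
| 	} | |||
| 	// Create the table from the struct definition, then insert a row. | |||
| 	if err = engine.Sync2(new(User)); err != nil { | |||
| 		panic(err) | |||
| 	} | |||
| 	if _, err = engine.Insert(&User{Name: "demo"}); err != nil { | |||
| 		panic(err) | |||
| 	} | |||
| } | |||
| ``` | |||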
| @@ -0,0 +1,326 @@ | |||
| // Copyright 2015 The Xorm Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package tidb | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "strconv" | |||
| "strings" | |||
| "github.com/go-xorm/core" | |||
| ) | |||
| type tidb struct { | |||
| core.Base | |||
| } | |||
| func (db *tidb) Init(d *core.DB, uri *core.Uri, drivername, dataSourceName string) error { | |||
| return db.Base.Init(d, db, uri, drivername, dataSourceName) | |||
| } | |||
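| // SqlType maps a core column definition to its TiDB (MySQL-compatible) column type string. | |||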
| func (db *tidb) SqlType(c *core.Column) string { | |||
| var res string | |||
| switch t := c.SQLType.Name; t { | |||
| case core.Bool: | |||
| res = core.Bool | |||
| case core.Serial: | |||
| c.IsAutoIncrement = true | |||
| c.IsPrimaryKey = true | |||
| c.Nullable = false | |||
| res = core.Int | |||
| case core.BigSerial: | |||
| c.IsAutoIncrement = true | |||
| c.IsPrimaryKey = true | |||
| c.Nullable = false | |||
| res = core.BigInt | |||
| case core.Bytea: | |||
| res = core.Blob | |||
| case core.TimeStampz: | |||
| res = core.Char | |||
| c.Length = 64 | |||
| case core.Enum: //mysql enum | |||
| res = core.Enum | |||
| res += "(" | |||
| opts := "" | |||
| for v := range c.EnumOptions { | |||
| opts += fmt.Sprintf(",'%v'", v) | |||
| } | |||
| res += strings.TrimLeft(opts, ",") | |||
| res += ")" | |||
| case core.Set: //mysql set | |||
| res = core.Set | |||
| res += "(" | |||
| opts := "" | |||
| for v := range c.SetOptions { | |||
| opts += fmt.Sprintf(",'%v'", v) | |||
| } | |||
| res += strings.TrimLeft(opts, ",") | |||
| res += ")" | |||
| case core.NVarchar: | |||
| res = core.Varchar | |||
| case core.Uuid: | |||
| res = core.Varchar | |||
| c.Length = 40 | |||
| case core.Json: | |||
| res = core.Text | |||
| default: | |||
| res = t | |||
| } | |||
| hasLen1 := c.Length > 0 | |||
| hasLen2 := c.Length2 > 0 | |||
| if res == core.BigInt && !hasLen1 && !hasLen2 { | |||
| c.Length = 20 | |||
| hasLen1 = true | |||
| } | |||
| if hasLen2 { | |||
| res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")" | |||
| } else if hasLen1 { | |||
| res += "(" + strconv.Itoa(c.Length) + ")" | |||
| } | |||
| return res | |||
| } | |||
| func (db *tidb) SupportInsertMany() bool { | |||
| return true | |||
| } | |||
| func (db *tidb) IsReserved(name string) bool { | |||
| return false | |||
| } | |||
| func (db *tidb) Quote(name string) string { | |||
| return "`" + name + "`" | |||
| } | |||
| func (db *tidb) QuoteStr() string { | |||
| return "`" | |||
| } | |||
| func (db *tidb) SupportEngine() bool { | |||
| return false | |||
| } | |||
| func (db *tidb) AutoIncrStr() string { | |||
| return "AUTO_INCREMENT" | |||
| } | |||
| func (db *tidb) SupportCharset() bool { | |||
| return false | |||
| } | |||
| func (db *tidb) IndexOnTable() bool { | |||
| return true | |||
| } | |||
| func (db *tidb) IndexCheckSql(tableName, idxName string) (string, []interface{}) { | |||
| args := []interface{}{db.DbName, tableName, idxName} | |||
| sql := "SELECT `INDEX_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS`" | |||
| sql += " WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ? AND `INDEX_NAME`=?" | |||
| return sql, args | |||
| } | |||
| func (db *tidb) TableCheckSql(tableName string) (string, []interface{}) { | |||
| args := []interface{}{db.DbName, tableName} | |||
| sql := "SELECT `TABLE_NAME` from `INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? and `TABLE_NAME`=?" | |||
| return sql, args | |||
| } | |||
| func (db *tidb) GetColumns(tableName string) ([]string, map[string]*core.Column, error) { | |||
| args := []interface{}{db.DbName, tableName} | |||
| s := "SELECT `COLUMN_NAME`, `IS_NULLABLE`, `COLUMN_DEFAULT`, `COLUMN_TYPE`," + | |||
| " `COLUMN_KEY`, `EXTRA` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ?" | |||
| rows, err := db.DB().Query(s, args...) | |||
| db.LogSQL(s, args) | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| defer rows.Close() | |||
| cols := make(map[string]*core.Column) | |||
| colSeq := make([]string, 0) | |||
| for rows.Next() { | |||
| col := new(core.Column) | |||
| col.Indexes = make(map[string]int) | |||
| var columnName, isNullable, colType, colKey, extra string | |||
| var colDefault *string | |||
| err = rows.Scan(&columnName, &isNullable, &colDefault, &colType, &colKey, &extra) | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| col.Name = strings.Trim(columnName, "` ") | |||
| if "YES" == isNullable { | |||
| col.Nullable = true | |||
| } | |||
| if colDefault != nil { | |||
| col.Default = *colDefault | |||
| if col.Default == "" { | |||
| col.DefaultIsEmpty = true | |||
| } | |||
| } | |||
| cts := strings.Split(colType, "(") | |||
| colName := cts[0] | |||
| colType = strings.ToUpper(colName) | |||
| var len1, len2 int | |||
| if len(cts) == 2 { | |||
| idx := strings.Index(cts[1], ")") | |||
| if colType == core.Enum && cts[1][0] == '\'' { //enum | |||
| options := strings.Split(cts[1][0:idx], ",") | |||
| col.EnumOptions = make(map[string]int) | |||
| for k, v := range options { | |||
| v = strings.TrimSpace(v) | |||
| v = strings.Trim(v, "'") | |||
| col.EnumOptions[v] = k | |||
| } | |||
| } else if colType == core.Set && cts[1][0] == '\'' { | |||
| options := strings.Split(cts[1][0:idx], ",") | |||
| col.SetOptions = make(map[string]int) | |||
| for k, v := range options { | |||
| v = strings.TrimSpace(v) | |||
| v = strings.Trim(v, "'") | |||
| col.SetOptions[v] = k | |||
| } | |||
| } else { | |||
| lens := strings.Split(cts[1][0:idx], ",") | |||
| len1, err = strconv.Atoi(strings.TrimSpace(lens[0])) | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| if len(lens) == 2 { | |||
| len2, err = strconv.Atoi(lens[1]) | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| } | |||
| } | |||
| } | |||
| if colType == "FLOAT UNSIGNED" { | |||
| colType = "FLOAT" | |||
| } | |||
| col.Length = len1 | |||
| col.Length2 = len2 | |||
| if _, ok := core.SqlTypes[colType]; ok { | |||
| col.SQLType = core.SQLType{Name: colType, DefaultLength: len1, DefaultLength2: len2} | |||
| } else { | |||
| return nil, nil, fmt.Errorf("unknown colType %v", colType) | |||
| } | |||
| if colKey == "PRI" { | |||
| col.IsPrimaryKey = true | |||
| } | |||
| if colKey == "UNI" { | |||
| //col.is | |||
| } | |||
| if extra == "auto_increment" { | |||
| col.IsAutoIncrement = true | |||
| } | |||
| if col.SQLType.IsText() || col.SQLType.IsTime() { | |||
| if col.Default != "" { | |||
| col.Default = "'" + col.Default + "'" | |||
| } else { | |||
| if col.DefaultIsEmpty { | |||
| col.Default = "''" | |||
| } | |||
| } | |||
| } | |||
| cols[col.Name] = col | |||
| colSeq = append(colSeq, col.Name) | |||
| } | |||
| return colSeq, cols, nil | |||
| } | |||
| func (db *tidb) GetTables() ([]*core.Table, error) { | |||
| args := []interface{}{db.DbName} | |||
| s := "SELECT `TABLE_NAME`, `ENGINE`, `TABLE_ROWS`, `AUTO_INCREMENT` from " + | |||
| "`INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? AND (`ENGINE`='MyISAM' OR `ENGINE` = 'InnoDB')" | |||
| rows, err := db.DB().Query(s, args...) | |||
| db.LogSQL(s, args) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| defer rows.Close() | |||
| tables := make([]*core.Table, 0) | |||
| for rows.Next() { | |||
| table := core.NewEmptyTable() | |||
| var name, engine, tableRows string | |||
| var autoIncr *string | |||
| err = rows.Scan(&name, &engine, &tableRows, &autoIncr) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| table.Name = name | |||
| table.StoreEngine = engine | |||
| tables = append(tables, table) | |||
| } | |||
| return tables, nil | |||
| } | |||
| func (db *tidb) GetIndexes(tableName string) (map[string]*core.Index, error) { | |||
| args := []interface{}{db.DbName, tableName} | |||
| s := "SELECT `INDEX_NAME`, `NON_UNIQUE`, `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`STATISTICS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ?" | |||
| rows, err := db.DB().Query(s, args...) | |||
| db.LogSQL(s, args) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| defer rows.Close() | |||
| indexes := make(map[string]*core.Index) | |||
| for rows.Next() { | |||
| var indexType int | |||
| var indexName, colName, nonUnique string | |||
| err = rows.Scan(&indexName, &nonUnique, &colName) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if indexName == "PRIMARY" { | |||
| continue | |||
| } | |||
| if "YES" == nonUnique || nonUnique == "1" { | |||
| indexType = core.IndexType | |||
| } else { | |||
| indexType = core.UniqueType | |||
| } | |||
| colName = strings.Trim(colName, "` ") | |||
| var isRegular bool | |||
| if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) { | |||
| indexName = indexName[5+len(tableName):] | |||
| isRegular = true | |||
| } | |||
| var index *core.Index | |||
| var ok bool | |||
| if index, ok = indexes[indexName]; !ok { | |||
| index = new(core.Index) | |||
| index.IsRegular = isRegular | |||
| index.Type = indexType | |||
| index.Name = indexName | |||
| indexes[indexName] = index | |||
| } | |||
| index.AddColumn(colName) | |||
| } | |||
| return indexes, nil | |||
| } | |||
| func (db *tidb) Filters() []core.Filter { | |||
| return []core.Filter{&core.IdFilter{}} | |||
| } | |||
| @@ -0,0 +1,48 @@ | |||
| // Copyright 2015 The Xorm Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package tidb | |||
| import ( | |||
| "errors" | |||
| "net/url" | |||
| "path/filepath" | |||
| "github.com/go-xorm/core" | |||
| ) | |||
| var ( | |||
| _ core.Dialect = (*tidb)(nil) | |||
| DBType core.DbType = "tidb" | |||
| ) | |||
| func init() { | |||
| core.RegisterDriver(string(DBType), &tidbDriver{}) | |||
| core.RegisterDialect(DBType, func() core.Dialect { | |||
| return &tidb{} | |||
| }) | |||
| } | |||
| type tidbDriver struct{} | |||
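| // Parse validates the store scheme (goleveldb, memory, or boltdb) and derives | |||
| // the database name from a data source name of the form store://uri/dbname. | |||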
| func (p *tidbDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) { | |||
| u, err := url.Parse(dataSourceName) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if u.Scheme != "goleveldb" && u.Scheme != "memory" && u.Scheme != "boltdb" { | |||
| return nil, errors.New(u.Scheme + " is not supported yet.") | |||
| } | |||
| path := filepath.Join(u.Host, u.Path) | |||
| dbName := filepath.Clean(filepath.Base(path)) | |||
| uri := &core.Uri{ | |||
| DbType: DBType, | |||
| DbName: dbName, | |||
| } | |||
| return uri, nil | |||
| } | |||
| @@ -0,0 +1,31 @@ | |||
| Go support for Protocol Buffers - Google's data interchange format | |||
| Copyright 2010 The Go Authors. All rights reserved. | |||
| https://github.com/golang/protobuf | |||
| Redistribution and use in source and binary forms, with or without | |||
| modification, are permitted provided that the following conditions are | |||
| met: | |||
| * Redistributions of source code must retain the above copyright | |||
| notice, this list of conditions and the following disclaimer. | |||
| * Redistributions in binary form must reproduce the above | |||
| copyright notice, this list of conditions and the following disclaimer | |||
| in the documentation and/or other materials provided with the | |||
| distribution. | |||
| * Neither the name of Google Inc. nor the names of its | |||
| contributors may be used to endorse or promote products derived from | |||
| this software without specific prior written permission. | |||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| @@ -0,0 +1,43 @@ | |||
| # Go support for Protocol Buffers - Google's data interchange format | |||
| # | |||
| # Copyright 2010 The Go Authors. All rights reserved. | |||
| # https://github.com/golang/protobuf | |||
| # | |||
| # Redistribution and use in source and binary forms, with or without | |||
| # modification, are permitted provided that the following conditions are | |||
| # met: | |||
| # | |||
| # * Redistributions of source code must retain the above copyright | |||
| # notice, this list of conditions and the following disclaimer. | |||
| # * Redistributions in binary form must reproduce the above | |||
| # copyright notice, this list of conditions and the following disclaimer | |||
| # in the documentation and/or other materials provided with the | |||
| # distribution. | |||
| # * Neither the name of Google Inc. nor the names of its | |||
| # contributors may be used to endorse or promote products derived from | |||
| # this software without specific prior written permission. | |||
| # | |||
| # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| install: | |||
| go install | |||
| test: install generate-test-pbs | |||
| go test | |||
| generate-test-pbs: | |||
| make install | |||
| make -C testdata | |||
| protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto | |||
| make | |||
| @@ -0,0 +1,223 @@ | |||
| // Go support for Protocol Buffers - Google's data interchange format | |||
| // | |||
| // Copyright 2011 The Go Authors. All rights reserved. | |||
| // https://github.com/golang/protobuf | |||
| // | |||
| // Redistribution and use in source and binary forms, with or without | |||
| // modification, are permitted provided that the following conditions are | |||
| // met: | |||
| // | |||
| // * Redistributions of source code must retain the above copyright | |||
| // notice, this list of conditions and the following disclaimer. | |||
| // * Redistributions in binary form must reproduce the above | |||
| // copyright notice, this list of conditions and the following disclaimer | |||
| // in the documentation and/or other materials provided with the | |||
| // distribution. | |||
| // * Neither the name of Google Inc. nor the names of its | |||
| // contributors may be used to endorse or promote products derived from | |||
| // this software without specific prior written permission. | |||
| // | |||
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| // Protocol buffer deep copy and merge. | |||
| // TODO: RawMessage. | |||
| package proto | |||
| import ( | |||
| "log" | |||
| "reflect" | |||
| "strings" | |||
| ) | |||
| // Clone returns a deep copy of a protocol buffer. | |||
| func Clone(pb Message) Message { | |||
| in := reflect.ValueOf(pb) | |||
| if in.IsNil() { | |||
| return pb | |||
| } | |||
| out := reflect.New(in.Type().Elem()) | |||
| // out is empty so a merge is a deep copy. | |||
| mergeStruct(out.Elem(), in.Elem()) | |||
| return out.Interface().(Message) | |||
| } | |||
| // Merge merges src into dst. | |||
| // Required and optional fields that are set in src will be set to that value in dst. | |||
| // Elements of repeated fields will be appended. | |||
| // Merge panics if src and dst are not the same type, or if dst is nil. | |||
| func Merge(dst, src Message) { | |||
| in := reflect.ValueOf(src) | |||
| out := reflect.ValueOf(dst) | |||
| if out.IsNil() { | |||
| panic("proto: nil destination") | |||
| } | |||
| if in.Type() != out.Type() { | |||
| // Explicit test prior to mergeStruct so that mistyped nils will fail | |||
| panic("proto: type mismatch") | |||
| } | |||
| if in.IsNil() { | |||
| // Merging nil into non-nil is a quiet no-op | |||
| return | |||
| } | |||
| mergeStruct(out.Elem(), in.Elem()) | |||
| } | |||
| func mergeStruct(out, in reflect.Value) { | |||
| sprop := GetProperties(in.Type()) | |||
| for i := 0; i < in.NumField(); i++ { | |||
| f := in.Type().Field(i) | |||
| if strings.HasPrefix(f.Name, "XXX_") { | |||
| continue | |||
| } | |||
| mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) | |||
| } | |||
| if emIn, ok := in.Addr().Interface().(extendableProto); ok { | |||
| emOut := out.Addr().Interface().(extendableProto) | |||
| mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) | |||
| } | |||
| uf := in.FieldByName("XXX_unrecognized") | |||
| if !uf.IsValid() { | |||
| return | |||
| } | |||
| uin := uf.Bytes() | |||
| if len(uin) > 0 { | |||
| out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) | |||
| } | |||
| } | |||
| // mergeAny performs a merge between two values of the same type. | |||
| // viaPtr indicates whether the values were indirected through a pointer (implying proto2). | |||
| // prop is set if this is a struct field (it may be nil). | |||
| func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { | |||
| if in.Type() == protoMessageType { | |||
| if !in.IsNil() { | |||
| if out.IsNil() { | |||
| out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) | |||
| } else { | |||
| Merge(out.Interface().(Message), in.Interface().(Message)) | |||
| } | |||
| } | |||
| return | |||
| } | |||
| switch in.Kind() { | |||
| case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, | |||
| reflect.String, reflect.Uint32, reflect.Uint64: | |||
| if !viaPtr && isProto3Zero(in) { | |||
| return | |||
| } | |||
| out.Set(in) | |||
| case reflect.Interface: | |||
| // Probably a oneof field; copy non-nil values. | |||
| if in.IsNil() { | |||
| return | |||
| } | |||
| // Allocate destination if it is not set, or set to a different type. | |||
| // Otherwise we will merge as normal. | |||
| if out.IsNil() || out.Elem().Type() != in.Elem().Type() { | |||
| out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) | |||
| } | |||
| mergeAny(out.Elem(), in.Elem(), false, nil) | |||
| case reflect.Map: | |||
| if in.Len() == 0 { | |||
| return | |||
| } | |||
| if out.IsNil() { | |||
| out.Set(reflect.MakeMap(in.Type())) | |||
| } | |||
| // For maps with value types of *T or []byte we need to deep copy each value. | |||
| elemKind := in.Type().Elem().Kind() | |||
| for _, key := range in.MapKeys() { | |||
| var val reflect.Value | |||
| switch elemKind { | |||
| case reflect.Ptr: | |||
| val = reflect.New(in.Type().Elem().Elem()) | |||
| mergeAny(val, in.MapIndex(key), false, nil) | |||
| case reflect.Slice: | |||
| val = in.MapIndex(key) | |||
| val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) | |||
| default: | |||
| val = in.MapIndex(key) | |||
| } | |||
| out.SetMapIndex(key, val) | |||
| } | |||
| case reflect.Ptr: | |||
| if in.IsNil() { | |||
| return | |||
| } | |||
| if out.IsNil() { | |||
| out.Set(reflect.New(in.Elem().Type())) | |||
| } | |||
| mergeAny(out.Elem(), in.Elem(), true, nil) | |||
| case reflect.Slice: | |||
| if in.IsNil() { | |||
| return | |||
| } | |||
| if in.Type().Elem().Kind() == reflect.Uint8 { | |||
| // []byte is a scalar bytes field, not a repeated field. | |||
| // Edge case: if this is in a proto3 message, a zero length | |||
| // bytes field is considered the zero value, and should not | |||
| // be merged. | |||
| if prop != nil && prop.proto3 && in.Len() == 0 { | |||
| return | |||
| } | |||
| // Make a deep copy. | |||
| // Append to []byte{} instead of []byte(nil) so that we never end up | |||
| // with a nil result. | |||
| out.SetBytes(append([]byte{}, in.Bytes()...)) | |||
| return | |||
| } | |||
| n := in.Len() | |||
| if out.IsNil() { | |||
| out.Set(reflect.MakeSlice(in.Type(), 0, n)) | |||
| } | |||
| switch in.Type().Elem().Kind() { | |||
| case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, | |||
| reflect.String, reflect.Uint32, reflect.Uint64: | |||
| out.Set(reflect.AppendSlice(out, in)) | |||
| default: | |||
| for i := 0; i < n; i++ { | |||
| x := reflect.Indirect(reflect.New(in.Type().Elem())) | |||
| mergeAny(x, in.Index(i), false, nil) | |||
| out.Set(reflect.Append(out, x)) | |||
| } | |||
| } | |||
| case reflect.Struct: | |||
| mergeStruct(out, in) | |||
| default: | |||
| // unknown type, so not a protocol buffer | |||
| log.Printf("proto: don't know how to copy %v", in) | |||
| } | |||
| } | |||
| func mergeExtension(out, in map[int32]Extension) { | |||
| for extNum, eIn := range in { | |||
| eOut := Extension{desc: eIn.desc} | |||
| if eIn.value != nil { | |||
| v := reflect.New(reflect.TypeOf(eIn.value)).Elem() | |||
| mergeAny(v, reflect.ValueOf(eIn.value), false, nil) | |||
| eOut.value = v.Interface() | |||
| } | |||
| if eIn.enc != nil { | |||
| eOut.enc = make([]byte, len(eIn.enc)) | |||
| copy(eOut.enc, eIn.enc) | |||
| } | |||
| out[extNum] = eOut | |||
| } | |||
| } | |||
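| To make the Clone and Merge semantics concrete, here is a small, hand-rolled sketch; `Item` is a hypothetical stand-in for a protoc-generated message type: | |||
| ```Go | |||
| package main | |||
| import ( | |||
| 	"fmt" | |||
| 	"github.com/golang/protobuf/proto" | |||
| ) | |||
| // Item is a hand-written stand-in for a generated proto2 message. | |||
| type Item struct { | |||
| 	Name *string  `protobuf:"bytes,1,opt,name=name"` | |||
| 	Tags []string `protobuf:"bytes,2,rep,name=tags"` | |||
| } | |||
| func (m *Item) Reset()         { *m = Item{} } | |||
| func (m *Item) String() string { return proto.CompactTextString(m) } | |||
| func (*Item) ProtoMessage()    {} | |||
| func main() { | |||
| 	src := &Item{Name: proto.String("a"), Tags: []string{"x"}} | |||
| 	dst := proto.Clone(src).(*Item) // deep copy; mutating dst leaves src intact | |||
| 	proto.Merge(dst, &Item{Tags: []string{"y"}}) | |||
| 	fmt.Println(dst.Tags) // [x y]: elements of repeated fields are appended | |||
| } | |||
| ``` | |||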
| @@ -0,0 +1,868 @@ | |||
| // Go support for Protocol Buffers - Google's data interchange format | |||
| // | |||
| // Copyright 2010 The Go Authors. All rights reserved. | |||
| // https://github.com/golang/protobuf | |||
| // | |||
| // Redistribution and use in source and binary forms, with or without | |||
| // modification, are permitted provided that the following conditions are | |||
| // met: | |||
| // | |||
| // * Redistributions of source code must retain the above copyright | |||
| // notice, this list of conditions and the following disclaimer. | |||
| // * Redistributions in binary form must reproduce the above | |||
| // copyright notice, this list of conditions and the following disclaimer | |||
| // in the documentation and/or other materials provided with the | |||
| // distribution. | |||
| // * Neither the name of Google Inc. nor the names of its | |||
| // contributors may be used to endorse or promote products derived from | |||
| // this software without specific prior written permission. | |||
| // | |||
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| package proto | |||
| /* | |||
| * Routines for decoding protocol buffer data to construct in-memory representations. | |||
| */ | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "io" | |||
| "os" | |||
| "reflect" | |||
| ) | |||
| // errOverflow is returned when an integer is too large to be represented. | |||
| var errOverflow = errors.New("proto: integer overflow") | |||
| // ErrInternalBadWireType is returned by generated code when an incorrect | |||
| // wire type is encountered. It does not get returned to user code. | |||
| var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") | |||
| // The fundamental decoders that interpret bytes on the wire. | |||
| // Those that take integer types all return uint64 and are | |||
| // therefore of type valueDecoder. | |||
| // DecodeVarint reads a varint-encoded integer from the slice. | |||
| // It returns the integer and the number of bytes consumed, or | |||
| // zero if there is not enough. | |||
| // This is the format for the | |||
| // int32, int64, uint32, uint64, bool, and enum | |||
| // protocol buffer types. | |||
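| // For example, the varint encoding of 150 is 0x96 0x01, so | |||
| // DecodeVarint([]byte{0x96, 0x01}) returns (150, 2). | |||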
| func DecodeVarint(buf []byte) (x uint64, n int) { | |||
| // x, n already 0 | |||
| for shift := uint(0); shift < 64; shift += 7 { | |||
| if n >= len(buf) { | |||
| return 0, 0 | |||
| } | |||
| b := uint64(buf[n]) | |||
| n++ | |||
| x |= (b & 0x7F) << shift | |||
| if (b & 0x80) == 0 { | |||
| return x, n | |||
| } | |||
| } | |||
| // The number is too large to represent in a 64-bit value. | |||
| return 0, 0 | |||
| } | |||
| // DecodeVarint reads a varint-encoded integer from the Buffer. | |||
| // This is the format for the | |||
| // int32, int64, uint32, uint64, bool, and enum | |||
| // protocol buffer types. | |||
| func (p *Buffer) DecodeVarint() (x uint64, err error) { | |||
| // x, err already 0 | |||
| i := p.index | |||
| l := len(p.buf) | |||
| for shift := uint(0); shift < 64; shift += 7 { | |||
| if i >= l { | |||
| err = io.ErrUnexpectedEOF | |||
| return | |||
| } | |||
| b := p.buf[i] | |||
| i++ | |||
| x |= (uint64(b) & 0x7F) << shift | |||
| if b < 0x80 { | |||
| p.index = i | |||
| return | |||
| } | |||
| } | |||
| // The number is too large to represent in a 64-bit value. | |||
| err = errOverflow | |||
| return | |||
| } | |||
| // DecodeFixed64 reads a 64-bit integer from the Buffer. | |||
| // This is the format for the | |||
| // fixed64, sfixed64, and double protocol buffer types. | |||
| func (p *Buffer) DecodeFixed64() (x uint64, err error) { | |||
| // x, err already 0 | |||
| i := p.index + 8 | |||
| if i < 0 || i > len(p.buf) { | |||
| err = io.ErrUnexpectedEOF | |||
| return | |||
| } | |||
| p.index = i | |||
| x = uint64(p.buf[i-8]) | |||
| x |= uint64(p.buf[i-7]) << 8 | |||
| x |= uint64(p.buf[i-6]) << 16 | |||
| x |= uint64(p.buf[i-5]) << 24 | |||
| x |= uint64(p.buf[i-4]) << 32 | |||
| x |= uint64(p.buf[i-3]) << 40 | |||
| x |= uint64(p.buf[i-2]) << 48 | |||
| x |= uint64(p.buf[i-1]) << 56 | |||
| return | |||
| } | |||
| // DecodeFixed32 reads a 32-bit integer from the Buffer. | |||
| // This is the format for the | |||
| // fixed32, sfixed32, and float protocol buffer types. | |||
| func (p *Buffer) DecodeFixed32() (x uint64, err error) { | |||
| // x, err already 0 | |||
| i := p.index + 4 | |||
| if i < 0 || i > len(p.buf) { | |||
| err = io.ErrUnexpectedEOF | |||
| return | |||
| } | |||
| p.index = i | |||
| x = uint64(p.buf[i-4]) | |||
| x |= uint64(p.buf[i-3]) << 8 | |||
| x |= uint64(p.buf[i-2]) << 16 | |||
| x |= uint64(p.buf[i-1]) << 24 | |||
| return | |||
| } | |||
| // DecodeZigzag64 reads a zigzag-encoded 64-bit integer | |||
| // from the Buffer. | |||
| // This is the format used for the sint64 protocol buffer type. | |||
| func (p *Buffer) DecodeZigzag64() (x uint64, err error) { | |||
| x, err = p.DecodeVarint() | |||
| if err != nil { | |||
| return | |||
| } | |||
| x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) | |||
| return | |||
| } | |||
| // DecodeZigzag32 reads a zigzag-encoded 32-bit integer | |||
| // from the Buffer. | |||
| // This is the format used for the sint32 protocol buffer type. | |||
| func (p *Buffer) DecodeZigzag32() (x uint64, err error) { | |||
| x, err = p.DecodeVarint() | |||
| if err != nil { | |||
| return | |||
| } | |||
| x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) | |||
| return | |||
| } | |||
| // These are not ValueDecoders: they produce an array of bytes or a string. | |||
| // bytes, embedded messages | |||
| // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. | |||
| // This is the format used for the bytes protocol buffer | |||
| // type and for embedded messages. | |||
| func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { | |||
| n, err := p.DecodeVarint() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| nb := int(n) | |||
| if nb < 0 { | |||
| return nil, fmt.Errorf("proto: bad byte length %d", nb) | |||
| } | |||
| end := p.index + nb | |||
| if end < p.index || end > len(p.buf) { | |||
| return nil, io.ErrUnexpectedEOF | |||
| } | |||
| if !alloc { | |||
| // TODO: check if we can get more uses of alloc=false | |||
| buf = p.buf[p.index:end] | |||
| p.index += nb | |||
| return | |||
| } | |||
| buf = make([]byte, nb) | |||
| copy(buf, p.buf[p.index:]) | |||
| p.index += nb | |||
| return | |||
| } | |||
| // DecodeStringBytes reads an encoded string from the Buffer. | |||
| // This is the format used for the proto2 string type. | |||
| func (p *Buffer) DecodeStringBytes() (s string, err error) { | |||
| buf, err := p.DecodeRawBytes(false) | |||
| if err != nil { | |||
| return | |||
| } | |||
| return string(buf), nil | |||
| } | |||
| // Skip the next item in the buffer. Its wire type is decoded and presented as an argument. | |||
| // If the protocol buffer has extensions, and the field matches, add it as an extension. | |||
| // Otherwise, if the XXX_unrecognized field exists, append the skipped data there. | |||
| func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { | |||
| oi := o.index | |||
| err := o.skip(t, tag, wire) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if !unrecField.IsValid() { | |||
| return nil | |||
| } | |||
| ptr := structPointer_Bytes(base, unrecField) | |||
| // Add the skipped field to struct field | |||
| obuf := o.buf | |||
| o.buf = *ptr | |||
| o.EncodeVarint(uint64(tag<<3 | wire)) | |||
| *ptr = append(o.buf, obuf[oi:o.index]...) | |||
| o.buf = obuf | |||
| return nil | |||
| } | |||
| // Skip the next item in the buffer. Its wire type is decoded and presented as an argument. | |||
| func (o *Buffer) skip(t reflect.Type, tag, wire int) error { | |||
| var u uint64 | |||
| var err error | |||
| switch wire { | |||
| case WireVarint: | |||
| _, err = o.DecodeVarint() | |||
| case WireFixed64: | |||
| _, err = o.DecodeFixed64() | |||
| case WireBytes: | |||
| _, err = o.DecodeRawBytes(false) | |||
| case WireFixed32: | |||
| _, err = o.DecodeFixed32() | |||
| case WireStartGroup: | |||
| for { | |||
| u, err = o.DecodeVarint() | |||
| if err != nil { | |||
| break | |||
| } | |||
| fwire := int(u & 0x7) | |||
| if fwire == WireEndGroup { | |||
| break | |||
| } | |||
| ftag := int(u >> 3) | |||
| err = o.skip(t, ftag, fwire) | |||
| if err != nil { | |||
| break | |||
| } | |||
| } | |||
| default: | |||
| err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) | |||
| } | |||
| return err | |||
| } | |||
| // Unmarshaler is the interface representing objects that can | |||
| // unmarshal themselves. The method should reset the receiver before | |||
| // decoding starts. The argument points to data that may be | |||
| // overwritten, so implementations should not keep references to the | |||
| // buffer. | |||
| type Unmarshaler interface { | |||
| Unmarshal([]byte) error | |||
| } | |||
| // Unmarshal parses the protocol buffer representation in buf and places the | |||
| // decoded result in pb. If the struct underlying pb does not match | |||
| // the data in buf, the results can be unpredictable. | |||
| // | |||
| // Unmarshal resets pb before starting to unmarshal, so any | |||
| // existing data in pb is always removed. Use UnmarshalMerge | |||
| // to preserve and append to existing data. | |||
| func Unmarshal(buf []byte, pb Message) error { | |||
| pb.Reset() | |||
| return UnmarshalMerge(buf, pb) | |||
| } | |||
| // UnmarshalMerge parses the protocol buffer representation in buf and | |||
| // writes the decoded result to pb. If the struct underlying pb does not match | |||
| // the data in buf, the results can be unpredictable. | |||
| // | |||
| // UnmarshalMerge merges into existing data in pb. | |||
| // Most code should use Unmarshal instead. | |||
| func UnmarshalMerge(buf []byte, pb Message) error { | |||
| // If the object can unmarshal itself, let it. | |||
| if u, ok := pb.(Unmarshaler); ok { | |||
| return u.Unmarshal(buf) | |||
| } | |||
| return NewBuffer(buf).Unmarshal(pb) | |||
| } | |||
| // DecodeMessage reads a count-delimited message from the Buffer. | |||
| func (p *Buffer) DecodeMessage(pb Message) error { | |||
| enc, err := p.DecodeRawBytes(false) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return NewBuffer(enc).Unmarshal(pb) | |||
| } | |||
| // DecodeGroup reads a tag-delimited group from the Buffer. | |||
| func (p *Buffer) DecodeGroup(pb Message) error { | |||
| typ, base, err := getbase(pb) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) | |||
| } | |||
| // Unmarshal parses the protocol buffer representation in the | |||
| // Buffer and places the decoded result in pb. If the struct | |||
| // underlying pb does not match the data in the buffer, the results can be | |||
| // unpredictable. | |||
| func (p *Buffer) Unmarshal(pb Message) error { | |||
| // If the object can unmarshal itself, let it. | |||
| if u, ok := pb.(Unmarshaler); ok { | |||
| err := u.Unmarshal(p.buf[p.index:]) | |||
| p.index = len(p.buf) | |||
| return err | |||
| } | |||
| typ, base, err := getbase(pb) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) | |||
| if collectStats { | |||
| stats.Decode++ | |||
| } | |||
| return err | |||
| } | |||
| // unmarshalType does the work of unmarshaling a structure. | |||
| func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { | |||
| var state errorState | |||
| required, reqFields := prop.reqCount, uint64(0) | |||
| var err error | |||
| for err == nil && o.index < len(o.buf) { | |||
| oi := o.index | |||
| var u uint64 | |||
| u, err = o.DecodeVarint() | |||
| if err != nil { | |||
| break | |||
| } | |||
| wire := int(u & 0x7) | |||
| if wire == WireEndGroup { | |||
| if is_group { | |||
| return nil // input is satisfied | |||
| } | |||
| return fmt.Errorf("proto: %s: wiretype end group for non-group", st) | |||
| } | |||
| tag := int(u >> 3) | |||
| if tag <= 0 { | |||
| return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) | |||
| } | |||
| fieldnum, ok := prop.decoderTags.get(tag) | |||
| if !ok { | |||
| // Maybe it's an extension? | |||
| if prop.extendable { | |||
| if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { | |||
| if err = o.skip(st, tag, wire); err == nil { | |||
| ext := e.ExtensionMap()[int32(tag)] // may be missing | |||
| ext.enc = append(ext.enc, o.buf[oi:o.index]...) | |||
| e.ExtensionMap()[int32(tag)] = ext | |||
| } | |||
| continue | |||
| } | |||
| } | |||
| // Maybe it's a oneof? | |||
| if prop.oneofUnmarshaler != nil { | |||
| m := structPointer_Interface(base, st).(Message) | |||
| // First return value indicates whether tag is a oneof field. | |||
| ok, err = prop.oneofUnmarshaler(m, tag, wire, o) | |||
| if err == ErrInternalBadWireType { | |||
| // Map the error to something more descriptive. | |||
| // Do the formatting here to save generated code space. | |||
| err = fmt.Errorf("bad wiretype for oneof field in %T", m) | |||
| } | |||
| if ok { | |||
| continue | |||
| } | |||
| } | |||
| err = o.skipAndSave(st, tag, wire, base, prop.unrecField) | |||
| continue | |||
| } | |||
| p := prop.Prop[fieldnum] | |||
| if p.dec == nil { | |||
| fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) | |||
| continue | |||
| } | |||
| dec := p.dec | |||
| if wire != WireStartGroup && wire != p.WireType { | |||
| if wire == WireBytes && p.packedDec != nil { | |||
| // a packable field | |||
| dec = p.packedDec | |||
| } else { | |||
| err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) | |||
| continue | |||
| } | |||
| } | |||
| decErr := dec(o, p, base) | |||
| if decErr != nil && !state.shouldContinue(decErr, p) { | |||
| err = decErr | |||
| } | |||
| if err == nil && p.Required { | |||
| // Successfully decoded a required field. | |||
| if tag <= 64 { | |||
| // use bitmap for fields 1-64 to catch field reuse. | |||
| var mask uint64 = 1 << uint64(tag-1) | |||
| if reqFields&mask == 0 { | |||
| // new required field | |||
| reqFields |= mask | |||
| required-- | |||
| } | |||
| } else { | |||
| // This is imprecise. It can be fooled by a required field | |||
| // with a tag > 64 that is encoded twice; that's very rare. | |||
| // A fully correct implementation would require allocating | |||
| // a data structure, which we would like to avoid. | |||
| required-- | |||
| } | |||
| } | |||
| } | |||
| if err == nil { | |||
| if is_group { | |||
| return io.ErrUnexpectedEOF | |||
| } | |||
| if state.err != nil { | |||
| return state.err | |||
| } | |||
| if required > 0 { | |||
| // Not enough information to determine the exact field. If we use extra | |||
| // CPU, we could determine the field only if the missing required field | |||
| // has a tag <= 64 and we check reqFields. | |||
| return &RequiredNotSetError{"{Unknown}"} | |||
| } | |||
| } | |||
| return err | |||
| } | |||
| // Individual type decoders | |||
| // For each, | |||
| // u is the decoded value, | |||
| // v is a pointer to the field (pointer) in the struct | |||
| // Sizes of the pools to allocate inside the Buffer. | |||
| // The goal is modest amortization and allocation | |||
| // on at least 16-byte boundaries. | |||
| const ( | |||
| boolPoolSize = 16 | |||
| uint32PoolSize = 8 | |||
| uint64PoolSize = 4 | |||
| ) | |||
| // Decode a bool. | |||
| func (o *Buffer) dec_bool(p *Properties, base structPointer) error { | |||
| u, err := p.valDec(o) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if len(o.bools) == 0 { | |||
| o.bools = make([]bool, boolPoolSize) | |||
| } | |||
| o.bools[0] = u != 0 | |||
| *structPointer_Bool(base, p.field) = &o.bools[0] | |||
| o.bools = o.bools[1:] | |||
| return nil | |||
| } | |||
| func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { | |||
| u, err := p.valDec(o) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *structPointer_BoolVal(base, p.field) = u != 0 | |||
| return nil | |||
| } | |||
| // Decode an int32. | |||
| func (o *Buffer) dec_int32(p *Properties, base structPointer) error { | |||
| u, err := p.valDec(o) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) | |||
| return nil | |||
| } | |||
| func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { | |||
| u, err := p.valDec(o) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) | |||
| return nil | |||
| } | |||
| // Decode an int64. | |||
| func (o *Buffer) dec_int64(p *Properties, base structPointer) error { | |||
| u, err := p.valDec(o) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| word64_Set(structPointer_Word64(base, p.field), o, u) | |||
| return nil | |||
| } | |||
| func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { | |||
| u, err := p.valDec(o) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| word64Val_Set(structPointer_Word64Val(base, p.field), o, u) | |||
| return nil | |||
| } | |||
| // Decode a string. | |||
| func (o *Buffer) dec_string(p *Properties, base structPointer) error { | |||
| s, err := o.DecodeStringBytes() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *structPointer_String(base, p.field) = &s | |||
| return nil | |||
| } | |||
| func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { | |||
| s, err := o.DecodeStringBytes() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *structPointer_StringVal(base, p.field) = s | |||
| return nil | |||
| } | |||
| // Decode a slice of bytes ([]byte). | |||
| func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { | |||
| b, err := o.DecodeRawBytes(true) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *structPointer_Bytes(base, p.field) = b | |||
| return nil | |||
| } | |||
| // Decode a slice of bools ([]bool). | |||
| func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { | |||
| u, err := p.valDec(o) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| v := structPointer_BoolSlice(base, p.field) | |||
| *v = append(*v, u != 0) | |||
| return nil | |||
| } | |||
| // Decode a slice of bools ([]bool) in packed format. | |||
| func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { | |||
| v := structPointer_BoolSlice(base, p.field) | |||
| nn, err := o.DecodeVarint() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| nb := int(nn) // number of bytes of encoded bools | |||
| fin := o.index + nb | |||
| if fin < o.index { | |||
| return errOverflow | |||
| } | |||
| y := *v | |||
| for o.index < fin { | |||
| u, err := p.valDec(o) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| y = append(y, u != 0) | |||
| } | |||
| *v = y | |||
| return nil | |||
| } | |||
| // Decode a slice of int32s ([]int32). | |||
| func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { | |||
| u, err := p.valDec(o) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| structPointer_Word32Slice(base, p.field).Append(uint32(u)) | |||
| return nil | |||
| } | |||
| // Decode a slice of int32s ([]int32) in packed format. | |||
| func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { | |||
| v := structPointer_Word32Slice(base, p.field) | |||
| nn, err := o.DecodeVarint() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| nb := int(nn) // number of bytes of encoded int32s | |||
| fin := o.index + nb | |||
| if fin < o.index { | |||
| return errOverflow | |||
| } | |||
| for o.index < fin { | |||
| u, err := p.valDec(o) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| v.Append(uint32(u)) | |||
| } | |||
| return nil | |||
| } | |||
| // Decode a slice of int64s ([]int64). | |||
| func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { | |||
| u, err := p.valDec(o) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| structPointer_Word64Slice(base, p.field).Append(u) | |||
| return nil | |||
| } | |||
| // Decode a slice of int64s ([]int64) in packed format. | |||
| func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { | |||
| v := structPointer_Word64Slice(base, p.field) | |||
| nn, err := o.DecodeVarint() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| nb := int(nn) // number of bytes of encoded int64s | |||
| fin := o.index + nb | |||
| if fin < o.index { | |||
| return errOverflow | |||
| } | |||
| for o.index < fin { | |||
| u, err := p.valDec(o) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| v.Append(u) | |||
| } | |||
| return nil | |||
| } | |||
| // Decode a slice of strings ([]string). | |||
| func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { | |||
| s, err := o.DecodeStringBytes() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| v := structPointer_StringSlice(base, p.field) | |||
| *v = append(*v, s) | |||
| return nil | |||
| } | |||
| // Decode a slice of slice of bytes ([][]byte). | |||
| func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { | |||
| b, err := o.DecodeRawBytes(true) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| v := structPointer_BytesSlice(base, p.field) | |||
| *v = append(*v, b) | |||
| return nil | |||
| } | |||
| // Decode a map field. | |||
| func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { | |||
| raw, err := o.DecodeRawBytes(false) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| oi := o.index // index at the end of this map entry | |||
| o.index -= len(raw) // move buffer back to start of map entry | |||
| mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V | |||
| if mptr.Elem().IsNil() { | |||
| mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) | |||
| } | |||
| v := mptr.Elem() // map[K]V | |||
| // Prepare addressable doubly-indirect placeholders for the key and value types. | |||
| // See enc_new_map for why. | |||
| keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K | |||
| keybase := toStructPointer(keyptr.Addr()) // **K | |||
| var valbase structPointer | |||
| var valptr reflect.Value | |||
| switch p.mtype.Elem().Kind() { | |||
| case reflect.Slice: | |||
| // []byte | |||
| var dummy []byte | |||
| valptr = reflect.ValueOf(&dummy) // *[]byte | |||
| valbase = toStructPointer(valptr) // *[]byte | |||
| case reflect.Ptr: | |||
| // message; valptr is **Msg; need to allocate the intermediate pointer | |||
| valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V | |||
| valptr.Set(reflect.New(valptr.Type().Elem())) | |||
| valbase = toStructPointer(valptr) | |||
| default: | |||
| // everything else | |||
| valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V | |||
| valbase = toStructPointer(valptr.Addr()) // **V | |||
| } | |||
| // Decode. | |||
| // This parses a restricted wire format, namely the encoding of a message | |||
| // with two fields. See enc_new_map for the format. | |||
| for o.index < oi { | |||
| // tagcode for key and value properties are always a single byte | |||
| // because they have tags 1 and 2. | |||
| tagcode := o.buf[o.index] | |||
| o.index++ | |||
| switch tagcode { | |||
| case p.mkeyprop.tagcode[0]: | |||
| if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { | |||
| return err | |||
| } | |||
| case p.mvalprop.tagcode[0]: | |||
| if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { | |||
| return err | |||
| } | |||
| default: | |||
| // TODO: Should we silently skip this instead? | |||
| return fmt.Errorf("proto: bad map data tag %d", raw[0]) | |||
| } | |||
| } | |||
| keyelem, valelem := keyptr.Elem(), valptr.Elem() | |||
| if !keyelem.IsValid() { | |||
| keyelem = reflect.Zero(p.mtype.Key()) | |||
| } | |||
| if !valelem.IsValid() { | |||
| valelem = reflect.Zero(p.mtype.Elem()) | |||
| } | |||
| v.SetMapIndex(keyelem, valelem) | |||
| return nil | |||
| } | |||
| // Decode a group. | |||
| func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { | |||
| bas := structPointer_GetStructPointer(base, p.field) | |||
| if structPointer_IsNil(bas) { | |||
| // allocate new nested message | |||
| bas = toStructPointer(reflect.New(p.stype)) | |||
| structPointer_SetStructPointer(base, p.field, bas) | |||
| } | |||
| return o.unmarshalType(p.stype, p.sprop, true, bas) | |||
| } | |||
| // Decode an embedded message. | |||
| func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { | |||
| raw, e := o.DecodeRawBytes(false) | |||
| if e != nil { | |||
| return e | |||
| } | |||
| bas := structPointer_GetStructPointer(base, p.field) | |||
| if structPointer_IsNil(bas) { | |||
| // allocate new nested message | |||
| bas = toStructPointer(reflect.New(p.stype)) | |||
| structPointer_SetStructPointer(base, p.field, bas) | |||
| } | |||
| // If the object can unmarshal itself, let it. | |||
| if p.isUnmarshaler { | |||
| iv := structPointer_Interface(bas, p.stype) | |||
| return iv.(Unmarshaler).Unmarshal(raw) | |||
| } | |||
| obuf := o.buf | |||
| oi := o.index | |||
| o.buf = raw | |||
| o.index = 0 | |||
| err = o.unmarshalType(p.stype, p.sprop, false, bas) | |||
| o.buf = obuf | |||
| o.index = oi | |||
| return err | |||
| } | |||
| // Decode a slice of embedded messages. | |||
| func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { | |||
| return o.dec_slice_struct(p, false, base) | |||
| } | |||
| // Decode a slice of embedded groups. | |||
| func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { | |||
| return o.dec_slice_struct(p, true, base) | |||
| } | |||
| // Decode a slice of structs ([]*struct). | |||
| func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { | |||
| v := reflect.New(p.stype) | |||
| bas := toStructPointer(v) | |||
| structPointer_StructPointerSlice(base, p.field).Append(bas) | |||
| if is_group { | |||
| err := o.unmarshalType(p.stype, p.sprop, is_group, bas) | |||
| return err | |||
| } | |||
| raw, err := o.DecodeRawBytes(false) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| // If the object can unmarshal itself, let it. | |||
| if p.isUnmarshaler { | |||
| iv := v.Interface() | |||
| return iv.(Unmarshaler).Unmarshal(raw) | |||
| } | |||
| obuf := o.buf | |||
| oi := o.index | |||
| o.buf = raw | |||
| o.index = 0 | |||
| err = o.unmarshalType(p.stype, p.sprop, is_group, bas) | |||
| o.buf = obuf | |||
| o.index = oi | |||
| return err | |||
| } | |||
| @@ -0,0 +1,276 @@ | |||
| // Go support for Protocol Buffers - Google's data interchange format | |||
| // | |||
| // Copyright 2011 The Go Authors. All rights reserved. | |||
| // https://github.com/golang/protobuf | |||
| // | |||
| // Redistribution and use in source and binary forms, with or without | |||
| // modification, are permitted provided that the following conditions are | |||
| // met: | |||
| // | |||
| // * Redistributions of source code must retain the above copyright | |||
| // notice, this list of conditions and the following disclaimer. | |||
| // * Redistributions in binary form must reproduce the above | |||
| // copyright notice, this list of conditions and the following disclaimer | |||
| // in the documentation and/or other materials provided with the | |||
| // distribution. | |||
| // * Neither the name of Google Inc. nor the names of its | |||
| // contributors may be used to endorse or promote products derived from | |||
| // this software without specific prior written permission. | |||
| // | |||
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| // Protocol buffer comparison. | |||
| package proto | |||
| import ( | |||
| "bytes" | |||
| "log" | |||
| "reflect" | |||
| "strings" | |||
| ) | |||
| /* | |||
| Equal returns true iff protocol buffers a and b are equal. | |||
| The arguments must both be pointers to protocol buffer structs. | |||
| Equality is defined in this way: | |||
| - Two messages are equal iff they are the same type, | |||
| corresponding fields are equal, unknown field sets | |||
| are equal, and extensions sets are equal. | |||
| - Two set scalar fields are equal iff their values are equal. | |||
| If the fields are of a floating-point type, remember that | |||
| NaN != x for all x, including NaN. If the message is defined | |||
| in a proto3 .proto file, fields are not "set"; specifically, | |||
| zero length proto3 "bytes" fields are equal (nil == {}). | |||
| - Two repeated fields are equal iff their lengths are the same, | |||
| and their corresponding elements are equal (a "bytes" field, | |||
| although represented by []byte, is not a repeated field) | |||
| - Two unset fields are equal. | |||
| - Two unknown field sets are equal if their current | |||
| encoded state is equal. | |||
| - Two extension sets are equal iff they have corresponding | |||
| elements that are pairwise equal. | |||
| - Every other combination of things is not equal. | |||
| The return value is undefined if a and b are not protocol buffers. | |||
| */ | |||
| func Equal(a, b Message) bool { | |||
| if a == nil || b == nil { | |||
| return a == b | |||
| } | |||
| v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) | |||
| if v1.Type() != v2.Type() { | |||
| return false | |||
| } | |||
| if v1.Kind() == reflect.Ptr { | |||
| if v1.IsNil() { | |||
| return v2.IsNil() | |||
| } | |||
| if v2.IsNil() { | |||
| return false | |||
| } | |||
| v1, v2 = v1.Elem(), v2.Elem() | |||
| } | |||
| if v1.Kind() != reflect.Struct { | |||
| return false | |||
| } | |||
| return equalStruct(v1, v2) | |||
| } | |||
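| // Usage sketch (not part of the library): assuming a generated message | |||
| // type such as the Test type from this package's documentation, Equal | |||
| // compares two messages field by field. pb.Test is hypothetical here. | |||
| // | |||
| // a := &pb.Test{Label: proto.String("x")} | |||
| // b := &pb.Test{Label: proto.String("x")} | |||
| // fmt.Println(proto.Equal(a, b)) // true: same type, equal fields | |||
| // b.Label = proto.String("y") | |||
| // fmt.Println(proto.Equal(a, b)) // false: corresponding fields differ | |||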
| // v1 and v2 are known to have the same type. | |||
| func equalStruct(v1, v2 reflect.Value) bool { | |||
| sprop := GetProperties(v1.Type()) | |||
| for i := 0; i < v1.NumField(); i++ { | |||
| f := v1.Type().Field(i) | |||
| if strings.HasPrefix(f.Name, "XXX_") { | |||
| continue | |||
| } | |||
| f1, f2 := v1.Field(i), v2.Field(i) | |||
| if f.Type.Kind() == reflect.Ptr { | |||
| if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { | |||
| // both unset | |||
| continue | |||
| } else if n1 != n2 { | |||
| // set/unset mismatch | |||
| return false | |||
| } | |||
| b1, ok := f1.Interface().(raw) | |||
| if ok { | |||
| b2 := f2.Interface().(raw) | |||
| // RawMessage | |||
| if !bytes.Equal(b1.Bytes(), b2.Bytes()) { | |||
| return false | |||
| } | |||
| continue | |||
| } | |||
| f1, f2 = f1.Elem(), f2.Elem() | |||
| } | |||
| if !equalAny(f1, f2, sprop.Prop[i]) { | |||
| return false | |||
| } | |||
| } | |||
| if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { | |||
| em2 := v2.FieldByName("XXX_extensions") | |||
| if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { | |||
| return false | |||
| } | |||
| } | |||
| uf := v1.FieldByName("XXX_unrecognized") | |||
| if !uf.IsValid() { | |||
| return true | |||
| } | |||
| u1 := uf.Bytes() | |||
| u2 := v2.FieldByName("XXX_unrecognized").Bytes() | |||
| if !bytes.Equal(u1, u2) { | |||
| return false | |||
| } | |||
| return true | |||
| } | |||
| // v1 and v2 are known to have the same type. | |||
| // prop may be nil. | |||
| func equalAny(v1, v2 reflect.Value, prop *Properties) bool { | |||
| if v1.Type() == protoMessageType { | |||
| m1, _ := v1.Interface().(Message) | |||
| m2, _ := v2.Interface().(Message) | |||
| return Equal(m1, m2) | |||
| } | |||
| switch v1.Kind() { | |||
| case reflect.Bool: | |||
| return v1.Bool() == v2.Bool() | |||
| case reflect.Float32, reflect.Float64: | |||
| return v1.Float() == v2.Float() | |||
| case reflect.Int32, reflect.Int64: | |||
| return v1.Int() == v2.Int() | |||
| case reflect.Interface: | |||
| // Probably a oneof field; compare the inner values. | |||
| n1, n2 := v1.IsNil(), v2.IsNil() | |||
| if n1 || n2 { | |||
| return n1 == n2 | |||
| } | |||
| e1, e2 := v1.Elem(), v2.Elem() | |||
| if e1.Type() != e2.Type() { | |||
| return false | |||
| } | |||
| return equalAny(e1, e2, nil) | |||
| case reflect.Map: | |||
| if v1.Len() != v2.Len() { | |||
| return false | |||
| } | |||
| for _, key := range v1.MapKeys() { | |||
| val2 := v2.MapIndex(key) | |||
| if !val2.IsValid() { | |||
| // This key was not found in the second map. | |||
| return false | |||
| } | |||
| if !equalAny(v1.MapIndex(key), val2, nil) { | |||
| return false | |||
| } | |||
| } | |||
| return true | |||
| case reflect.Ptr: | |||
| return equalAny(v1.Elem(), v2.Elem(), prop) | |||
| case reflect.Slice: | |||
| if v1.Type().Elem().Kind() == reflect.Uint8 { | |||
| // short circuit: []byte | |||
| // Edge case: if this is in a proto3 message, a zero length | |||
| // bytes field is considered the zero value. | |||
| if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { | |||
| return true | |||
| } | |||
| if v1.IsNil() != v2.IsNil() { | |||
| return false | |||
| } | |||
| return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) | |||
| } | |||
| if v1.Len() != v2.Len() { | |||
| return false | |||
| } | |||
| for i := 0; i < v1.Len(); i++ { | |||
| if !equalAny(v1.Index(i), v2.Index(i), prop) { | |||
| return false | |||
| } | |||
| } | |||
| return true | |||
| case reflect.String: | |||
| return v1.Interface().(string) == v2.Interface().(string) | |||
| case reflect.Struct: | |||
| return equalStruct(v1, v2) | |||
| case reflect.Uint32, reflect.Uint64: | |||
| return v1.Uint() == v2.Uint() | |||
| } | |||
| // unknown type, so not a protocol buffer | |||
| log.Printf("proto: don't know how to compare %v", v1) | |||
| return false | |||
| } | |||
| // base is the struct type that the extensions are based on. | |||
| // em1 and em2 are extension maps. | |||
| func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { | |||
| if len(em1) != len(em2) { | |||
| return false | |||
| } | |||
| for extNum, e1 := range em1 { | |||
| e2, ok := em2[extNum] | |||
| if !ok { | |||
| return false | |||
| } | |||
| m1, m2 := e1.value, e2.value | |||
| if m1 != nil && m2 != nil { | |||
| // Both are unencoded. | |||
| if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { | |||
| return false | |||
| } | |||
| continue | |||
| } | |||
| // At least one is encoded. To do a semantically correct comparison | |||
| // we need to unmarshal them first. | |||
| var desc *ExtensionDesc | |||
| if m := extensionMaps[base]; m != nil { | |||
| desc = m[extNum] | |||
| } | |||
| if desc == nil { | |||
| log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) | |||
| continue | |||
| } | |||
| var err error | |||
| if m1 == nil { | |||
| m1, err = decodeExtension(e1.enc, desc) | |||
| } | |||
| if m2 == nil && err == nil { | |||
| m2, err = decodeExtension(e2.enc, desc) | |||
| } | |||
| if err != nil { | |||
| // The encoded form is invalid. | |||
| log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) | |||
| return false | |||
| } | |||
| if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { | |||
| return false | |||
| } | |||
| } | |||
| return true | |||
| } | |||
| @@ -0,0 +1,399 @@ | |||
| // Go support for Protocol Buffers - Google's data interchange format | |||
| // | |||
| // Copyright 2010 The Go Authors. All rights reserved. | |||
| // https://github.com/golang/protobuf | |||
| // | |||
| // Redistribution and use in source and binary forms, with or without | |||
| // modification, are permitted provided that the following conditions are | |||
| // met: | |||
| // | |||
| // * Redistributions of source code must retain the above copyright | |||
| // notice, this list of conditions and the following disclaimer. | |||
| // * Redistributions in binary form must reproduce the above | |||
| // copyright notice, this list of conditions and the following disclaimer | |||
| // in the documentation and/or other materials provided with the | |||
| // distribution. | |||
| // * Neither the name of Google Inc. nor the names of its | |||
| // contributors may be used to endorse or promote products derived from | |||
| // this software without specific prior written permission. | |||
| // | |||
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| package proto | |||
| /* | |||
| * Types and routines for supporting protocol buffer extensions. | |||
| */ | |||
| import ( | |||
| "errors" | |||
| "fmt" | |||
| "reflect" | |||
| "strconv" | |||
| "sync" | |||
| ) | |||
| // ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. | |||
| var ErrMissingExtension = errors.New("proto: missing extension") | |||
| // ExtensionRange represents a range of message extensions for a protocol buffer. | |||
| // Used in code generated by the protocol compiler. | |||
| type ExtensionRange struct { | |||
| Start, End int32 // both inclusive | |||
| } | |||
| // extendableProto is an interface implemented by any protocol buffer that may be extended. | |||
| type extendableProto interface { | |||
| Message | |||
| ExtensionRangeArray() []ExtensionRange | |||
| ExtensionMap() map[int32]Extension | |||
| } | |||
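| // A sketch of what the protocol compiler generates to satisfy this | |||
| // interface for an extendable message (the Base type, its XXX_extensions | |||
| // field and the extension range are hypothetical): | |||
| // | |||
| // func (m *Base) ExtensionRangeArray() []proto.ExtensionRange { | |||
| // return []proto.ExtensionRange{{Start: 100, End: 200}} | |||
| // } | |||
| // func (m *Base) ExtensionMap() map[int32]proto.Extension { | |||
| // if m.XXX_extensions == nil { | |||
| // m.XXX_extensions = make(map[int32]proto.Extension) | |||
| // } | |||
| // return m.XXX_extensions | |||
| // } | |||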
| var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() | |||
| // ExtensionDesc represents an extension specification. | |||
| // Used in generated code from the protocol compiler. | |||
| type ExtensionDesc struct { | |||
| ExtendedType Message // nil pointer to the type that is being extended | |||
| ExtensionType interface{} // nil pointer to the extension type | |||
| Field int32 // field number | |||
| Name string // fully-qualified name of extension, for text formatting | |||
| Tag string // protobuf tag style | |||
| } | |||
| func (ed *ExtensionDesc) repeated() bool { | |||
| t := reflect.TypeOf(ed.ExtensionType) | |||
| return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 | |||
| } | |||
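| // A hypothetical descriptor of the shape the protocol compiler emits | |||
| // (the extended type, field number and tag below are illustrative only): | |||
| // | |||
| // var E_Nickname = &proto.ExtensionDesc{ | |||
| // ExtendedType: (*pb.Base)(nil), // nil pointer to the extended type | |||
| // ExtensionType: (*string)(nil), // nil pointer to the extension type | |||
| // Field: 100, | |||
| // Name: "example.nickname", | |||
| // Tag: "bytes,100,opt,name=nickname", | |||
| // } | |||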
| // Extension represents an extension in a message. | |||
| type Extension struct { | |||
| // When an extension is stored in a message using SetExtension | |||
| // only desc and value are set. When the message is marshaled | |||
| // enc will be set to the encoded form of the message. | |||
| // | |||
| // When a message is unmarshaled and contains extensions, each | |||
| // extension will have only enc set. When such an extension is | |||
| // accessed using GetExtension (or GetExtensions) desc and value | |||
| // will be set. | |||
| desc *ExtensionDesc | |||
| value interface{} | |||
| enc []byte | |||
| } | |||
| // SetRawExtension is for testing only. | |||
| func SetRawExtension(base extendableProto, id int32, b []byte) { | |||
| base.ExtensionMap()[id] = Extension{enc: b} | |||
| } | |||
| // isExtensionField returns true iff the given field number is in an extension range. | |||
| func isExtensionField(pb extendableProto, field int32) bool { | |||
| for _, er := range pb.ExtensionRangeArray() { | |||
| if er.Start <= field && field <= er.End { | |||
| return true | |||
| } | |||
| } | |||
| return false | |||
| } | |||
| // checkExtensionTypes checks that the given extension is valid for pb. | |||
| func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { | |||
| // Check the extended type. | |||
| if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { | |||
| return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) | |||
| } | |||
| // Check the range. | |||
| if !isExtensionField(pb, extension.Field) { | |||
| return errors.New("proto: bad extension number; not in declared ranges") | |||
| } | |||
| return nil | |||
| } | |||
| // extPropKey is sufficient to uniquely identify an extension. | |||
| type extPropKey struct { | |||
| base reflect.Type | |||
| field int32 | |||
| } | |||
| var extProp = struct { | |||
| sync.RWMutex | |||
| m map[extPropKey]*Properties | |||
| }{ | |||
| m: make(map[extPropKey]*Properties), | |||
| } | |||
| func extensionProperties(ed *ExtensionDesc) *Properties { | |||
| key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} | |||
| extProp.RLock() | |||
| if prop, ok := extProp.m[key]; ok { | |||
| extProp.RUnlock() | |||
| return prop | |||
| } | |||
| extProp.RUnlock() | |||
| extProp.Lock() | |||
| defer extProp.Unlock() | |||
| // Check again. | |||
| if prop, ok := extProp.m[key]; ok { | |||
| return prop | |||
| } | |||
| prop := new(Properties) | |||
| prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) | |||
| extProp.m[key] = prop | |||
| return prop | |||
| } | |||
| // encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. | |||
| func encodeExtensionMap(m map[int32]Extension) error { | |||
| for k, e := range m { | |||
| if e.value == nil || e.desc == nil { | |||
| // Extension is only in its encoded form. | |||
| continue | |||
| } | |||
| // We don't skip extensions that have an encoded form set, | |||
| // because the extension value may have been mutated after | |||
| // the last time this function was called. | |||
| et := reflect.TypeOf(e.desc.ExtensionType) | |||
| props := extensionProperties(e.desc) | |||
| p := NewBuffer(nil) | |||
| // If e.value has type T, the encoder expects a *struct{ X T }. | |||
| // Pass a *T with a zero field and hope it all works out. | |||
| x := reflect.New(et) | |||
| x.Elem().Set(reflect.ValueOf(e.value)) | |||
| if err := props.enc(p, props, toStructPointer(x)); err != nil { | |||
| return err | |||
| } | |||
| e.enc = p.buf | |||
| m[k] = e | |||
| } | |||
| return nil | |||
| } | |||
| func sizeExtensionMap(m map[int32]Extension) (n int) { | |||
| for _, e := range m { | |||
| if e.value == nil || e.desc == nil { | |||
| // Extension is only in its encoded form. | |||
| n += len(e.enc) | |||
| continue | |||
| } | |||
| // We don't skip extensions that have an encoded form set, | |||
| // because the extension value may have been mutated after | |||
| // the last time this function was called. | |||
| et := reflect.TypeOf(e.desc.ExtensionType) | |||
| props := extensionProperties(e.desc) | |||
| // If e.value has type T, the encoder expects a *struct{ X T }. | |||
| // Pass a *T with a zero field and hope it all works out. | |||
| x := reflect.New(et) | |||
| x.Elem().Set(reflect.ValueOf(e.value)) | |||
| n += props.size(props, toStructPointer(x)) | |||
| } | |||
| return | |||
| } | |||
| // HasExtension returns whether the given extension is present in pb. | |||
| func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { | |||
| // TODO: Check types, field numbers, etc.? | |||
| _, ok := pb.ExtensionMap()[extension.Field] | |||
| return ok | |||
| } | |||
| // ClearExtension removes the given extension from pb. | |||
| func ClearExtension(pb extendableProto, extension *ExtensionDesc) { | |||
| // TODO: Check types, field numbers, etc.? | |||
| delete(pb.ExtensionMap(), extension.Field) | |||
| } | |||
| // GetExtension parses and returns the given extension of pb. | |||
| // If the extension is not present and has no default value it returns ErrMissingExtension. | |||
| func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { | |||
| if err := checkExtensionTypes(pb, extension); err != nil { | |||
| return nil, err | |||
| } | |||
| emap := pb.ExtensionMap() | |||
| e, ok := emap[extension.Field] | |||
| if !ok { | |||
| // defaultExtensionValue returns the default value or | |||
| // ErrMissingExtension if there is no default. | |||
| return defaultExtensionValue(extension) | |||
| } | |||
| if e.value != nil { | |||
| // Already decoded. Check the descriptor, though. | |||
| if e.desc != extension { | |||
| // This shouldn't happen. If it does, it means that | |||
| // GetExtension was called twice with two different | |||
| // descriptors with the same field number. | |||
| return nil, errors.New("proto: descriptor conflict") | |||
| } | |||
| return e.value, nil | |||
| } | |||
| v, err := decodeExtension(e.enc, extension) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| // Remember the decoded version and drop the encoded version. | |||
| // That way it is safe to mutate what we return. | |||
| e.value = v | |||
| e.desc = extension | |||
| e.enc = nil | |||
| emap[extension.Field] = e | |||
| return e.value, nil | |||
| } | |||
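| // Round-trip sketch, reusing the hypothetical E_Nickname descriptor above: | |||
| // | |||
| // msg := &pb.Base{} | |||
| // if err := proto.SetExtension(msg, E_Nickname, proto.String("fred")); err != nil { | |||
| // log.Fatal(err) | |||
| // } | |||
| // v, err := proto.GetExtension(msg, E_Nickname) | |||
| // if err != nil { | |||
| // log.Fatal(err) | |||
| // } | |||
| // fmt.Println(*v.(*string)) // "fred" | |||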
| // defaultExtensionValue returns the default value for extension. | |||
| // If no default for an extension is defined ErrMissingExtension is returned. | |||
| func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { | |||
| t := reflect.TypeOf(extension.ExtensionType) | |||
| props := extensionProperties(extension) | |||
| sf, _, err := fieldDefault(t, props) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if sf == nil || sf.value == nil { | |||
| // There is no default value. | |||
| return nil, ErrMissingExtension | |||
| } | |||
| if t.Kind() != reflect.Ptr { | |||
| // We do not need to return a pointer; we can return sf.value directly. | |||
| return sf.value, nil | |||
| } | |||
| // We need to return an interface{} that is a pointer to sf.value. | |||
| value := reflect.New(t).Elem() | |||
| value.Set(reflect.New(value.Type().Elem())) | |||
| if sf.kind == reflect.Int32 { | |||
| // We may have an int32 or an enum, but the underlying data is int32. | |||
| // Since we can't set an int32 into a non-int32 reflect.Value directly, | |||
| // set it as an int32. | |||
| value.Elem().SetInt(int64(sf.value.(int32))) | |||
| } else { | |||
| value.Elem().Set(reflect.ValueOf(sf.value)) | |||
| } | |||
| return value.Interface(), nil | |||
| } | |||
| // decodeExtension decodes an extension encoded in b. | |||
| func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { | |||
| o := NewBuffer(b) | |||
| t := reflect.TypeOf(extension.ExtensionType) | |||
| props := extensionProperties(extension) | |||
| // t is a pointer to a struct, pointer to basic type or a slice. | |||
| // Allocate a "field" to store the pointer/slice itself; the | |||
| // pointer/slice will be stored here. We pass | |||
| // the address of this field to props.dec. | |||
| // This passes a zero field and a *t and lets props.dec | |||
| // interpret it as a *struct{ x t }. | |||
| value := reflect.New(t).Elem() | |||
| for { | |||
| // Discard wire type and field number varint. It isn't needed. | |||
| if _, err := o.DecodeVarint(); err != nil { | |||
| return nil, err | |||
| } | |||
| if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { | |||
| return nil, err | |||
| } | |||
| if o.index >= len(o.buf) { | |||
| break | |||
| } | |||
| } | |||
| return value.Interface(), nil | |||
| } | |||
| // GetExtensions returns a slice of the extensions present in pb that are also listed in es. | |||
| // The returned slice has the same length as es; missing extensions will appear as nil elements. | |||
| func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { | |||
| epb, ok := pb.(extendableProto) | |||
| if !ok { | |||
| err = errors.New("proto: not an extendable proto") | |||
| return | |||
| } | |||
| extensions = make([]interface{}, len(es)) | |||
| for i, e := range es { | |||
| extensions[i], err = GetExtension(epb, e) | |||
| if err == ErrMissingExtension { | |||
| err = nil | |||
| } | |||
| if err != nil { | |||
| return | |||
| } | |||
| } | |||
| return | |||
| } | |||
| // SetExtension sets the specified extension of pb to the specified value. | |||
| func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { | |||
| if err := checkExtensionTypes(pb, extension); err != nil { | |||
| return err | |||
| } | |||
| typ := reflect.TypeOf(extension.ExtensionType) | |||
| if typ != reflect.TypeOf(value) { | |||
| return errors.New("proto: bad extension value type") | |||
| } | |||
| // nil extension values need to be caught early, because the | |||
| // encoder can't distinguish an ErrNil due to a nil extension | |||
| // from an ErrNil due to a missing field. Extensions are | |||
| // always optional, so the encoder would just swallow the error | |||
| // and drop all the extensions from the encoded message. | |||
| if reflect.ValueOf(value).IsNil() { | |||
| return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) | |||
| } | |||
| pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} | |||
| return nil | |||
| } | |||
| // A global registry of extensions. | |||
| // The generated code will register the generated descriptors by calling RegisterExtension. | |||
| var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) | |||
| // RegisterExtension is called from the generated code. | |||
| func RegisterExtension(desc *ExtensionDesc) { | |||
| st := reflect.TypeOf(desc.ExtendedType).Elem() | |||
| m := extensionMaps[st] | |||
| if m == nil { | |||
| m = make(map[int32]*ExtensionDesc) | |||
| extensionMaps[st] = m | |||
| } | |||
| if _, ok := m[desc.Field]; ok { | |||
| panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) | |||
| } | |||
| m[desc.Field] = desc | |||
| } | |||
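| // Generated files register their descriptors from an init function, e.g. | |||
| // (E_Nickname is the hypothetical descriptor sketched earlier): | |||
| // | |||
| // func init() { | |||
| // proto.RegisterExtension(E_Nickname) | |||
| // } | |||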
| // RegisteredExtensions returns a map of the registered extensions of a | |||
| // protocol buffer struct, indexed by the extension number. | |||
| // The argument pb should be a nil pointer to the struct type. | |||
| func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { | |||
| return extensionMaps[reflect.TypeOf(pb).Elem()] | |||
| } | |||
| @@ -0,0 +1,894 @@ | |||
| // Go support for Protocol Buffers - Google's data interchange format | |||
| // | |||
| // Copyright 2010 The Go Authors. All rights reserved. | |||
| // https://github.com/golang/protobuf | |||
| // | |||
| // Redistribution and use in source and binary forms, with or without | |||
| // modification, are permitted provided that the following conditions are | |||
| // met: | |||
| // | |||
| // * Redistributions of source code must retain the above copyright | |||
| // notice, this list of conditions and the following disclaimer. | |||
| // * Redistributions in binary form must reproduce the above | |||
| // copyright notice, this list of conditions and the following disclaimer | |||
| // in the documentation and/or other materials provided with the | |||
| // distribution. | |||
| // * Neither the name of Google Inc. nor the names of its | |||
| // contributors may be used to endorse or promote products derived from | |||
| // this software without specific prior written permission. | |||
| // | |||
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| /* | |||
| Package proto converts data structures to and from the wire format of | |||
| protocol buffers. It works in concert with the Go source code generated | |||
| for .proto files by the protocol compiler. | |||
| A summary of the properties of the protocol buffer interface | |||
| for a protocol buffer variable v: | |||
| - Names are turned from camel_case to CamelCase for export. | |||
| - There are no methods on v to set fields; just treat | |||
| them as structure fields. | |||
| - There are getters that return a field's value if set, | |||
| and return the field's default value if unset. | |||
| The getters work even if the receiver is a nil message. | |||
| - The zero value for a struct is its correct initialization state. | |||
| All desired fields must be set before marshaling. | |||
| - A Reset() method will restore a protobuf struct to its zero state. | |||
| - Non-repeated fields are pointers to the values; nil means unset. | |||
| That is, optional or required field int32 f becomes F *int32. | |||
| - Repeated fields are slices. | |||
| - Helper functions are available to aid the setting of fields. | |||
| msg.Foo = proto.String("hello") // set field | |||
| - Constants are defined to hold the default values of all fields that | |||
| have them. They have the form Default_StructName_FieldName. | |||
| Because the getter methods handle defaulted values, | |||
| direct use of these constants should be rare. | |||
| - Enums are given type names and maps from names to values. | |||
| Enum values are prefixed by the enclosing message's name, or by the | |||
| enum's type name if it is a top-level enum. Enum types have a String | |||
| method, and an Enum method to assist in message construction. | |||
| - Nested messages, groups and enums have type names prefixed with the name of | |||
| the surrounding message type. | |||
| - Extensions are given descriptor names that start with E_, | |||
| followed by an underscore-delimited list of the nested messages | |||
| that contain it (if any) followed by the CamelCased name of the | |||
| extension field itself. HasExtension, ClearExtension, GetExtension | |||
| and SetExtension are functions for manipulating extensions. | |||
| - Oneof field sets are given a single field in their message, | |||
| with distinguished wrapper types for each possible field value. | |||
| - Marshal and Unmarshal are functions to encode and decode the wire format. | |||
| When the .proto file specifies `syntax="proto3"`, there are some differences: | |||
| - Non-repeated fields of non-message type are values instead of pointers. | |||
| - Getters are only generated for message and oneof fields. | |||
| - Enum types do not get an Enum method. | |||
| The simplest way to describe this is to see an example. | |||
| Given file test.proto, containing | |||
| package example; | |||
| enum FOO { X = 17; } | |||
| message Test { | |||
| required string label = 1; | |||
| optional int32 type = 2 [default=77]; | |||
| repeated int64 reps = 3; | |||
| optional group OptionalGroup = 4 { | |||
| required string RequiredField = 5; | |||
| } | |||
| oneof union { | |||
| int32 number = 6; | |||
| string name = 7; | |||
| } | |||
| } | |||
| The resulting file, test.pb.go, is: | |||
| package example | |||
| import proto "github.com/golang/protobuf/proto" | |||
| import math "math" | |||
| type FOO int32 | |||
| const ( | |||
| FOO_X FOO = 17 | |||
| ) | |||
| var FOO_name = map[int32]string{ | |||
| 17: "X", | |||
| } | |||
| var FOO_value = map[string]int32{ | |||
| "X": 17, | |||
| } | |||
| func (x FOO) Enum() *FOO { | |||
| p := new(FOO) | |||
| *p = x | |||
| return p | |||
| } | |||
| func (x FOO) String() string { | |||
| return proto.EnumName(FOO_name, int32(x)) | |||
| } | |||
| func (x *FOO) UnmarshalJSON(data []byte) error { | |||
| value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") | |||
| if err != nil { | |||
| return err | |||
| } | |||
| *x = FOO(value) | |||
| return nil | |||
| } | |||
| type Test struct { | |||
| Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` | |||
| Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` | |||
| Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` | |||
| Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` | |||
| // Types that are valid to be assigned to Union: | |||
| // *Test_Number | |||
| // *Test_Name | |||
| Union isTest_Union `protobuf_oneof:"union"` | |||
| XXX_unrecognized []byte `json:"-"` | |||
| } | |||
| func (m *Test) Reset() { *m = Test{} } | |||
| func (m *Test) String() string { return proto.CompactTextString(m) } | |||
| func (*Test) ProtoMessage() {} | |||
| type isTest_Union interface { | |||
| isTest_Union() | |||
| } | |||
| type Test_Number struct { | |||
| Number int32 `protobuf:"varint,6,opt,name=number"` | |||
| } | |||
| type Test_Name struct { | |||
| Name string `protobuf:"bytes,7,opt,name=name"` | |||
| } | |||
| func (*Test_Number) isTest_Union() {} | |||
| func (*Test_Name) isTest_Union() {} | |||
| func (m *Test) GetUnion() isTest_Union { | |||
| if m != nil { | |||
| return m.Union | |||
| } | |||
| return nil | |||
| } | |||
| const Default_Test_Type int32 = 77 | |||
| func (m *Test) GetLabel() string { | |||
| if m != nil && m.Label != nil { | |||
| return *m.Label | |||
| } | |||
| return "" | |||
| } | |||
| func (m *Test) GetType() int32 { | |||
| if m != nil && m.Type != nil { | |||
| return *m.Type | |||
| } | |||
| return Default_Test_Type | |||
| } | |||
| func (m *Test) GetOptionalgroup() *Test_OptionalGroup { | |||
| if m != nil { | |||
| return m.Optionalgroup | |||
| } | |||
| return nil | |||
| } | |||
| type Test_OptionalGroup struct { | |||
| RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` | |||
| } | |||
| func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } | |||
| func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } | |||
| func (m *Test_OptionalGroup) GetRequiredField() string { | |||
| if m != nil && m.RequiredField != nil { | |||
| return *m.RequiredField | |||
| } | |||
| return "" | |||
| } | |||
| func (m *Test) GetNumber() int32 { | |||
| if x, ok := m.GetUnion().(*Test_Number); ok { | |||
| return x.Number | |||
| } | |||
| return 0 | |||
| } | |||
| func (m *Test) GetName() string { | |||
| if x, ok := m.GetUnion().(*Test_Name); ok { | |||
| return x.Name | |||
| } | |||
| return "" | |||
| } | |||
| func init() { | |||
| proto.RegisterEnum("example.FOO", FOO_name, FOO_value) | |||
| } | |||
| To create and play with a Test object: | |||
| package main | |||
| import ( | |||
| "log" | |||
| "github.com/golang/protobuf/proto" | |||
| pb "./example.pb" | |||
| ) | |||
| func main() { | |||
| test := &pb.Test{ | |||
| Label: proto.String("hello"), | |||
| Type: proto.Int32(17), | |||
| Reps: []int64{1, 2, 3}, | |||
| Optionalgroup: &pb.Test_OptionalGroup{ | |||
| RequiredField: proto.String("good bye"), | |||
| }, | |||
| Union: &pb.Test_Name{"fred"}, | |||
| } | |||
| data, err := proto.Marshal(test) | |||
| if err != nil { | |||
| log.Fatal("marshaling error: ", err) | |||
| } | |||
| newTest := &pb.Test{} | |||
| err = proto.Unmarshal(data, newTest) | |||
| if err != nil { | |||
| log.Fatal("unmarshaling error: ", err) | |||
| } | |||
| // Now test and newTest contain the same data. | |||
| if test.GetLabel() != newTest.GetLabel() { | |||
| log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) | |||
| } | |||
| // Use a type switch to determine which oneof was set. | |||
| switch u := test.Union.(type) { | |||
| case *pb.Test_Number: // u.Number contains the number. | |||
| case *pb.Test_Name: // u.Name contains the string. | |||
| } | |||
| // etc. | |||
| } | |||
| */ | |||
| package proto | |||
| import ( | |||
| "encoding/json" | |||
| "fmt" | |||
| "log" | |||
| "reflect" | |||
| "sort" | |||
| "strconv" | |||
| "sync" | |||
| ) | |||
| // Message is implemented by generated protocol buffer messages. | |||
| type Message interface { | |||
| Reset() | |||
| String() string | |||
| ProtoMessage() | |||
| } | |||
| // Stats records allocation details about the protocol buffer encoders | |||
| // and decoders. Useful for tuning the library itself. | |||
| type Stats struct { | |||
| Emalloc uint64 // mallocs in encode | |||
| Dmalloc uint64 // mallocs in decode | |||
| Encode uint64 // number of encodes | |||
| Decode uint64 // number of decodes | |||
| Chit uint64 // number of cache hits | |||
| Cmiss uint64 // number of cache misses | |||
| Size uint64 // number of sizes | |||
| } | |||
| // Set to true to enable stats collection. | |||
| const collectStats = false | |||
| var stats Stats | |||
| // GetStats returns a copy of the global Stats structure. | |||
| func GetStats() Stats { return stats } | |||
| // A Buffer is a buffer manager for marshaling and unmarshaling | |||
| // protocol buffers. It may be reused between invocations to | |||
| // reduce memory usage. It is not necessary to use a Buffer; | |||
| // the global functions Marshal and Unmarshal create a | |||
| // temporary Buffer and are fine for most applications. | |||
| type Buffer struct { | |||
| buf []byte // encode/decode byte stream | |||
| index int // write point | |||
| // pools of basic types to amortize allocation. | |||
| bools []bool | |||
| uint32s []uint32 | |||
| uint64s []uint64 | |||
| // extra pools, only used with pointer_reflect.go | |||
| int32s []int32 | |||
| int64s []int64 | |||
| float32s []float32 | |||
| float64s []float64 | |||
| } | |||
| // NewBuffer allocates a new Buffer and initializes its internal data to | |||
| // the contents of the argument slice. | |||
| func NewBuffer(e []byte) *Buffer { | |||
| return &Buffer{buf: e} | |||
| } | |||
| // Reset resets the Buffer, ready for marshaling a new protocol buffer. | |||
| func (p *Buffer) Reset() { | |||
| p.buf = p.buf[0:0] // for reading/writing | |||
| p.index = 0 // for reading | |||
| } | |||
| // SetBuf replaces the internal buffer with the slice, | |||
| // ready for unmarshaling the contents of the slice. | |||
| func (p *Buffer) SetBuf(s []byte) { | |||
| p.buf = s | |||
| p.index = 0 | |||
| } | |||
| // Bytes returns the contents of the Buffer. | |||
| func (p *Buffer) Bytes() []byte { return p.buf } | |||
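| // Reuse sketch: one Buffer amortizes allocations across many messages. | |||
| // msgs and send are assumptions for illustration, not part of this package. | |||
| // | |||
| // var buf proto.Buffer | |||
| // for _, msg := range msgs { | |||
| // buf.Reset() | |||
| // if err := buf.Marshal(msg); err != nil { | |||
| // log.Fatal(err) | |||
| // } | |||
| // send(buf.Bytes()) | |||
| // } | |||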
| /* | |||
| * Helper routines for simplifying the creation of optional fields of basic type. | |||
| */ | |||
| // Bool is a helper routine that allocates a new bool value | |||
| // to store v and returns a pointer to it. | |||
| func Bool(v bool) *bool { | |||
| return &v | |||
| } | |||
| // Int32 is a helper routine that allocates a new int32 value | |||
| // to store v and returns a pointer to it. | |||
| func Int32(v int32) *int32 { | |||
| return &v | |||
| } | |||
| // Int is a helper routine that allocates a new int32 value | |||
| // to store v and returns a pointer to it, but unlike Int32 | |||
| // its argument value is an int. | |||
| func Int(v int) *int32 { | |||
| p := new(int32) | |||
| *p = int32(v) | |||
| return p | |||
| } | |||
| // Int64 is a helper routine that allocates a new int64 value | |||
| // to store v and returns a pointer to it. | |||
| func Int64(v int64) *int64 { | |||
| return &v | |||
| } | |||
| // Float32 is a helper routine that allocates a new float32 value | |||
| // to store v and returns a pointer to it. | |||
| func Float32(v float32) *float32 { | |||
| return &v | |||
| } | |||
| // Float64 is a helper routine that allocates a new float64 value | |||
| // to store v and returns a pointer to it. | |||
| func Float64(v float64) *float64 { | |||
| return &v | |||
| } | |||
| // Uint32 is a helper routine that allocates a new uint32 value | |||
| // to store v and returns a pointer to it. | |||
| func Uint32(v uint32) *uint32 { | |||
| return &v | |||
| } | |||
| // Uint64 is a helper routine that allocates a new uint64 value | |||
| // to store v and returns a pointer to it. | |||
| func Uint64(v uint64) *uint64 { | |||
| return &v | |||
| } | |||
| // String is a helper routine that allocates a new string value | |||
| // to store v and returns a pointer to it. | |||
| func String(v string) *string { | |||
| return &v | |||
| } | |||
| // EnumName is a helper function to simplify printing protocol buffer enums | |||
| // by name. Given an enum map and a value, it returns a useful string. | |||
| func EnumName(m map[int32]string, v int32) string { | |||
| s, ok := m[v] | |||
| if ok { | |||
| return s | |||
| } | |||
| return strconv.Itoa(int(v)) | |||
| } | |||
| // UnmarshalJSONEnum is a helper function to simplify recovering enum int values | |||
| // from their JSON-encoded representation. Given a map from the enum's symbolic | |||
| // names to its int values, and a byte buffer containing the JSON-encoded | |||
| // value, it returns an int32 that can be cast to the enum type by the caller. | |||
| // | |||
| // The function can deal with both JSON representations, numeric and symbolic. | |||
| func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { | |||
| if data[0] == '"' { | |||
| // New style: enums are strings. | |||
| var repr string | |||
| if err := json.Unmarshal(data, &repr); err != nil { | |||
| return -1, err | |||
| } | |||
| val, ok := m[repr] | |||
| if !ok { | |||
| return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) | |||
| } | |||
| return val, nil | |||
| } | |||
| // Old style: enums are ints. | |||
| var val int32 | |||
| if err := json.Unmarshal(data, &val); err != nil { | |||
| return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) | |||
| } | |||
| return val, nil | |||
| } | |||
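| // Both representations decode to the same value; with the FOO enum from | |||
| // the package documentation (FOO_value maps "X" to 17): | |||
| // | |||
| // v1, _ := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // symbolic | |||
| // v2, _ := proto.UnmarshalJSONEnum(FOO_value, []byte("17"), "FOO") // numeric | |||
| // // v1 == v2 == 17 | |||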
| // DebugPrint dumps the encoded data in b in a debugging format with a header | |||
| // including the string s. Used in testing but made available for general debugging. | |||
| func (p *Buffer) DebugPrint(s string, b []byte) { | |||
| var u uint64 | |||
| obuf := p.buf | |||
| index := p.index | |||
| p.buf = b | |||
| p.index = 0 | |||
| depth := 0 | |||
| fmt.Printf("\n--- %s ---\n", s) | |||
| out: | |||
| for { | |||
| for i := 0; i < depth; i++ { | |||
| fmt.Print(" ") | |||
| } | |||
| index := p.index | |||
| if index == len(p.buf) { | |||
| break | |||
| } | |||
| op, err := p.DecodeVarint() | |||
| if err != nil { | |||
| fmt.Printf("%3d: fetching op err %v\n", index, err) | |||
| break out | |||
| } | |||
| tag := op >> 3 | |||
| wire := op & 7 | |||
| switch wire { | |||
| default: | |||
| fmt.Printf("%3d: t=%3d unknown wire=%d\n", | |||
| index, tag, wire) | |||
| break out | |||
| case WireBytes: | |||
| var r []byte | |||
| r, err = p.DecodeRawBytes(false) | |||
| if err != nil { | |||
| break out | |||
| } | |||
| fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) | |||
| if len(r) <= 6 { | |||
| for i := 0; i < len(r); i++ { | |||
| fmt.Printf(" %.2x", r[i]) | |||
| } | |||
| } else { | |||
| for i := 0; i < 3; i++ { | |||
| fmt.Printf(" %.2x", r[i]) | |||
| } | |||
| fmt.Printf(" ..") | |||
| for i := len(r) - 3; i < len(r); i++ { | |||
| fmt.Printf(" %.2x", r[i]) | |||
| } | |||
| } | |||
| fmt.Printf("\n") | |||
| case WireFixed32: | |||
| u, err = p.DecodeFixed32() | |||
| if err != nil { | |||
| fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) | |||
| break out | |||
| } | |||
| fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) | |||
| case WireFixed64: | |||
| u, err = p.DecodeFixed64() | |||
| if err != nil { | |||
| fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) | |||
| break out | |||
| } | |||
| fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) | |||
| case WireVarint: | |||
| u, err = p.DecodeVarint() | |||
| if err != nil { | |||
| fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) | |||
| break out | |||
| } | |||
| fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) | |||
| case WireStartGroup: | |||
| fmt.Printf("%3d: t=%3d start\n", index, tag) | |||
| depth++ | |||
| case WireEndGroup: | |||
| depth-- | |||
| fmt.Printf("%3d: t=%3d end\n", index, tag) | |||
| } | |||
| } | |||
| if depth != 0 { | |||
| fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) | |||
| } | |||
| fmt.Printf("\n") | |||
| p.buf = obuf | |||
| p.index = index | |||
| } | |||
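| // Typical debugging use, with data produced by Marshal (test as in the | |||
| // package example): | |||
| // | |||
| // data, _ := proto.Marshal(test) | |||
| // new(proto.Buffer).DebugPrint("test", data) | |||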
| // SetDefaults sets unset protocol buffer fields to their default values. | |||
| // It only modifies fields that are both unset and have defined defaults. | |||
| // It recursively sets default values in any non-nil sub-messages. | |||
| func SetDefaults(pb Message) { | |||
| setDefaults(reflect.ValueOf(pb), true, false) | |||
| } | |||
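| // With the Test message from the package documentation, whose type field | |||
| // declares [default=77]: | |||
| // | |||
| // t := &pb.Test{Label: proto.String("x")} | |||
| // proto.SetDefaults(t) | |||
| // fmt.Println(*t.Type) // 77: the unset field was filled with its default | |||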
| // v is a pointer to a struct. | |||
| func setDefaults(v reflect.Value, recur, zeros bool) { | |||
| v = v.Elem() | |||
| defaultMu.RLock() | |||
| dm, ok := defaults[v.Type()] | |||
| defaultMu.RUnlock() | |||
| if !ok { | |||
| dm = buildDefaultMessage(v.Type()) | |||
| defaultMu.Lock() | |||
| defaults[v.Type()] = dm | |||
| defaultMu.Unlock() | |||
| } | |||
| for _, sf := range dm.scalars { | |||
| f := v.Field(sf.index) | |||
| if !f.IsNil() { | |||
| // field already set | |||
| continue | |||
| } | |||
| dv := sf.value | |||
| if dv == nil && !zeros { | |||
| // no explicit default, and don't want to set zeros | |||
| continue | |||
| } | |||
| fptr := f.Addr().Interface() // **T | |||
| // TODO: Consider batching the allocations we do here. | |||
| switch sf.kind { | |||
| case reflect.Bool: | |||
| b := new(bool) | |||
| if dv != nil { | |||
| *b = dv.(bool) | |||
| } | |||
| *(fptr.(**bool)) = b | |||
| case reflect.Float32: | |||
| f := new(float32) | |||
| if dv != nil { | |||
| *f = dv.(float32) | |||
| } | |||
| *(fptr.(**float32)) = f | |||
| case reflect.Float64: | |||
| f := new(float64) | |||
| if dv != nil { | |||
| *f = dv.(float64) | |||
| } | |||
| *(fptr.(**float64)) = f | |||
| case reflect.Int32: | |||
| // might be an enum | |||
| if ft := f.Type(); ft != int32PtrType { | |||
| // enum | |||
| f.Set(reflect.New(ft.Elem())) | |||
| if dv != nil { | |||
| f.Elem().SetInt(int64(dv.(int32))) | |||
| } | |||
| } else { | |||
| // int32 field | |||
| i := new(int32) | |||
| if dv != nil { | |||
| *i = dv.(int32) | |||
| } | |||
| *(fptr.(**int32)) = i | |||
| } | |||
| case reflect.Int64: | |||
| i := new(int64) | |||
| if dv != nil { | |||
| *i = dv.(int64) | |||
| } | |||
| *(fptr.(**int64)) = i | |||
| case reflect.String: | |||
| s := new(string) | |||
| if dv != nil { | |||
| *s = dv.(string) | |||
| } | |||
| *(fptr.(**string)) = s | |||
| case reflect.Uint8: | |||
| // exceptional case: []byte | |||
| var b []byte | |||
| if dv != nil { | |||
| db := dv.([]byte) | |||
| b = make([]byte, len(db)) | |||
| copy(b, db) | |||
| } else { | |||
| b = []byte{} | |||
| } | |||
| *(fptr.(*[]byte)) = b | |||
| case reflect.Uint32: | |||
| u := new(uint32) | |||
| if dv != nil { | |||
| *u = dv.(uint32) | |||
| } | |||
| *(fptr.(**uint32)) = u | |||
| case reflect.Uint64: | |||
| u := new(uint64) | |||
| if dv != nil { | |||
| *u = dv.(uint64) | |||
| } | |||
| *(fptr.(**uint64)) = u | |||
| default: | |||
| log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) | |||
| } | |||
| } | |||
| for _, ni := range dm.nested { | |||
| f := v.Field(ni) | |||
| // f is *T or []*T or map[T]*T | |||
| switch f.Kind() { | |||
| case reflect.Ptr: | |||
| if f.IsNil() { | |||
| continue | |||
| } | |||
| setDefaults(f, recur, zeros) | |||
| case reflect.Slice: | |||
| for i := 0; i < f.Len(); i++ { | |||
| e := f.Index(i) | |||
| if e.IsNil() { | |||
| continue | |||
| } | |||
| setDefaults(e, recur, zeros) | |||
| } | |||
| case reflect.Map: | |||
| for _, k := range f.MapKeys() { | |||
| e := f.MapIndex(k) | |||
| if e.IsNil() { | |||
| continue | |||
| } | |||
| setDefaults(e, recur, zeros) | |||
| } | |||
| } | |||
| } | |||
| } | |||
| var ( | |||
| // defaults maps a protocol buffer struct type to a slice of the fields, | |||
| // with its scalar fields set to their proto-declared non-zero default values. | |||
| defaultMu sync.RWMutex | |||
| defaults = make(map[reflect.Type]defaultMessage) | |||
| int32PtrType = reflect.TypeOf((*int32)(nil)) | |||
| ) | |||
| // defaultMessage represents information about the default values of a message. | |||
| type defaultMessage struct { | |||
| scalars []scalarField | |||
| nested []int // struct field index of nested messages | |||
| } | |||
| type scalarField struct { | |||
| index int // struct field index | |||
| kind reflect.Kind // element type (the T in *T or []T) | |||
| value interface{} // the proto-declared default value, or nil | |||
| } | |||
| // t is a struct type. | |||
| func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { | |||
| sprop := GetProperties(t) | |||
| for _, prop := range sprop.Prop { | |||
| fi, ok := sprop.decoderTags.get(prop.Tag) | |||
| if !ok { | |||
| // XXX_unrecognized | |||
| continue | |||
| } | |||
| ft := t.Field(fi).Type | |||
| sf, nested, err := fieldDefault(ft, prop) | |||
| switch { | |||
| case err != nil: | |||
| log.Print(err) | |||
| case nested: | |||
| dm.nested = append(dm.nested, fi) | |||
| case sf != nil: | |||
| sf.index = fi | |||
| dm.scalars = append(dm.scalars, *sf) | |||
| } | |||
| } | |||
| return dm | |||
| } | |||
| // fieldDefault returns the scalarField for field type ft. | |||
| // sf will be nil if the field cannot have a default. | |||
| // nestedMessage will be true if this is a nested message. | |||
| // Note that sf.index is not set on return. | |||
| func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { | |||
| var canHaveDefault bool | |||
| switch ft.Kind() { | |||
| case reflect.Ptr: | |||
| if ft.Elem().Kind() == reflect.Struct { | |||
| nestedMessage = true | |||
| } else { | |||
| canHaveDefault = true // proto2 scalar field | |||
| } | |||
| case reflect.Slice: | |||
| switch ft.Elem().Kind() { | |||
| case reflect.Ptr: | |||
| nestedMessage = true // repeated message | |||
| case reflect.Uint8: | |||
| canHaveDefault = true // bytes field | |||
| } | |||
| case reflect.Map: | |||
| if ft.Elem().Kind() == reflect.Ptr { | |||
| nestedMessage = true // map with message values | |||
| } | |||
| } | |||
| if !canHaveDefault { | |||
| if nestedMessage { | |||
| return nil, true, nil | |||
| } | |||
| return nil, false, nil | |||
| } | |||
| // We now know that ft is a pointer or slice. | |||
| sf = &scalarField{kind: ft.Elem().Kind()} | |||
| // scalar fields without defaults | |||
| if !prop.HasDefault { | |||
| return sf, false, nil | |||
| } | |||
| // a scalar field: either *T or []byte | |||
| switch ft.Elem().Kind() { | |||
| case reflect.Bool: | |||
| x, err := strconv.ParseBool(prop.Default) | |||
| if err != nil { | |||
| return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) | |||
| } | |||
| sf.value = x | |||
| case reflect.Float32: | |||
| x, err := strconv.ParseFloat(prop.Default, 32) | |||
| if err != nil { | |||
| return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) | |||
| } | |||
| sf.value = float32(x) | |||
| case reflect.Float64: | |||
| x, err := strconv.ParseFloat(prop.Default, 64) | |||
| if err != nil { | |||
| return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) | |||
| } | |||
| sf.value = x | |||
| case reflect.Int32: | |||
| x, err := strconv.ParseInt(prop.Default, 10, 32) | |||
| if err != nil { | |||
| return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) | |||
| } | |||
| sf.value = int32(x) | |||
| case reflect.Int64: | |||
| x, err := strconv.ParseInt(prop.Default, 10, 64) | |||
| if err != nil { | |||
| return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) | |||
| } | |||
| sf.value = x | |||
| case reflect.String: | |||
| sf.value = prop.Default | |||
| case reflect.Uint8: | |||
| // []byte (not *uint8) | |||
| sf.value = []byte(prop.Default) | |||
| case reflect.Uint32: | |||
| x, err := strconv.ParseUint(prop.Default, 10, 32) | |||
| if err != nil { | |||
| return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) | |||
| } | |||
| sf.value = uint32(x) | |||
| case reflect.Uint64: | |||
| x, err := strconv.ParseUint(prop.Default, 10, 64) | |||
| if err != nil { | |||
| return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) | |||
| } | |||
| sf.value = x | |||
| default: | |||
| return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) | |||
| } | |||
| return sf, false, nil | |||
| } | |||
| // Map fields may have key types of non-float scalars, strings and enums. | |||
| // The easiest way to sort them in some deterministic order is to use fmt. | |||
| // If this turns out to be inefficient we can always consider other options, | |||
| // such as doing a Schwartzian transform. | |||
| func mapKeys(vs []reflect.Value) sort.Interface { | |||
| s := mapKeySorter{ | |||
| vs: vs, | |||
| // default Less function: textual comparison | |||
| less: func(a, b reflect.Value) bool { | |||
| return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) | |||
| }, | |||
| } | |||
| // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; | |||
| // numeric keys are sorted numerically. | |||
| if len(vs) == 0 { | |||
| return s | |||
| } | |||
| switch vs[0].Kind() { | |||
| case reflect.Int32, reflect.Int64: | |||
| s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } | |||
| case reflect.Uint32, reflect.Uint64: | |||
| s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } | |||
| } | |||
| return s | |||
| } | |||
| type mapKeySorter struct { | |||
| vs []reflect.Value | |||
| less func(a, b reflect.Value) bool | |||
| } | |||
| func (s mapKeySorter) Len() int { return len(s.vs) } | |||
| func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } | |||
| func (s mapKeySorter) Less(i, j int) bool { | |||
| return s.less(s.vs[i], s.vs[j]) | |||
| } | |||
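| // Internal usage sketch, as in the map encoder: sort the keys before | |||
| // iterating so the wire output is deterministic. | |||
| // | |||
| // keys := v.MapKeys() // v is a reflect.Value of Kind Map | |||
| // sort.Sort(mapKeys(keys)) | |||
| // for _, k := range keys { ... encode v.MapIndex(k) ... } | |||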
| // isProto3Zero reports whether v is a zero proto3 value. | |||
| func isProto3Zero(v reflect.Value) bool { | |||
| switch v.Kind() { | |||
| case reflect.Bool: | |||
| return !v.Bool() | |||
| case reflect.Int32, reflect.Int64: | |||
| return v.Int() == 0 | |||
| case reflect.Uint32, reflect.Uint64: | |||
| return v.Uint() == 0 | |||
| case reflect.Float32, reflect.Float64: | |||
| return v.Float() == 0 | |||
| case reflect.String: | |||
| return v.String() == "" | |||
| } | |||
| return false | |||
| } | |||
| // ProtoPackageIsVersion1 is referenced from generated protocol buffer files | |||
| // to assert that the generated code is compatible with this version of the proto package. | |||
| const ProtoPackageIsVersion1 = true | |||
| @@ -0,0 +1,280 @@ | |||
| // Go support for Protocol Buffers - Google's data interchange format | |||
| // | |||
| // Copyright 2010 The Go Authors. All rights reserved. | |||
| // https://github.com/golang/protobuf | |||
| // | |||
| // Redistribution and use in source and binary forms, with or without | |||
| // modification, are permitted provided that the following conditions are | |||
| // met: | |||
| // | |||
| // * Redistributions of source code must retain the above copyright | |||
| // notice, this list of conditions and the following disclaimer. | |||
| // * Redistributions in binary form must reproduce the above | |||
| // copyright notice, this list of conditions and the following disclaimer | |||
| // in the documentation and/or other materials provided with the | |||
| // distribution. | |||
| // * Neither the name of Google Inc. nor the names of its | |||
| // contributors may be used to endorse or promote products derived from | |||
| // this software without specific prior written permission. | |||
| // | |||
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| package proto | |||
| /* | |||
| * Support for message sets. | |||
| */ | |||
| import ( | |||
| "bytes" | |||
| "encoding/json" | |||
| "errors" | |||
| "fmt" | |||
| "reflect" | |||
| "sort" | |||
| ) | |||
| // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. | |||
| // A message type ID is required for storing a protocol buffer in a message set. | |||
| var errNoMessageTypeID = errors.New("proto does not have a message type ID") | |||
| // The first two types (_MessageSet_Item and messageSet) | |||
| // model what the protocol compiler produces for the following protocol message: | |||
| // message MessageSet { | |||
| // repeated group Item = 1 { | |||
| // required int32 type_id = 2; | |||
| // required string message = 3; | |||
| // }; | |||
| // } | |||
| // That is the MessageSet wire format. We can't use a proto to generate these | |||
| // because that would introduce a circular dependency between it and this package. | |||
| type _MessageSet_Item struct { | |||
| TypeId *int32 `protobuf:"varint,2,req,name=type_id"` | |||
| Message []byte `protobuf:"bytes,3,req,name=message"` | |||
| } | |||
| type messageSet struct { | |||
| Item []*_MessageSet_Item `protobuf:"group,1,rep"` | |||
| XXX_unrecognized []byte | |||
| // TODO: caching? | |||
| } | |||
| // Make sure messageSet is a Message. | |||
| var _ Message = (*messageSet)(nil) | |||
| // messageTypeIder is an interface satisfied by a protocol buffer type | |||
| // that may be stored in a MessageSet. | |||
| type messageTypeIder interface { | |||
| MessageTypeId() int32 | |||
| } | |||
| func (ms *messageSet) find(pb Message) *_MessageSet_Item { | |||
| mti, ok := pb.(messageTypeIder) | |||
| if !ok { | |||
| return nil | |||
| } | |||
| id := mti.MessageTypeId() | |||
| for _, item := range ms.Item { | |||
| if *item.TypeId == id { | |||
| return item | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| func (ms *messageSet) Has(pb Message) bool { | |||
| return ms.find(pb) != nil | |||
| } | |||
| func (ms *messageSet) Unmarshal(pb Message) error { | |||
| if item := ms.find(pb); item != nil { | |||
| return Unmarshal(item.Message, pb) | |||
| } | |||
| if _, ok := pb.(messageTypeIder); !ok { | |||
| return errNoMessageTypeID | |||
| } | |||
| return nil // TODO: return error instead? | |||
| } | |||
| func (ms *messageSet) Marshal(pb Message) error { | |||
| msg, err := Marshal(pb) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if item := ms.find(pb); item != nil { | |||
| // reuse existing item | |||
| item.Message = msg | |||
| return nil | |||
| } | |||
| mti, ok := pb.(messageTypeIder) | |||
| if !ok { | |||
| return errNoMessageTypeID | |||
| } | |||
| mtid := mti.MessageTypeId() | |||
| ms.Item = append(ms.Item, &_MessageSet_Item{ | |||
| TypeId: &mtid, | |||
| Message: msg, | |||
| }) | |||
| return nil | |||
| } | |||
| func (ms *messageSet) Reset() { *ms = messageSet{} } | |||
| func (ms *messageSet) String() string { return CompactTextString(ms) } | |||
| func (*messageSet) ProtoMessage() {} | |||
| // Support for the message_set_wire_format message option. | |||
| func skipVarint(buf []byte) []byte { | |||
| i := 0 | |||
| for ; buf[i]&0x80 != 0; i++ { | |||
| } | |||
| return buf[i+1:] | |||
| } | |||
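| // Worked example: 150 encodes as the two-byte varint 0x96 0x01, so | |||
| // skipVarint([]byte{0x96, 0x01, 0x0a}) returns []byte{0x0a}. | |||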
| // MarshalMessageSet encodes the extension map represented by m in the message set wire format. | |||
| // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. | |||
| func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { | |||
| if err := encodeExtensionMap(m); err != nil { | |||
| return nil, err | |||
| } | |||
| // Sort extension IDs to provide a deterministic encoding. | |||
| // See also enc_map in encode.go. | |||
| ids := make([]int, 0, len(m)) | |||
| for id := range m { | |||
| ids = append(ids, int(id)) | |||
| } | |||
| sort.Ints(ids) | |||
| ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} | |||
| for _, id := range ids { | |||
| e := m[int32(id)] | |||
| // Remove the wire type and field number varint, as well as the length varint. | |||
| msg := skipVarint(skipVarint(e.enc)) | |||
| ms.Item = append(ms.Item, &_MessageSet_Item{ | |||
| TypeId: Int32(int32(id)), | |||
| Message: msg, | |||
| }) | |||
| } | |||
| return Marshal(ms) | |||
| } | |||
| // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. | |||
| // It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. | |||
| func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { | |||
| ms := new(messageSet) | |||
| if err := Unmarshal(buf, ms); err != nil { | |||
| return err | |||
| } | |||
| for _, item := range ms.Item { | |||
| id := *item.TypeId | |||
| msg := item.Message | |||
| // Restore wire type and field number varint, plus length varint. | |||
| // Be careful to preserve duplicate items. | |||
| b := EncodeVarint(uint64(id)<<3 | WireBytes) | |||
| if ext, ok := m[id]; ok { | |||
| // Existing data; rip off the tag and length varint | |||
| // so we join the new data correctly. | |||
| // We can assume that ext.enc is set because we are unmarshaling. | |||
| o := ext.enc[len(b):] // skip wire type and field number | |||
| _, n := DecodeVarint(o) // calculate length of length varint | |||
| o = o[n:] // skip length varint | |||
| msg = append(o, msg...) // join old data and new data | |||
| } | |||
| b = append(b, EncodeVarint(uint64(len(msg)))...) | |||
| b = append(b, msg...) | |||
| m[id] = Extension{enc: b} | |||
| } | |||
| return nil | |||
| } | |||
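| // Editorial sketch (not upstream code): the framing rebuilt by the loop | |||
| // above. For extension id 5 carrying payload "abc", the stored bytes are | |||
| // tag|len|payload: | |||
| func exampleExtFraming() []byte { | |||
| b := EncodeVarint(uint64(5)<<3 | WireBytes) // tag byte 0x2a | |||
| b = append(b, EncodeVarint(3)...) // length prefix | |||
| return append(b, "abc"...) // matches m[5].enc after UnmarshalMessageSet | |||
| } | |||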
| // MarshalMessageSetJSON encodes the extension map represented by m in JSON format. | |||
| // It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. | |||
| func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { | |||
| var b bytes.Buffer | |||
| b.WriteByte('{') | |||
| // Process the map in key order for deterministic output. | |||
| ids := make([]int32, 0, len(m)) | |||
| for id := range m { | |||
| ids = append(ids, id) | |||
| } | |||
| sort.Sort(int32Slice(ids)) // int32Slice defined in text.go | |||
| for i, id := range ids { | |||
| ext := m[id] | |||
| if i > 0 { | |||
| b.WriteByte(',') | |||
| } | |||
| msd, ok := messageSetMap[id] | |||
| if !ok { | |||
| // Unknown type; we can't render it, so skip it. | |||
| continue | |||
| } | |||
| fmt.Fprintf(&b, `"[%s]":`, msd.name) | |||
| x := ext.value | |||
| if x == nil { | |||
| x = reflect.New(msd.t.Elem()).Interface() | |||
| if err := Unmarshal(ext.enc, x.(Message)); err != nil { | |||
| return nil, err | |||
| } | |||
| } | |||
| d, err := json.Marshal(x) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| b.Write(d) | |||
| } | |||
| b.WriteByte('}') | |||
| return b.Bytes(), nil | |||
| } | |||
| // UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. | |||
| // It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. | |||
| func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { | |||
| // Common-case fast path. | |||
| if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { | |||
| return nil | |||
| } | |||
| // This is fairly tricky, and it's not clear that it is needed. | |||
| return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") | |||
| } | |||
| // A global registry of types that can be used in a MessageSet. | |||
| var messageSetMap = make(map[int32]messageSetDesc) | |||
| type messageSetDesc struct { | |||
| t reflect.Type // pointer to struct | |||
| name string | |||
| } | |||
| // RegisterMessageSetType is called from the generated code. | |||
| func RegisterMessageSetType(m Message, fieldNum int32, name string) { | |||
| messageSetMap[fieldNum] = messageSetDesc{ | |||
| t: reflect.TypeOf(m), | |||
| name: name, | |||
| } | |||
| } | |||
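| // Editorial sketch (not upstream code): generated code calls this from an | |||
| // init function so MarshalMessageSetJSON can render the "[name]" key for | |||
| // the type. This reuses the hypothetical exampleSetEntry sketch above; the | |||
| // field number and name are likewise hypothetical. | |||
| func exampleRegister() { | |||
| RegisterMessageSetType((*exampleSetEntry)(nil), 12345, "my.pkg.ExampleSetEntry") | |||
| } | |||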
| @@ -0,0 +1,479 @@ | |||
| // Go support for Protocol Buffers - Google's data interchange format | |||
| // | |||
| // Copyright 2012 The Go Authors. All rights reserved. | |||
| // https://github.com/golang/protobuf | |||
| // | |||
| // Redistribution and use in source and binary forms, with or without | |||
| // modification, are permitted provided that the following conditions are | |||
| // met: | |||
| // | |||
| // * Redistributions of source code must retain the above copyright | |||
| // notice, this list of conditions and the following disclaimer. | |||
| // * Redistributions in binary form must reproduce the above | |||
| // copyright notice, this list of conditions and the following disclaimer | |||
| // in the documentation and/or other materials provided with the | |||
| // distribution. | |||
| // * Neither the name of Google Inc. nor the names of its | |||
| // contributors may be used to endorse or promote products derived from | |||
| // this software without specific prior written permission. | |||
| // | |||
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| // +build appengine | |||
| // This file contains an implementation of proto field accesses using package reflect. | |||
| // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can | |||
| // be used on App Engine. | |||
| package proto | |||
| import ( | |||
| "math" | |||
| "reflect" | |||
| ) | |||
| // A structPointer is a pointer to a struct. | |||
| type structPointer struct { | |||
| v reflect.Value | |||
| } | |||
| // toStructPointer returns a structPointer equivalent to the given reflect value. | |||
| // The reflect value must itself be a pointer to a struct. | |||
| func toStructPointer(v reflect.Value) structPointer { | |||
| return structPointer{v} | |||
| } | |||
| // IsNil reports whether p is nil. | |||
| func structPointer_IsNil(p structPointer) bool { | |||
| return p.v.IsNil() | |||
| } | |||
| // Interface returns the struct pointer as an interface value. | |||
| func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { | |||
| return p.v.Interface() | |||
| } | |||
| // A field identifies a field in a struct, accessible from a structPointer. | |||
| // In this implementation, a field is identified by the sequence of field indices | |||
| // passed to reflect's FieldByIndex. | |||
| type field []int | |||
| // toField returns a field equivalent to the given reflect field. | |||
| func toField(f *reflect.StructField) field { | |||
| return f.Index | |||
| } | |||
| // invalidField is an invalid field identifier. | |||
| var invalidField = field(nil) | |||
| // IsValid reports whether the field identifier is valid. | |||
| func (f field) IsValid() bool { return f != nil } | |||
| // field returns the given field in the struct as a reflect value. | |||
| func structPointer_field(p structPointer, f field) reflect.Value { | |||
| // Special case: an extension map entry with a value of type T | |||
| // passes a *T to the struct-handling code with a zero field, | |||
| // expecting that it will be treated as equivalent to *struct{ X T }, | |||
| // which has the same memory layout. We have to handle that case | |||
| // specially, because reflect will panic if we call FieldByIndex on a | |||
| // non-struct. | |||
| if f == nil { | |||
| return p.v.Elem() | |||
| } | |||
| return p.v.Elem().FieldByIndex(f) | |||
| } | |||
| // ifield returns the given field in the struct as an interface value. | |||
| func structPointer_ifield(p structPointer, f field) interface{} { | |||
| return structPointer_field(p, f).Addr().Interface() | |||
| } | |||
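| // Editorial sketch (not upstream code): how these accessors compose. The | |||
| // functions below differ only in the pointer type they assert. | |||
| func exampleFieldAccess() **string { | |||
| type T struct{ Name *string } | |||
| v := reflect.ValueOf(&T{}) | |||
| sf, _ := v.Type().Elem().FieldByName("Name") | |||
| return structPointer_String(toStructPointer(v), toField(&sf)) // aliases the Name field | |||
| } | |||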
| // Bytes returns the address of a []byte field in the struct. | |||
| func structPointer_Bytes(p structPointer, f field) *[]byte { | |||
| return structPointer_ifield(p, f).(*[]byte) | |||
| } | |||
| // BytesSlice returns the address of a [][]byte field in the struct. | |||
| func structPointer_BytesSlice(p structPointer, f field) *[][]byte { | |||
| return structPointer_ifield(p, f).(*[][]byte) | |||
| } | |||
| // Bool returns the address of a *bool field in the struct. | |||
| func structPointer_Bool(p structPointer, f field) **bool { | |||
| return structPointer_ifield(p, f).(**bool) | |||
| } | |||
| // BoolVal returns the address of a bool field in the struct. | |||
| func structPointer_BoolVal(p structPointer, f field) *bool { | |||
| return structPointer_ifield(p, f).(*bool) | |||
| } | |||
| // BoolSlice returns the address of a []bool field in the struct. | |||
| func structPointer_BoolSlice(p structPointer, f field) *[]bool { | |||
| return structPointer_ifield(p, f).(*[]bool) | |||
| } | |||
| // String returns the address of a *string field in the struct. | |||
| func structPointer_String(p structPointer, f field) **string { | |||
| return structPointer_ifield(p, f).(**string) | |||
| } | |||
| // StringVal returns the address of a string field in the struct. | |||
| func structPointer_StringVal(p structPointer, f field) *string { | |||
| return structPointer_ifield(p, f).(*string) | |||
| } | |||
| // StringSlice returns the address of a []string field in the struct. | |||
| func structPointer_StringSlice(p structPointer, f field) *[]string { | |||
| return structPointer_ifield(p, f).(*[]string) | |||
| } | |||
| // ExtMap returns the address of an extension map field in the struct. | |||
| func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { | |||
| return structPointer_ifield(p, f).(*map[int32]Extension) | |||
| } | |||
| // NewAt returns the reflect.Value for a pointer to a field in the struct. | |||
| func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { | |||
| return structPointer_field(p, f).Addr() | |||
| } | |||
| // SetStructPointer writes a *struct field in the struct. | |||
| func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { | |||
| structPointer_field(p, f).Set(q.v) | |||
| } | |||
| // GetStructPointer reads a *struct field in the struct. | |||
| func structPointer_GetStructPointer(p structPointer, f field) structPointer { | |||
| return structPointer{structPointer_field(p, f)} | |||
| } | |||
| // StructPointerSlice returns the address of a []*struct field in the struct. | |||
| func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { | |||
| return structPointerSlice{structPointer_field(p, f)} | |||
| } | |||
| // A structPointerSlice represents the address of a slice of pointers to structs | |||
| // (themselves messages or groups). That is, v.Type() is *[]*struct{...}. | |||
| type structPointerSlice struct { | |||
| v reflect.Value | |||
| } | |||
| func (p structPointerSlice) Len() int { return p.v.Len() } | |||
| func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } | |||
| func (p structPointerSlice) Append(q structPointer) { | |||
| p.v.Set(reflect.Append(p.v, q.v)) | |||
| } | |||
| var ( | |||
| int32Type = reflect.TypeOf(int32(0)) | |||
| uint32Type = reflect.TypeOf(uint32(0)) | |||
| float32Type = reflect.TypeOf(float32(0)) | |||
| int64Type = reflect.TypeOf(int64(0)) | |||
| uint64Type = reflect.TypeOf(uint64(0)) | |||
| float64Type = reflect.TypeOf(float64(0)) | |||
| ) | |||
| // A word32 represents a field of type *int32, *uint32, *float32, or *enum. | |||
| // That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. | |||
| type word32 struct { | |||
| v reflect.Value | |||
| } | |||
| // IsNil reports whether p is nil. | |||
| func word32_IsNil(p word32) bool { | |||
| return p.v.IsNil() | |||
| } | |||
| // Set sets p to point at a newly allocated word with bits set to x. | |||
| func word32_Set(p word32, o *Buffer, x uint32) { | |||
| t := p.v.Type().Elem() | |||
| switch t { | |||
| case int32Type: | |||
| if len(o.int32s) == 0 { | |||
| o.int32s = make([]int32, uint32PoolSize) | |||
| } | |||
| o.int32s[0] = int32(x) | |||
| p.v.Set(reflect.ValueOf(&o.int32s[0])) | |||
| o.int32s = o.int32s[1:] | |||
| return | |||
| case uint32Type: | |||
| if len(o.uint32s) == 0 { | |||
| o.uint32s = make([]uint32, uint32PoolSize) | |||
| } | |||
| o.uint32s[0] = x | |||
| p.v.Set(reflect.ValueOf(&o.uint32s[0])) | |||
| o.uint32s = o.uint32s[1:] | |||
| return | |||
| case float32Type: | |||
| if len(o.float32s) == 0 { | |||
| o.float32s = make([]float32, uint32PoolSize) | |||
| } | |||
| o.float32s[0] = math.Float32frombits(x) | |||
| p.v.Set(reflect.ValueOf(&o.float32s[0])) | |||
| o.float32s = o.float32s[1:] | |||
| return | |||
| } | |||
| // must be enum | |||
| p.v.Set(reflect.New(t)) | |||
| p.v.Elem().SetInt(int64(int32(x))) | |||
| } | |||
| // Get gets the bits pointed at by p, as a uint32. | |||
| func word32_Get(p word32) uint32 { | |||
| elem := p.v.Elem() | |||
| switch elem.Kind() { | |||
| case reflect.Int32: | |||
| return uint32(elem.Int()) | |||
| case reflect.Uint32: | |||
| return uint32(elem.Uint()) | |||
| case reflect.Float32: | |||
| return math.Float32bits(float32(elem.Float())) | |||
| } | |||
| panic("unreachable") | |||
| } | |||
| // Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. | |||
| func structPointer_Word32(p structPointer, f field) word32 { | |||
| return word32{structPointer_field(p, f)} | |||
| } | |||
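| // Editorial sketch (not upstream code): word32 moves every 32-bit scalar | |||
| // through its raw bit pattern, so encode/decode code can treat *int32, | |||
| // *uint32, *float32, and *enum fields uniformly as uint32 payloads. | |||
| func exampleWord32Bits() uint32 { | |||
| f := new(*float32) | |||
| p := word32{reflect.ValueOf(f).Elem()} // v.Type() is *float32 | |||
| word32_Set(p, new(Buffer), math.Float32bits(1.5)) | |||
| return word32_Get(p) // 0x3fc00000, the bits of 1.5 | |||
| } | |||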
| // A word32Val represents a field of type int32, uint32, float32, or enum. | |||
| // That is, v.Type() is int32, uint32, float32, or enum and v is assignable. | |||
| type word32Val struct { | |||
| v reflect.Value | |||
| } | |||
| // Set sets *p to x. | |||
| func word32Val_Set(p word32Val, x uint32) { | |||
| switch p.v.Type() { | |||
| case int32Type: | |||
| p.v.SetInt(int64(x)) | |||
| return | |||
| case uint32Type: | |||
| p.v.SetUint(uint64(x)) | |||
| return | |||
| case float32Type: | |||
| p.v.SetFloat(float64(math.Float32frombits(x))) | |||
| return | |||
| } | |||
| // must be enum | |||
| p.v.SetInt(int64(int32(x))) | |||
| } | |||
| // Get gets the bits pointed at by p, as a uint32. | |||
| func word32Val_Get(p word32Val) uint32 { | |||
| elem := p.v | |||
| switch elem.Kind() { | |||
| case reflect.Int32: | |||
| return uint32(elem.Int()) | |||
| case reflect.Uint32: | |||
| return uint32(elem.Uint()) | |||
| case reflect.Float32: | |||
| return math.Float32bits(float32(elem.Float())) | |||
| } | |||
| panic("unreachable") | |||
| } | |||
| // Word32Val returns a reference to an int32, uint32, float32, or enum field in the struct. | |||
| func structPointer_Word32Val(p structPointer, f field) word32Val { | |||
| return word32Val{structPointer_field(p, f)} | |||
| } | |||
| // A word32Slice is a slice of 32-bit values. | |||
| // That is, v.Type() is []int32, []uint32, []float32, or []enum. | |||
| type word32Slice struct { | |||
| v reflect.Value | |||
| } | |||
| func (p word32Slice) Append(x uint32) { | |||
| n, m := p.v.Len(), p.v.Cap() | |||
| if n < m { | |||
| p.v.SetLen(n + 1) | |||
| } else { | |||
| t := p.v.Type().Elem() | |||
| p.v.Set(reflect.Append(p.v, reflect.Zero(t))) | |||
| } | |||
| elem := p.v.Index(n) | |||
| switch elem.Kind() { | |||
| case reflect.Int32: | |||
| elem.SetInt(int64(int32(x))) | |||
| case reflect.Uint32: | |||
| elem.SetUint(uint64(x)) | |||
| case reflect.Float32: | |||
| elem.SetFloat(float64(math.Float32frombits(x))) | |||
| } | |||
| } | |||
| func (p word32Slice) Len() int { | |||
| return p.v.Len() | |||
| } | |||
| func (p word32Slice) Index(i int) uint32 { | |||
| elem := p.v.Index(i) | |||
| switch elem.Kind() { | |||
| case reflect.Int32: | |||
| return uint32(elem.Int()) | |||
| case reflect.Uint32: | |||
| return uint32(elem.Uint()) | |||
| case reflect.Float32: | |||
| return math.Float32bits(float32(elem.Float())) | |||
| } | |||
| panic("unreachable") | |||
| } | |||
| // Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. | |||
| func structPointer_Word32Slice(p structPointer, f field) word32Slice { | |||
| return word32Slice{structPointer_field(p, f)} | |||
| } | |||
| // word64 is like word32 but for 64-bit values. | |||
| type word64 struct { | |||
| v reflect.Value | |||
| } | |||
| func word64_Set(p word64, o *Buffer, x uint64) { | |||
| t := p.v.Type().Elem() | |||
| switch t { | |||
| case int64Type: | |||
| if len(o.int64s) == 0 { | |||
| o.int64s = make([]int64, uint64PoolSize) | |||
| } | |||
| o.int64s[0] = int64(x) | |||
| p.v.Set(reflect.ValueOf(&o.int64s[0])) | |||
| o.int64s = o.int64s[1:] | |||
| return | |||
| case uint64Type: | |||
| if len(o.uint64s) == 0 { | |||
| o.uint64s = make([]uint64, uint64PoolSize) | |||
| } | |||
| o.uint64s[0] = x | |||
| p.v.Set(reflect.ValueOf(&o.uint64s[0])) | |||
| o.uint64s = o.uint64s[1:] | |||
| return | |||
| case float64Type: | |||
| if len(o.float64s) == 0 { | |||
| o.float64s = make([]float64, uint64PoolSize) | |||
| } | |||
| o.float64s[0] = math.Float64frombits(x) | |||
| p.v.Set(reflect.ValueOf(&o.float64s[0])) | |||
| o.float64s = o.float64s[1:] | |||
| return | |||
| } | |||
| panic("unreachable") | |||
| } | |||
| func word64_IsNil(p word64) bool { | |||
| return p.v.IsNil() | |||
| } | |||
| func word64_Get(p word64) uint64 { | |||
| elem := p.v.Elem() | |||
| switch elem.Kind() { | |||
| case reflect.Int64: | |||
| return uint64(elem.Int()) | |||
| case reflect.Uint64: | |||
| return elem.Uint() | |||
| case reflect.Float64: | |||
| return math.Float64bits(elem.Float()) | |||
| } | |||
| panic("unreachable") | |||
| } | |||
| func structPointer_Word64(p structPointer, f field) word64 { | |||
| return word64{structPointer_field(p, f)} | |||
| } | |||
| // word64Val is like word32Val but for 64-bit values. | |||
| type word64Val struct { | |||
| v reflect.Value | |||
| } | |||
| func word64Val_Set(p word64Val, o *Buffer, x uint64) { | |||
| switch p.v.Type() { | |||
| case int64Type: | |||
| p.v.SetInt(int64(x)) | |||
| return | |||
| case uint64Type: | |||
| p.v.SetUint(x) | |||
| return | |||
| case float64Type: | |||
| p.v.SetFloat(math.Float64frombits(x)) | |||
| return | |||
| } | |||
| panic("unreachable") | |||
| } | |||
| func word64Val_Get(p word64Val) uint64 { | |||
| elem := p.v | |||
| switch elem.Kind() { | |||
| case reflect.Int64: | |||
| return uint64(elem.Int()) | |||
| case reflect.Uint64: | |||
| return elem.Uint() | |||
| case reflect.Float64: | |||
| return math.Float64bits(elem.Float()) | |||
| } | |||
| panic("unreachable") | |||
| } | |||
| func structPointer_Word64Val(p structPointer, f field) word64Val { | |||
| return word64Val{structPointer_field(p, f)} | |||
| } | |||
| type word64Slice struct { | |||
| v reflect.Value | |||
| } | |||
| func (p word64Slice) Append(x uint64) { | |||
| n, m := p.v.Len(), p.v.Cap() | |||
| if n < m { | |||
| p.v.SetLen(n + 1) | |||
| } else { | |||
| t := p.v.Type().Elem() | |||
| p.v.Set(reflect.Append(p.v, reflect.Zero(t))) | |||
| } | |||
| elem := p.v.Index(n) | |||
| switch elem.Kind() { | |||
| case reflect.Int64: | |||
| elem.SetInt(int64(x)) | |||
| case reflect.Uint64: | |||
| elem.SetUint(x) | |||
| case reflect.Float64: | |||
| elem.SetFloat(float64(math.Float64frombits(x))) | |||
| } | |||
| } | |||
| func (p word64Slice) Len() int { | |||
| return p.v.Len() | |||
| } | |||
| func (p word64Slice) Index(i int) uint64 { | |||
| elem := p.v.Index(i) | |||
| switch elem.Kind() { | |||
| case reflect.Int64: | |||
| return uint64(elem.Int()) | |||
| case reflect.Uint64: | |||
| return elem.Uint() | |||
| case reflect.Float64: | |||
| return math.Float64bits(elem.Float()) | |||
| } | |||
| panic("unreachable") | |||
| } | |||
| func structPointer_Word64Slice(p structPointer, f field) word64Slice { | |||
| return word64Slice{structPointer_field(p, f)} | |||
| } | |||
| @@ -0,0 +1,266 @@ | |||
| // Go support for Protocol Buffers - Google's data interchange format | |||
| // | |||
| // Copyright 2012 The Go Authors. All rights reserved. | |||
| // https://github.com/golang/protobuf | |||
| // | |||
| // Redistribution and use in source and binary forms, with or without | |||
| // modification, are permitted provided that the following conditions are | |||
| // met: | |||
| // | |||
| // * Redistributions of source code must retain the above copyright | |||
| // notice, this list of conditions and the following disclaimer. | |||
| // * Redistributions in binary form must reproduce the above | |||
| // copyright notice, this list of conditions and the following disclaimer | |||
| // in the documentation and/or other materials provided with the | |||
| // distribution. | |||
| // * Neither the name of Google Inc. nor the names of its | |||
| // contributors may be used to endorse or promote products derived from | |||
| // this software without specific prior written permission. | |||
| // | |||
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| // +build !appengine | |||
| // This file contains the implementation of the proto field accesses using package unsafe. | |||
| package proto | |||
| import ( | |||
| "reflect" | |||
| "unsafe" | |||
| ) | |||
| // NOTE: These type_Foo functions would more idiomatically be methods, | |||
| // but Go does not allow methods on pointer types, and we must preserve | |||
| // some pointer type for the garbage collector. We use these | |||
| // funcs with clunky names as our poor approximation to methods. | |||
| // | |||
| // An alternative would be | |||
| // type structPointer struct { p unsafe.Pointer } | |||
| // but that does not registerize as well. | |||
| // A structPointer is a pointer to a struct. | |||
| type structPointer unsafe.Pointer | |||
| // toStructPointer returns a structPointer equivalent to the given reflect value. | |||
| func toStructPointer(v reflect.Value) structPointer { | |||
| return structPointer(unsafe.Pointer(v.Pointer())) | |||
| } | |||
| // IsNil reports whether p is nil. | |||
| func structPointer_IsNil(p structPointer) bool { | |||
| return p == nil | |||
| } | |||
| // Interface returns the struct pointer, assumed to have element type t, | |||
| // as an interface value. | |||
| func structPointer_Interface(p structPointer, t reflect.Type) interface{} { | |||
| return reflect.NewAt(t, unsafe.Pointer(p)).Interface() | |||
| } | |||
| // A field identifies a field in a struct, accessible from a structPointer. | |||
| // In this implementation, a field is identified by its byte offset from the start of the struct. | |||
| type field uintptr | |||
| // toField returns a field equivalent to the given reflect field. | |||
| func toField(f *reflect.StructField) field { | |||
| return field(f.Offset) | |||
| } | |||
| // invalidField is an invalid field identifier. | |||
| const invalidField = ^field(0) | |||
| // IsValid reports whether the field identifier is valid. | |||
| func (f field) IsValid() bool { | |||
| return f != ^field(0) | |||
| } | |||
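| // Editorial sketch (not upstream code): every accessor below is the same | |||
| // base-plus-offset arithmetic with a different asserted pointer type. | |||
| func exampleOffsetAccess() *int32 { | |||
| type T struct{ A, B int32 } | |||
| t := &T{} | |||
| sf, _ := reflect.TypeOf(*t).FieldByName("B") | |||
| p := toStructPointer(reflect.ValueOf(t)) | |||
| return (*int32)(unsafe.Pointer(uintptr(p) + uintptr(toField(&sf)))) // aliases t.B | |||
| } | |||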
| // Bytes returns the address of a []byte field in the struct. | |||
| func structPointer_Bytes(p structPointer, f field) *[]byte { | |||
| return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
| } | |||
| // BytesSlice returns the address of a [][]byte field in the struct. | |||
| func structPointer_BytesSlice(p structPointer, f field) *[][]byte { | |||
| return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
| } | |||
| // Bool returns the address of a *bool field in the struct. | |||
| func structPointer_Bool(p structPointer, f field) **bool { | |||
| return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
| } | |||
| // BoolVal returns the address of a bool field in the struct. | |||
| func structPointer_BoolVal(p structPointer, f field) *bool { | |||
| return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
| } | |||
| // BoolSlice returns the address of a []bool field in the struct. | |||
| func structPointer_BoolSlice(p structPointer, f field) *[]bool { | |||
| return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
| } | |||
| // String returns the address of a *string field in the struct. | |||
| func structPointer_String(p structPointer, f field) **string { | |||
| return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
| } | |||
| // StringVal returns the address of a string field in the struct. | |||
| func structPointer_StringVal(p structPointer, f field) *string { | |||
| return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
| } | |||
| // StringSlice returns the address of a []string field in the struct. | |||
| func structPointer_StringSlice(p structPointer, f field) *[]string { | |||
| return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
| } | |||
| // ExtMap returns the address of an extension map field in the struct. | |||
| func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { | |||
| return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
| } | |||
| // NewAt returns the reflect.Value for a pointer to a field in the struct. | |||
| func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { | |||
| return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) | |||
| } | |||
| // SetStructPointer writes a *struct field in the struct. | |||
| func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { | |||
| *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q | |||
| } | |||
| // GetStructPointer reads a *struct field in the struct. | |||
| func structPointer_GetStructPointer(p structPointer, f field) structPointer { | |||
| return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
| } | |||
| // StructPointerSlice returns the address of a []*struct field in the struct. | |||
| func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { | |||
| return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
| } | |||
| // A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). | |||
| type structPointerSlice []structPointer | |||
| func (v *structPointerSlice) Len() int { return len(*v) } | |||
| func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } | |||
| func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } | |||
| // A word32 is the address of a "pointer to 32-bit value" field. | |||
| type word32 **uint32 | |||
| // IsNil reports whether *p is nil. | |||
| func word32_IsNil(p word32) bool { | |||
| return *p == nil | |||
| } | |||
| // Set sets *p to point at a newly allocated word set to x. | |||
| func word32_Set(p word32, o *Buffer, x uint32) { | |||
| if len(o.uint32s) == 0 { | |||
| o.uint32s = make([]uint32, uint32PoolSize) | |||
| } | |||
| o.uint32s[0] = x | |||
| *p = &o.uint32s[0] | |||
| o.uint32s = o.uint32s[1:] | |||
| } | |||
| // Get gets the value pointed at by *p. | |||
| func word32_Get(p word32) uint32 { | |||
| return **p | |||
| } | |||
| // Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. | |||
| func structPointer_Word32(p structPointer, f field) word32 { | |||
| return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) | |||
| } | |||
| // A word32Val is the address of a 32-bit value field. | |||
| type word32Val *uint32 | |||
| // Set sets *p to x. | |||
| func word32Val_Set(p word32Val, x uint32) { | |||
| *p = x | |||
| } | |||
| // Get gets the value pointed at by p. | |||
| func word32Val_Get(p word32Val) uint32 { | |||
| return *p | |||
| } | |||
| // Word32Val returns the address of an int32, uint32, float32, or enum value field in the struct. | |||
| func structPointer_Word32Val(p structPointer, f field) word32Val { | |||
| return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) | |||
| } | |||
| // A word32Slice is a slice of 32-bit values. | |||
| type word32Slice []uint32 | |||
| func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } | |||
| func (v *word32Slice) Len() int { return len(*v) } | |||
| func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } | |||
| // Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. | |||
| func structPointer_Word32Slice(p structPointer, f field) *word32Slice { | |||
| return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
| } | |||
| // word64 is like word32 but for 64-bit values. | |||
| type word64 **uint64 | |||
| func word64_Set(p word64, o *Buffer, x uint64) { | |||
| if len(o.uint64s) == 0 { | |||
| o.uint64s = make([]uint64, uint64PoolSize) | |||
| } | |||
| o.uint64s[0] = x | |||
| *p = &o.uint64s[0] | |||
| o.uint64s = o.uint64s[1:] | |||
| } | |||
| func word64_IsNil(p word64) bool { | |||
| return *p == nil | |||
| } | |||
| func word64_Get(p word64) uint64 { | |||
| return **p | |||
| } | |||
| func structPointer_Word64(p structPointer, f field) word64 { | |||
| return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) | |||
| } | |||
| // word64Val is like word32Val but for 64-bit values. | |||
| type word64Val *uint64 | |||
| func word64Val_Set(p word64Val, o *Buffer, x uint64) { | |||
| *p = x | |||
| } | |||
| func word64Val_Get(p word64Val) uint64 { | |||
| return *p | |||
| } | |||
| func structPointer_Word64Val(p structPointer, f field) word64Val { | |||
| return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) | |||
| } | |||
| // word64Slice is like word32Slice but for 64-bit values. | |||
| type word64Slice []uint64 | |||
| func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } | |||
| func (v *word64Slice) Len() int { return len(*v) } | |||
| func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } | |||
| func structPointer_Word64Slice(p structPointer, f field) *word64Slice { | |||
| return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) | |||
| } | |||
| @@ -0,0 +1,846 @@ | |||
| // Go support for Protocol Buffers - Google's data interchange format | |||
| // | |||
| // Copyright 2010 The Go Authors. All rights reserved. | |||
| // https://github.com/golang/protobuf | |||
| // | |||
| // Redistribution and use in source and binary forms, with or without | |||
| // modification, are permitted provided that the following conditions are | |||
| // met: | |||
| // | |||
| // * Redistributions of source code must retain the above copyright | |||
| // notice, this list of conditions and the following disclaimer. | |||
| // * Redistributions in binary form must reproduce the above | |||
| // copyright notice, this list of conditions and the following disclaimer | |||
| // in the documentation and/or other materials provided with the | |||
| // distribution. | |||
| // * Neither the name of Google Inc. nor the names of its | |||
| // contributors may be used to endorse or promote products derived from | |||
| // this software without specific prior written permission. | |||
| // | |||
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| package proto | |||
| /* | |||
| * Routines for encoding data into the wire format for protocol buffers. | |||
| */ | |||
| import ( | |||
| "fmt" | |||
| "log" | |||
| "os" | |||
| "reflect" | |||
| "sort" | |||
| "strconv" | |||
| "strings" | |||
| "sync" | |||
| ) | |||
| const debug bool = false | |||
| // Constants that identify the encoding of a value on the wire. | |||
| const ( | |||
| WireVarint = 0 | |||
| WireFixed64 = 1 | |||
| WireBytes = 2 | |||
| WireStartGroup = 3 | |||
| WireEndGroup = 4 | |||
| WireFixed32 = 5 | |||
| ) | |||
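| // Editorial sketch (not upstream code): a field's wire key is the varint | |||
| // of tag<<3|wireType; Properties.tagcode later in this file precomputes it. | |||
| func exampleWireKey() []byte { | |||
| return EncodeVarint(uint64(2)<<3 | WireBytes) // field 2, length-delimited: 0x12 | |||
| } | |||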
| const startSize = 10 // initial slice/string sizes | |||
| // Encoders are defined in encode.go | |||
| // An encoder outputs the full representation of a field, including its | |||
| // tag and encoder type. | |||
| type encoder func(p *Buffer, prop *Properties, base structPointer) error | |||
| // A valueEncoder encodes a single integer in a particular encoding. | |||
| type valueEncoder func(o *Buffer, x uint64) error | |||
| // Sizers are defined in encode.go | |||
| // A sizer returns the encoded size of a field, including its tag and encoder | |||
| // type. | |||
| type sizer func(prop *Properties, base structPointer) int | |||
| // A valueSizer returns the encoded size of a single integer in a particular | |||
| // encoding. | |||
| type valueSizer func(x uint64) int | |||
| // Decoders are defined in decode.go | |||
| // A decoder creates a value from its wire representation. | |||
| // Unrecognized subelements are saved in unrec. | |||
| type decoder func(p *Buffer, prop *Properties, base structPointer) error | |||
| // A valueDecoder decodes a single integer in a particular encoding. | |||
| type valueDecoder func(o *Buffer) (x uint64, err error) | |||
| // A oneofMarshaler does the marshaling for all oneof fields in a message. | |||
| type oneofMarshaler func(Message, *Buffer) error | |||
| // A oneofUnmarshaler does the unmarshaling for a oneof field in a message. | |||
| type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) | |||
| // A oneofSizer does the sizing for all oneof fields in a message. | |||
| type oneofSizer func(Message) int | |||
| // tagMap is an optimization over map[int]int for typical protocol buffer | |||
| // use-cases. Encoded protocol buffers are often in tag order with small tag | |||
| // numbers. | |||
| type tagMap struct { | |||
| fastTags []int | |||
| slowTags map[int]int | |||
| } | |||
| // tagMapFastLimit is the upper bound on the tag number that will be stored in | |||
| // the tagMap slice rather than its map. | |||
| const tagMapFastLimit = 1024 | |||
| func (p *tagMap) get(t int) (int, bool) { | |||
| if t > 0 && t < tagMapFastLimit { | |||
| if t >= len(p.fastTags) { | |||
| return 0, false | |||
| } | |||
| fi := p.fastTags[t] | |||
| return fi, fi >= 0 | |||
| } | |||
| fi, ok := p.slowTags[t] | |||
| return fi, ok | |||
| } | |||
| func (p *tagMap) put(t int, fi int) { | |||
| if t > 0 && t < tagMapFastLimit { | |||
| for len(p.fastTags) < t+1 { | |||
| p.fastTags = append(p.fastTags, -1) | |||
| } | |||
| p.fastTags[t] = fi | |||
| return | |||
| } | |||
| if p.slowTags == nil { | |||
| p.slowTags = make(map[int]int) | |||
| } | |||
| p.slowTags[t] = fi | |||
| } | |||
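| // Editorial sketch (not upstream code): tagMap behaves like map[int]int but | |||
| // serves tags below tagMapFastLimit from a dense slice, since real messages | |||
| // use small, mostly consecutive tag numbers. | |||
| func exampleTagMap() (int, bool) { | |||
| var tm tagMap | |||
| tm.put(3, 0) // lands in fastTags[3] | |||
| tm.put(100000, 1) // beyond tagMapFastLimit, falls back to slowTags | |||
| return tm.get(3) // 0, true via a slice index, no map lookup | |||
| } | |||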
| // StructProperties represents properties for all the fields of a struct. | |||
| // decoderTags and decoderOrigNames should only be used by the decoder. | |||
| type StructProperties struct { | |||
| Prop []*Properties // properties for each field | |||
| reqCount int // required count | |||
| decoderTags tagMap // map from proto tag to struct field number | |||
| decoderOrigNames map[string]int // map from original name to struct field number | |||
| order []int // list of struct field numbers in tag order | |||
| unrecField field // field id of the XXX_unrecognized []byte field | |||
| extendable bool // is this an extendable proto | |||
| oneofMarshaler oneofMarshaler | |||
| oneofUnmarshaler oneofUnmarshaler | |||
| oneofSizer oneofSizer | |||
| stype reflect.Type | |||
| // OneofTypes contains information about the oneof fields in this message. | |||
| // It is keyed by the original name of a field. | |||
| OneofTypes map[string]*OneofProperties | |||
| } | |||
| // OneofProperties represents information about a specific field in a oneof. | |||
| type OneofProperties struct { | |||
| Type reflect.Type // pointer to generated struct type for this oneof field | |||
| Field int // struct field number of the containing oneof in the message | |||
| Prop *Properties | |||
| } | |||
| // Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. | |||
| // See encode.go, (*Buffer).enc_struct. | |||
| func (sp *StructProperties) Len() int { return len(sp.order) } | |||
| func (sp *StructProperties) Less(i, j int) bool { | |||
| return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag | |||
| } | |||
| func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } | |||
| // Properties represents the protocol-specific behavior of a single struct field. | |||
| type Properties struct { | |||
| Name string // name of the field, for error messages | |||
| OrigName string // original name before protocol compiler (always set) | |||
| JSONName string // name to use for JSON; determined by protoc | |||
| Wire string | |||
| WireType int | |||
| Tag int | |||
| Required bool | |||
| Optional bool | |||
| Repeated bool | |||
| Packed bool // relevant for repeated primitives only | |||
| Enum string // set for enum types only | |||
| proto3 bool // whether this is known to be a proto3 field; set for []byte only | |||
| oneof bool // whether this is a oneof field | |||
| Default string // default value | |||
| HasDefault bool // whether an explicit default was provided | |||
| def_uint64 uint64 | |||
| enc encoder | |||
| valEnc valueEncoder // set for bool and numeric types only | |||
| field field | |||
| tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) | |||
| tagbuf [8]byte | |||
| stype reflect.Type // set for struct types only | |||
| sprop *StructProperties // set for struct types only | |||
| isMarshaler bool | |||
| isUnmarshaler bool | |||
| mtype reflect.Type // set for map types only | |||
| mkeyprop *Properties // set for map types only | |||
| mvalprop *Properties // set for map types only | |||
| size sizer | |||
| valSize valueSizer // set for bool and numeric types only | |||
| dec decoder | |||
| valDec valueDecoder // set for bool and numeric types only | |||
| // If this is a packable field, this will be the decoder for the packed version of the field. | |||
| packedDec decoder | |||
| } | |||
| // String formats the properties in the protobuf struct field tag style. | |||
| func (p *Properties) String() string { | |||
| s := p.Wire | |||
| s = "," | |||
| s += strconv.Itoa(p.Tag) | |||
| if p.Required { | |||
| s += ",req" | |||
| } | |||
| if p.Optional { | |||
| s += ",opt" | |||
| } | |||
| if p.Repeated { | |||
| s += ",rep" | |||
| } | |||
| if p.Packed { | |||
| s += ",packed" | |||
| } | |||
| s += ",name=" + p.OrigName | |||
| if p.JSONName != p.OrigName { | |||
| s += ",json=" + p.JSONName | |||
| } | |||
| if p.proto3 { | |||
| s += ",proto3" | |||
| } | |||
| if p.oneof { | |||
| s += ",oneof" | |||
| } | |||
| if len(p.Enum) > 0 { | |||
| s += ",enum=" + p.Enum | |||
| } | |||
| if p.HasDefault { | |||
| s += ",def=" + p.Default | |||
| } | |||
| return s | |||
| } | |||
| // Parse populates p by parsing a string in the protobuf struct field tag style. | |||
| func (p *Properties) Parse(s string) { | |||
| // "bytes,49,opt,name=foo,def=hello!" | |||
| fields := strings.Split(s, ",") // breaks def=, but handled below. | |||
| if len(fields) < 2 { | |||
| fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) | |||
| return | |||
| } | |||
| p.Wire = fields[0] | |||
| switch p.Wire { | |||
| case "varint": | |||
| p.WireType = WireVarint | |||
| p.valEnc = (*Buffer).EncodeVarint | |||
| p.valDec = (*Buffer).DecodeVarint | |||
| p.valSize = sizeVarint | |||
| case "fixed32": | |||
| p.WireType = WireFixed32 | |||
| p.valEnc = (*Buffer).EncodeFixed32 | |||
| p.valDec = (*Buffer).DecodeFixed32 | |||
| p.valSize = sizeFixed32 | |||
| case "fixed64": | |||
| p.WireType = WireFixed64 | |||
| p.valEnc = (*Buffer).EncodeFixed64 | |||
| p.valDec = (*Buffer).DecodeFixed64 | |||
| p.valSize = sizeFixed64 | |||
| case "zigzag32": | |||
| p.WireType = WireVarint | |||
| p.valEnc = (*Buffer).EncodeZigzag32 | |||
| p.valDec = (*Buffer).DecodeZigzag32 | |||
| p.valSize = sizeZigzag32 | |||
| case "zigzag64": | |||
| p.WireType = WireVarint | |||
| p.valEnc = (*Buffer).EncodeZigzag64 | |||
| p.valDec = (*Buffer).DecodeZigzag64 | |||
| p.valSize = sizeZigzag64 | |||
| case "bytes", "group": | |||
| p.WireType = WireBytes | |||
| // no numeric converter for non-numeric types | |||
| default: | |||
| fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) | |||
| return | |||
| } | |||
| var err error | |||
| p.Tag, err = strconv.Atoi(fields[1]) | |||
| if err != nil { | |||
| return | |||
| } | |||
| for i := 2; i < len(fields); i++ { | |||
| f := fields[i] | |||
| switch { | |||
| case f == "req": | |||
| p.Required = true | |||
| case f == "opt": | |||
| p.Optional = true | |||
| case f == "rep": | |||
| p.Repeated = true | |||
| case f == "packed": | |||
| p.Packed = true | |||
| case strings.HasPrefix(f, "name="): | |||
| p.OrigName = f[5:] | |||
| case strings.HasPrefix(f, "json="): | |||
| p.JSONName = f[5:] | |||
| case strings.HasPrefix(f, "enum="): | |||
| p.Enum = f[5:] | |||
| case f == "proto3": | |||
| p.proto3 = true | |||
| case f == "oneof": | |||
| p.oneof = true | |||
| case strings.HasPrefix(f, "def="): | |||
| p.HasDefault = true | |||
| p.Default = f[4:] // rest of string | |||
| if i+1 < len(fields) { | |||
| // Commas aren't escaped, and def is always last. | |||
| p.Default += "," + strings.Join(fields[i+1:], ",") | |||
| break | |||
| } | |||
| } | |||
| } | |||
| } | |||
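| // Editorial sketch (not upstream code): parsing the tag grammar shown | |||
| // above, using the TypeId tag from message_set.go. | |||
| func exampleParseTag() *Properties { | |||
| p := new(Properties) | |||
| p.Parse("varint,2,req,name=type_id") | |||
| // p.WireType == WireVarint, p.Tag == 2, p.Required == true, | |||
| // p.OrigName == "type_id" | |||
| return p | |||
| } | |||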
| func logNoSliceEnc(t1, t2 reflect.Type) { | |||
| fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) | |||
| } | |||
| var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() | |||
| // Initialize the fields for encoding and decoding. | |||
| func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { | |||
| p.enc = nil | |||
| p.dec = nil | |||
| p.size = nil | |||
| switch t1 := typ; t1.Kind() { | |||
| default: | |||
| fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) | |||
| // proto3 scalar types | |||
| case reflect.Bool: | |||
| p.enc = (*Buffer).enc_proto3_bool | |||
| p.dec = (*Buffer).dec_proto3_bool | |||
| p.size = size_proto3_bool | |||
| case reflect.Int32: | |||
| p.enc = (*Buffer).enc_proto3_int32 | |||
| p.dec = (*Buffer).dec_proto3_int32 | |||
| p.size = size_proto3_int32 | |||
| case reflect.Uint32: | |||
| p.enc = (*Buffer).enc_proto3_uint32 | |||
| p.dec = (*Buffer).dec_proto3_int32 // can reuse | |||
| p.size = size_proto3_uint32 | |||
| case reflect.Int64, reflect.Uint64: | |||
| p.enc = (*Buffer).enc_proto3_int64 | |||
| p.dec = (*Buffer).dec_proto3_int64 | |||
| p.size = size_proto3_int64 | |||
| case reflect.Float32: | |||
| p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits | |||
| p.dec = (*Buffer).dec_proto3_int32 | |||
| p.size = size_proto3_uint32 | |||
| case reflect.Float64: | |||
| p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits | |||
| p.dec = (*Buffer).dec_proto3_int64 | |||
| p.size = size_proto3_int64 | |||
| case reflect.String: | |||
| p.enc = (*Buffer).enc_proto3_string | |||
| p.dec = (*Buffer).dec_proto3_string | |||
| p.size = size_proto3_string | |||
| case reflect.Ptr: | |||
| switch t2 := t1.Elem(); t2.Kind() { | |||
| default: | |||
| fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) | |||
| break | |||
| case reflect.Bool: | |||
| p.enc = (*Buffer).enc_bool | |||
| p.dec = (*Buffer).dec_bool | |||
| p.size = size_bool | |||
| case reflect.Int32: | |||
| p.enc = (*Buffer).enc_int32 | |||
| p.dec = (*Buffer).dec_int32 | |||
| p.size = size_int32 | |||
| case reflect.Uint32: | |||
| p.enc = (*Buffer).enc_uint32 | |||
| p.dec = (*Buffer).dec_int32 // can reuse | |||
| p.size = size_uint32 | |||
| case reflect.Int64, reflect.Uint64: | |||
| p.enc = (*Buffer).enc_int64 | |||
| p.dec = (*Buffer).dec_int64 | |||
| p.size = size_int64 | |||
| case reflect.Float32: | |||
| p.enc = (*Buffer).enc_uint32 // can just treat them as bits | |||
| p.dec = (*Buffer).dec_int32 | |||
| p.size = size_uint32 | |||
| case reflect.Float64: | |||
| p.enc = (*Buffer).enc_int64 // can just treat them as bits | |||
| p.dec = (*Buffer).dec_int64 | |||
| p.size = size_int64 | |||
| case reflect.String: | |||
| p.enc = (*Buffer).enc_string | |||
| p.dec = (*Buffer).dec_string | |||
| p.size = size_string | |||
| case reflect.Struct: | |||
| p.stype = t1.Elem() | |||
| p.isMarshaler = isMarshaler(t1) | |||
| p.isUnmarshaler = isUnmarshaler(t1) | |||
| if p.Wire == "bytes" { | |||
| p.enc = (*Buffer).enc_struct_message | |||
| p.dec = (*Buffer).dec_struct_message | |||
| p.size = size_struct_message | |||
| } else { | |||
| p.enc = (*Buffer).enc_struct_group | |||
| p.dec = (*Buffer).dec_struct_group | |||
| p.size = size_struct_group | |||
| } | |||
| } | |||
| case reflect.Slice: | |||
| switch t2 := t1.Elem(); t2.Kind() { | |||
| default: | |||
| logNoSliceEnc(t1, t2) | |||
| break | |||
| case reflect.Bool: | |||
| if p.Packed { | |||
| p.enc = (*Buffer).enc_slice_packed_bool | |||
| p.size = size_slice_packed_bool | |||
| } else { | |||
| p.enc = (*Buffer).enc_slice_bool | |||
| p.size = size_slice_bool | |||
| } | |||
| p.dec = (*Buffer).dec_slice_bool | |||
| p.packedDec = (*Buffer).dec_slice_packed_bool | |||
| case reflect.Int32: | |||
| if p.Packed { | |||
| p.enc = (*Buffer).enc_slice_packed_int32 | |||
| p.size = size_slice_packed_int32 | |||
| } else { | |||
| p.enc = (*Buffer).enc_slice_int32 | |||
| p.size = size_slice_int32 | |||
| } | |||
| p.dec = (*Buffer).dec_slice_int32 | |||
| p.packedDec = (*Buffer).dec_slice_packed_int32 | |||
| case reflect.Uint32: | |||
| if p.Packed { | |||
| p.enc = (*Buffer).enc_slice_packed_uint32 | |||
| p.size = size_slice_packed_uint32 | |||
| } else { | |||
| p.enc = (*Buffer).enc_slice_uint32 | |||
| p.size = size_slice_uint32 | |||
| } | |||
| p.dec = (*Buffer).dec_slice_int32 | |||
| p.packedDec = (*Buffer).dec_slice_packed_int32 | |||
| case reflect.Int64, reflect.Uint64: | |||
| if p.Packed { | |||
| p.enc = (*Buffer).enc_slice_packed_int64 | |||
| p.size = size_slice_packed_int64 | |||
| } else { | |||
| p.enc = (*Buffer).enc_slice_int64 | |||
| p.size = size_slice_int64 | |||
| } | |||
| p.dec = (*Buffer).dec_slice_int64 | |||
| p.packedDec = (*Buffer).dec_slice_packed_int64 | |||
| case reflect.Uint8: | |||
| p.enc = (*Buffer).enc_slice_byte | |||
| p.dec = (*Buffer).dec_slice_byte | |||
| p.size = size_slice_byte | |||
| // This is a []byte, which is either a bytes field, | |||
| // or the value of a map field. In the latter case, | |||
| // we always encode an empty []byte, so we should not | |||
| // use the proto3 enc/size funcs. | |||
| // f == nil iff this is the key/value of a map field. | |||
| if p.proto3 && f != nil { | |||
| p.enc = (*Buffer).enc_proto3_slice_byte | |||
| p.size = size_proto3_slice_byte | |||
| } | |||
| case reflect.Float32, reflect.Float64: | |||
| switch t2.Bits() { | |||
| case 32: | |||
| // can just treat them as bits | |||
| if p.Packed { | |||
| p.enc = (*Buffer).enc_slice_packed_uint32 | |||
| p.size = size_slice_packed_uint32 | |||
| } else { | |||
| p.enc = (*Buffer).enc_slice_uint32 | |||
| p.size = size_slice_uint32 | |||
| } | |||
| p.dec = (*Buffer).dec_slice_int32 | |||
| p.packedDec = (*Buffer).dec_slice_packed_int32 | |||
| case 64: | |||
| // can just treat them as bits | |||
| if p.Packed { | |||
| p.enc = (*Buffer).enc_slice_packed_int64 | |||
| p.size = size_slice_packed_int64 | |||
| } else { | |||
| p.enc = (*Buffer).enc_slice_int64 | |||
| p.size = size_slice_int64 | |||
| } | |||
| p.dec = (*Buffer).dec_slice_int64 | |||
| p.packedDec = (*Buffer).dec_slice_packed_int64 | |||
| default: | |||
| logNoSliceEnc(t1, t2) | |||
| break | |||
| } | |||
| case reflect.String: | |||
| p.enc = (*Buffer).enc_slice_string | |||
| p.dec = (*Buffer).dec_slice_string | |||
| p.size = size_slice_string | |||
| case reflect.Ptr: | |||
| switch t3 := t2.Elem(); t3.Kind() { | |||
| default: | |||
| fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) | |||
| break | |||
| case reflect.Struct: | |||
| p.stype = t2.Elem() | |||
| p.isMarshaler = isMarshaler(t2) | |||
| p.isUnmarshaler = isUnmarshaler(t2) | |||
| if p.Wire == "bytes" { | |||
| p.enc = (*Buffer).enc_slice_struct_message | |||
| p.dec = (*Buffer).dec_slice_struct_message | |||
| p.size = size_slice_struct_message | |||
| } else { | |||
| p.enc = (*Buffer).enc_slice_struct_group | |||
| p.dec = (*Buffer).dec_slice_struct_group | |||
| p.size = size_slice_struct_group | |||
| } | |||
| } | |||
| case reflect.Slice: | |||
| switch t2.Elem().Kind() { | |||
| default: | |||
| fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) | |||
| break | |||
| case reflect.Uint8: | |||
| p.enc = (*Buffer).enc_slice_slice_byte | |||
| p.dec = (*Buffer).dec_slice_slice_byte | |||
| p.size = size_slice_slice_byte | |||
| } | |||
| } | |||
| case reflect.Map: | |||
| p.enc = (*Buffer).enc_new_map | |||
| p.dec = (*Buffer).dec_new_map | |||
| p.size = size_new_map | |||
| p.mtype = t1 | |||
| p.mkeyprop = &Properties{} | |||
| p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) | |||
| p.mvalprop = &Properties{} | |||
| vtype := p.mtype.Elem() | |||
| if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { | |||
| // The value type is not a message (*T) or bytes ([]byte), | |||
| // so we need encoders for the pointer to this type. | |||
| vtype = reflect.PtrTo(vtype) | |||
| } | |||
| p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) | |||
| } | |||
| // precalculate tag code | |||
| wire := p.WireType | |||
| if p.Packed { | |||
| wire = WireBytes | |||
| } | |||
| x := uint32(p.Tag)<<3 | uint32(wire) | |||
| i := 0 | |||
| for i = 0; x > 127; i++ { | |||
| p.tagbuf[i] = 0x80 | uint8(x&0x7F) | |||
| x >>= 7 | |||
| } | |||
| p.tagbuf[i] = uint8(x) | |||
| p.tagcode = p.tagbuf[0 : i+1] | |||
| if p.stype != nil { | |||
| if lockGetProp { | |||
| p.sprop = GetProperties(p.stype) | |||
| } else { | |||
| p.sprop = getPropertiesLocked(p.stype) | |||
| } | |||
| } | |||
| } | |||
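| // Editorial sketch (not upstream code): the tagcode loop above is | |||
| // EncodeVarint inlined over tagbuf. For Tag=300 and WireType=WireBytes, | |||
| // x starts at 300<<3|2 = 2402; the first pass stores 0x80|0x62 = 0xe2 and | |||
| // shifts x down to 18, then the second pass stores 0x12. | |||
| func exampleTagcode() []byte { | |||
| return EncodeVarint(uint64(300)<<3 | WireBytes) // []byte{0xe2, 0x12} | |||
| } | |||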
| var ( | |||
| marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() | |||
| unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() | |||
| ) | |||
| // isMarshaler reports whether type t implements Marshaler. | |||
| func isMarshaler(t reflect.Type) bool { | |||
| // We're checking for (likely) pointer-receiver methods | |||
| // so if t is not a pointer, something is very wrong. | |||
| // The calls above only invoke isMarshaler on pointer types. | |||
| if t.Kind() != reflect.Ptr { | |||
| panic("proto: misuse of isMarshaler") | |||
| } | |||
| return t.Implements(marshalerType) | |||
| } | |||
| // isUnmarshaler reports whether type t implements Unmarshaler. | |||
| func isUnmarshaler(t reflect.Type) bool { | |||
| // We're checking for (likely) pointer-receiver methods | |||
| // so if t is not a pointer, something is very wrong. | |||
| // The calls above only invoke isUnmarshaler on pointer types. | |||
| if t.Kind() != reflect.Ptr { | |||
| panic("proto: misuse of isUnmarshaler") | |||
| } | |||
| return t.Implements(unmarshalerType) | |||
| } | |||
| // Init populates the properties from a protocol buffer struct tag. | |||
| func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { | |||
| p.init(typ, name, tag, f, true) | |||
| } | |||
| func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { | |||
| // "bytes,49,opt,def=hello!" | |||
| p.Name = name | |||
| p.OrigName = name | |||
| if f != nil { | |||
| p.field = toField(f) | |||
| } | |||
| if tag == "" { | |||
| return | |||
| } | |||
| p.Parse(tag) | |||
| p.setEncAndDec(typ, f, lockGetProp) | |||
| } | |||
| var ( | |||
| propertiesMu sync.RWMutex | |||
| propertiesMap = make(map[reflect.Type]*StructProperties) | |||
| ) | |||
| // GetProperties returns the list of properties for the type represented by t. | |||
| // t must represent a generated struct type of a protocol message. | |||
| func GetProperties(t reflect.Type) *StructProperties { | |||
| if t.Kind() != reflect.Struct { | |||
| panic("proto: type must have kind struct") | |||
| } | |||
| // Most calls to GetProperties in a long-running program will be | |||
| // retrieving details for types we have seen before. | |||
| propertiesMu.RLock() | |||
| sprop, ok := propertiesMap[t] | |||
| propertiesMu.RUnlock() | |||
| if ok { | |||
| if collectStats { | |||
| stats.Chit++ | |||
| } | |||
| return sprop | |||
| } | |||
| propertiesMu.Lock() | |||
| sprop = getPropertiesLocked(t) | |||
| propertiesMu.Unlock() | |||
| return sprop | |||
| } | |||
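| // Editorial sketch (not upstream code): callers pass the struct type | |||
| // itself, not the pointer type; messageSet from message_set.go serves as a | |||
| // stand-in here. The double-checked locking above keeps the hot path to a | |||
| // single read-locked map lookup. | |||
| func exampleGetProperties() *StructProperties { | |||
| sp := GetProperties(reflect.TypeOf(messageSet{})) | |||
| // sp.Prop holds one *Properties per field; sp.order lists field | |||
| // indices sorted by tag, as required by the encoder. | |||
| return sp | |||
| } | |||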
| // getPropertiesLocked requires that propertiesMu is held. | |||
| func getPropertiesLocked(t reflect.Type) *StructProperties { | |||
| if prop, ok := propertiesMap[t]; ok { | |||
| if collectStats { | |||
| stats.Chit++ | |||
| } | |||
| return prop | |||
| } | |||
| if collectStats { | |||
| stats.Cmiss++ | |||
| } | |||
| prop := new(StructProperties) | |||
| // in case of recursive protos, fill this in now. | |||
| propertiesMap[t] = prop | |||
| // build properties | |||
| prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) | |||
| prop.unrecField = invalidField | |||
| prop.Prop = make([]*Properties, t.NumField()) | |||
| prop.order = make([]int, t.NumField()) | |||
| for i := 0; i < t.NumField(); i++ { | |||
| f := t.Field(i) | |||
| p := new(Properties) | |||
| name := f.Name | |||
| p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) | |||
| if f.Name == "XXX_extensions" { // special case | |||
| p.enc = (*Buffer).enc_map | |||
| p.dec = nil // not needed | |||
| p.size = size_map | |||
| } | |||
| if f.Name == "XXX_unrecognized" { // special case | |||
| prop.unrecField = toField(&f) | |||
| } | |||
| oneof := f.Tag.Get("protobuf_oneof") != "" // special case | |||
| prop.Prop[i] = p | |||
| prop.order[i] = i | |||
| if debug { | |||
| print(i, " ", f.Name, " ", t.String(), " ") | |||
| if p.Tag > 0 { | |||
| print(p.String()) | |||
| } | |||
| print("\n") | |||
| } | |||
| if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { | |||
| fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") | |||
| } | |||
| } | |||
| // Re-order prop.order. | |||
| sort.Sort(prop) | |||
| type oneofMessage interface { | |||
| XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) | |||
| } | |||
| if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { | |||
| var oots []interface{} | |||
| prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() | |||
| prop.stype = t | |||
| // Interpret oneof metadata. | |||
| prop.OneofTypes = make(map[string]*OneofProperties) | |||
| for _, oot := range oots { | |||
| oop := &OneofProperties{ | |||
| Type: reflect.ValueOf(oot).Type(), // *T | |||
| Prop: new(Properties), | |||
| } | |||
| sft := oop.Type.Elem().Field(0) | |||
| oop.Prop.Name = sft.Name | |||
| oop.Prop.Parse(sft.Tag.Get("protobuf")) | |||
| // There will be exactly one interface field that | |||
| // this new value is assignable to. | |||
| for i := 0; i < t.NumField(); i++ { | |||
| f := t.Field(i) | |||
| if f.Type.Kind() != reflect.Interface { | |||
| continue | |||
| } | |||
| if !oop.Type.AssignableTo(f.Type) { | |||
| continue | |||
| } | |||
| oop.Field = i | |||
| break | |||
| } | |||
| prop.OneofTypes[oop.Prop.OrigName] = oop | |||
| } | |||
| } | |||
| // build required counts | |||
| // build tags | |||
| reqCount := 0 | |||
| prop.decoderOrigNames = make(map[string]int) | |||
| for i, p := range prop.Prop { | |||
| if strings.HasPrefix(p.Name, "XXX_") { | |||
| // Internal fields should not appear in tags/origNames maps. | |||
| // They are handled specially when encoding and decoding. | |||
| continue | |||
| } | |||
| if p.Required { | |||
| reqCount++ | |||
| } | |||
| prop.decoderTags.put(p.Tag, i) | |||
| prop.decoderOrigNames[p.OrigName] = i | |||
| } | |||
| prop.reqCount = reqCount | |||
| return prop | |||
| } | |||
| // Return the Properties object for the x[0]'th field of the structure. | |||
| func propByIndex(t reflect.Type, x []int) *Properties { | |||
| if len(x) != 1 { | |||
| fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) | |||
| return nil | |||
| } | |||
| prop := GetProperties(t) | |||
| return prop.Prop[x[0]] | |||
| } | |||
| // Get the address and type of a pointer to a struct from an interface. | |||
| func getbase(pb Message) (t reflect.Type, b structPointer, err error) { | |||
| if pb == nil { | |||
| err = ErrNil | |||
| return | |||
| } | |||
| // get the reflect type of the pointer to the struct. | |||
| t = reflect.TypeOf(pb) | |||
| // get the address of the struct. | |||
| value := reflect.ValueOf(pb) | |||
| b = toStructPointer(value) | |||
| return | |||
| } | |||
| // A global registry of enum types. | |||
| // The generated code will register the generated maps by calling RegisterEnum. | |||
| var enumValueMaps = make(map[string]map[string]int32) | |||
| // RegisterEnum is called from the generated code to install the enum descriptor | |||
| // maps into the global table to aid parsing text format protocol buffers. | |||
| func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { | |||
| if _, ok := enumValueMaps[typeName]; ok { | |||
| panic("proto: duplicate enum registered: " + typeName) | |||
| } | |||
| enumValueMaps[typeName] = valueMap | |||
| } | |||
| // EnumValueMap returns the mapping from names to integers of the | |||
|	// enum type enumType, or nil if not found. | |||
| func EnumValueMap(enumType string) map[string]int32 { | |||
| return enumValueMaps[enumType] | |||
| } | |||
| // A registry of all linked message types. | |||
| // The string is a fully-qualified proto name ("pkg.Message"). | |||
| var ( | |||
| protoTypes = make(map[string]reflect.Type) | |||
| revProtoTypes = make(map[reflect.Type]string) | |||
| ) | |||
| // RegisterType is called from generated code and maps from the fully qualified | |||
| // proto name to the type (pointer to struct) of the protocol buffer. | |||
| func RegisterType(x Message, name string) { | |||
| if _, ok := protoTypes[name]; ok { | |||
| // TODO: Some day, make this a panic. | |||
| log.Printf("proto: duplicate proto type registered: %s", name) | |||
| return | |||
| } | |||
| t := reflect.TypeOf(x) | |||
| protoTypes[name] = t | |||
| revProtoTypes[t] = name | |||
| } | |||
| // MessageName returns the fully-qualified proto name for the given message type. | |||
| func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } | |||
| // MessageType returns the message type (pointer to struct) for a named message. | |||
| func MessageType(name string) reflect.Type { return protoTypes[name] } | |||
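|	// Editor's sketch, not part of the vendored file: generated code typically | |||
|	// wires a message into the registries above from an init function; the | |||
|	// "example.Ping" name and the Ping type here are hypothetical. | |||
|	// | |||
|	//	func init() { | |||
|	//		RegisterType((*Ping)(nil), "example.Ping") | |||
|	//		RegisterEnum("example.Status", nil, map[string]int32{"OK": 0, "FAIL": 1}) | |||
|	//	} | |||
|	// | |||
|	// After that, MessageType("example.Ping") yields the *Ping reflect.Type and | |||
|	// EnumValueMap("example.Status")["OK"] == 0, which the text parser relies on | |||
|	// when expanding Any messages and resolving enum value names. | |||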
| @@ -0,0 +1,849 @@ | |||
| // Go support for Protocol Buffers - Google's data interchange format | |||
| // | |||
| // Copyright 2010 The Go Authors. All rights reserved. | |||
| // https://github.com/golang/protobuf | |||
| // | |||
| // Redistribution and use in source and binary forms, with or without | |||
| // modification, are permitted provided that the following conditions are | |||
| // met: | |||
| // | |||
| // * Redistributions of source code must retain the above copyright | |||
| // notice, this list of conditions and the following disclaimer. | |||
| // * Redistributions in binary form must reproduce the above | |||
| // copyright notice, this list of conditions and the following disclaimer | |||
| // in the documentation and/or other materials provided with the | |||
| // distribution. | |||
| // * Neither the name of Google Inc. nor the names of its | |||
| // contributors may be used to endorse or promote products derived from | |||
| // this software without specific prior written permission. | |||
| // | |||
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| package proto | |||
| // Functions for writing the text protocol buffer format. | |||
| import ( | |||
| "bufio" | |||
| "bytes" | |||
| "encoding" | |||
| "errors" | |||
| "fmt" | |||
| "io" | |||
| "log" | |||
| "math" | |||
| "reflect" | |||
| "sort" | |||
| "strings" | |||
| ) | |||
| var ( | |||
| newline = []byte("\n") | |||
| spaces = []byte(" ") | |||
| gtNewline = []byte(">\n") | |||
| endBraceNewline = []byte("}\n") | |||
| backslashN = []byte{'\\', 'n'} | |||
| backslashR = []byte{'\\', 'r'} | |||
| backslashT = []byte{'\\', 't'} | |||
| backslashDQ = []byte{'\\', '"'} | |||
| backslashBS = []byte{'\\', '\\'} | |||
| posInf = []byte("inf") | |||
| negInf = []byte("-inf") | |||
| nan = []byte("nan") | |||
| ) | |||
| type writer interface { | |||
| io.Writer | |||
| WriteByte(byte) error | |||
| } | |||
| // textWriter is an io.Writer that tracks its indentation level. | |||
| type textWriter struct { | |||
| ind int | |||
| complete bool // if the current position is a complete line | |||
| compact bool // whether to write out as a one-liner | |||
| w writer | |||
| } | |||
| func (w *textWriter) WriteString(s string) (n int, err error) { | |||
| if !strings.Contains(s, "\n") { | |||
| if !w.compact && w.complete { | |||
| w.writeIndent() | |||
| } | |||
| w.complete = false | |||
| return io.WriteString(w.w, s) | |||
| } | |||
| // WriteString is typically called without newlines, so this | |||
| // codepath and its copy are rare. We copy to avoid | |||
| // duplicating all of Write's logic here. | |||
| return w.Write([]byte(s)) | |||
| } | |||
| func (w *textWriter) Write(p []byte) (n int, err error) { | |||
| newlines := bytes.Count(p, newline) | |||
| if newlines == 0 { | |||
| if !w.compact && w.complete { | |||
| w.writeIndent() | |||
| } | |||
| n, err = w.w.Write(p) | |||
| w.complete = false | |||
| return n, err | |||
| } | |||
| frags := bytes.SplitN(p, newline, newlines+1) | |||
| if w.compact { | |||
| for i, frag := range frags { | |||
| if i > 0 { | |||
| if err := w.w.WriteByte(' '); err != nil { | |||
| return n, err | |||
| } | |||
| n++ | |||
| } | |||
| nn, err := w.w.Write(frag) | |||
| n += nn | |||
| if err != nil { | |||
| return n, err | |||
| } | |||
| } | |||
| return n, nil | |||
| } | |||
| for i, frag := range frags { | |||
| if w.complete { | |||
| w.writeIndent() | |||
| } | |||
| nn, err := w.w.Write(frag) | |||
| n += nn | |||
| if err != nil { | |||
| return n, err | |||
| } | |||
| if i+1 < len(frags) { | |||
| if err := w.w.WriteByte('\n'); err != nil { | |||
| return n, err | |||
| } | |||
| n++ | |||
| } | |||
| } | |||
| w.complete = len(frags[len(frags)-1]) == 0 | |||
| return n, nil | |||
| } | |||
| func (w *textWriter) WriteByte(c byte) error { | |||
| if w.compact && c == '\n' { | |||
| c = ' ' | |||
| } | |||
| if !w.compact && w.complete { | |||
| w.writeIndent() | |||
| } | |||
| err := w.w.WriteByte(c) | |||
| w.complete = c == '\n' | |||
| return err | |||
| } | |||
| func (w *textWriter) indent() { w.ind++ } | |||
| func (w *textWriter) unindent() { | |||
| if w.ind == 0 { | |||
| log.Printf("proto: textWriter unindented too far") | |||
| return | |||
| } | |||
| w.ind-- | |||
| } | |||
| func writeName(w *textWriter, props *Properties) error { | |||
| if _, err := w.WriteString(props.OrigName); err != nil { | |||
| return err | |||
| } | |||
| if props.Wire != "group" { | |||
| return w.WriteByte(':') | |||
| } | |||
| return nil | |||
| } | |||
| // raw is the interface satisfied by RawMessage. | |||
| type raw interface { | |||
| Bytes() []byte | |||
| } | |||
| func requiresQuotes(u string) bool { | |||
|	// When the type URL contains any characters other than [0-9A-Za-z./_-], it must be quoted. | |||
| for _, ch := range u { | |||
| switch { | |||
| case ch == '.' || ch == '/' || ch == '_': | |||
| continue | |||
| case '0' <= ch && ch <= '9': | |||
| continue | |||
| case 'A' <= ch && ch <= 'Z': | |||
| continue | |||
| case 'a' <= ch && ch <= 'z': | |||
| continue | |||
| default: | |||
| return true | |||
| } | |||
| } | |||
| return false | |||
| } | |||
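|	// For example (editor's addition), "type.googleapis.com/google.protobuf.Duration" | |||
|	// needs no quoting, while a URL containing a space or '%' is emitted as a | |||
|	// quoted string token by writeProto3Any below. | |||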
|	// isAny reports whether sv is a google.protobuf.Any message. | |||
| func isAny(sv reflect.Value) bool { | |||
| type wkt interface { | |||
| XXX_WellKnownType() string | |||
| } | |||
| t, ok := sv.Addr().Interface().(wkt) | |||
| return ok && t.XXX_WellKnownType() == "Any" | |||
| } | |||
| // writeProto3Any writes an expanded google.protobuf.Any message. | |||
| // | |||
| // It returns (false, nil) if sv value can't be unmarshaled (e.g. because | |||
| // required messages are not linked in). | |||
| // | |||
| // It returns (true, error) when sv was written in expanded format or an error | |||
| // was encountered. | |||
| func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { | |||
| turl := sv.FieldByName("TypeUrl") | |||
| val := sv.FieldByName("Value") | |||
| if !turl.IsValid() || !val.IsValid() { | |||
| return true, errors.New("proto: invalid google.protobuf.Any message") | |||
| } | |||
| b, ok := val.Interface().([]byte) | |||
| if !ok { | |||
| return true, errors.New("proto: invalid google.protobuf.Any message") | |||
| } | |||
| parts := strings.Split(turl.String(), "/") | |||
| mt := MessageType(parts[len(parts)-1]) | |||
| if mt == nil { | |||
| return false, nil | |||
| } | |||
| m := reflect.New(mt.Elem()) | |||
| if err := Unmarshal(b, m.Interface().(Message)); err != nil { | |||
| return false, nil | |||
| } | |||
| w.Write([]byte("[")) | |||
| u := turl.String() | |||
| if requiresQuotes(u) { | |||
| writeString(w, u) | |||
| } else { | |||
| w.Write([]byte(u)) | |||
| } | |||
| if w.compact { | |||
| w.Write([]byte("]:<")) | |||
| } else { | |||
| w.Write([]byte("]: <\n")) | |||
| w.ind++ | |||
| } | |||
| if err := tm.writeStruct(w, m.Elem()); err != nil { | |||
| return true, err | |||
| } | |||
| if w.compact { | |||
| w.Write([]byte("> ")) | |||
| } else { | |||
| w.ind-- | |||
| w.Write([]byte(">\n")) | |||
| } | |||
| return true, nil | |||
| } | |||
| func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { | |||
| if tm.ExpandAny && isAny(sv) { | |||
| if canExpand, err := tm.writeProto3Any(w, sv); canExpand { | |||
| return err | |||
| } | |||
| } | |||
| st := sv.Type() | |||
| sprops := GetProperties(st) | |||
| for i := 0; i < sv.NumField(); i++ { | |||
| fv := sv.Field(i) | |||
| props := sprops.Prop[i] | |||
| name := st.Field(i).Name | |||
| if strings.HasPrefix(name, "XXX_") { | |||
| // There are two XXX_ fields: | |||
| // XXX_unrecognized []byte | |||
| // XXX_extensions map[int32]proto.Extension | |||
| // The first is handled here; | |||
| // the second is handled at the bottom of this function. | |||
| if name == "XXX_unrecognized" && !fv.IsNil() { | |||
| if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| continue | |||
| } | |||
| if fv.Kind() == reflect.Ptr && fv.IsNil() { | |||
| // Field not filled in. This could be an optional field or | |||
| // a required field that wasn't filled in. Either way, there | |||
| // isn't anything we can show for it. | |||
| continue | |||
| } | |||
| if fv.Kind() == reflect.Slice && fv.IsNil() { | |||
| // Repeated field that is empty, or a bytes field that is unused. | |||
| continue | |||
| } | |||
| if props.Repeated && fv.Kind() == reflect.Slice { | |||
| // Repeated field. | |||
| for j := 0; j < fv.Len(); j++ { | |||
| if err := writeName(w, props); err != nil { | |||
| return err | |||
| } | |||
| if !w.compact { | |||
| if err := w.WriteByte(' '); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| v := fv.Index(j) | |||
| if v.Kind() == reflect.Ptr && v.IsNil() { | |||
| // A nil message in a repeated field is not valid, | |||
| // but we can handle that more gracefully than panicking. | |||
| if _, err := w.Write([]byte("<nil>\n")); err != nil { | |||
| return err | |||
| } | |||
| continue | |||
| } | |||
| if err := tm.writeAny(w, v, props); err != nil { | |||
| return err | |||
| } | |||
| if err := w.WriteByte('\n'); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| continue | |||
| } | |||
| if fv.Kind() == reflect.Map { | |||
| // Map fields are rendered as a repeated struct with key/value fields. | |||
| keys := fv.MapKeys() | |||
| sort.Sort(mapKeys(keys)) | |||
| for _, key := range keys { | |||
| val := fv.MapIndex(key) | |||
| if err := writeName(w, props); err != nil { | |||
| return err | |||
| } | |||
| if !w.compact { | |||
| if err := w.WriteByte(' '); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| // open struct | |||
| if err := w.WriteByte('<'); err != nil { | |||
| return err | |||
| } | |||
| if !w.compact { | |||
| if err := w.WriteByte('\n'); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| w.indent() | |||
| // key | |||
| if _, err := w.WriteString("key:"); err != nil { | |||
| return err | |||
| } | |||
| if !w.compact { | |||
| if err := w.WriteByte(' '); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| if err := tm.writeAny(w, key, props.mkeyprop); err != nil { | |||
| return err | |||
| } | |||
| if err := w.WriteByte('\n'); err != nil { | |||
| return err | |||
| } | |||
| // nil values aren't legal, but we can avoid panicking because of them. | |||
| if val.Kind() != reflect.Ptr || !val.IsNil() { | |||
| // value | |||
| if _, err := w.WriteString("value:"); err != nil { | |||
| return err | |||
| } | |||
| if !w.compact { | |||
| if err := w.WriteByte(' '); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| if err := tm.writeAny(w, val, props.mvalprop); err != nil { | |||
| return err | |||
| } | |||
| if err := w.WriteByte('\n'); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| // close struct | |||
| w.unindent() | |||
| if err := w.WriteByte('>'); err != nil { | |||
| return err | |||
| } | |||
| if err := w.WriteByte('\n'); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| continue | |||
| } | |||
| if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { | |||
| // empty bytes field | |||
| continue | |||
| } | |||
| if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { | |||
| // proto3 non-repeated scalar field; skip if zero value | |||
| if isProto3Zero(fv) { | |||
| continue | |||
| } | |||
| } | |||
| if fv.Kind() == reflect.Interface { | |||
| // Check if it is a oneof. | |||
| if st.Field(i).Tag.Get("protobuf_oneof") != "" { | |||
| // fv is nil, or holds a pointer to generated struct. | |||
| // That generated struct has exactly one field, | |||
| // which has a protobuf struct tag. | |||
| if fv.IsNil() { | |||
| continue | |||
| } | |||
| inner := fv.Elem().Elem() // interface -> *T -> T | |||
| tag := inner.Type().Field(0).Tag.Get("protobuf") | |||
| props = new(Properties) // Overwrite the outer props var, but not its pointee. | |||
| props.Parse(tag) | |||
| // Write the value in the oneof, not the oneof itself. | |||
| fv = inner.Field(0) | |||
| // Special case to cope with malformed messages gracefully: | |||
| // If the value in the oneof is a nil pointer, don't panic | |||
| // in writeAny. | |||
| if fv.Kind() == reflect.Ptr && fv.IsNil() { | |||
| // Use errors.New so writeAny won't render quotes. | |||
| msg := errors.New("/* nil */") | |||
| fv = reflect.ValueOf(&msg).Elem() | |||
| } | |||
| } | |||
| } | |||
| if err := writeName(w, props); err != nil { | |||
| return err | |||
| } | |||
| if !w.compact { | |||
| if err := w.WriteByte(' '); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| if b, ok := fv.Interface().(raw); ok { | |||
| if err := writeRaw(w, b.Bytes()); err != nil { | |||
| return err | |||
| } | |||
| continue | |||
| } | |||
| // Enums have a String method, so writeAny will work fine. | |||
| if err := tm.writeAny(w, fv, props); err != nil { | |||
| return err | |||
| } | |||
| if err := w.WriteByte('\n'); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| // Extensions (the XXX_extensions field). | |||
| pv := sv.Addr() | |||
| if pv.Type().Implements(extendableProtoType) { | |||
| if err := tm.writeExtensions(w, pv); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| // writeRaw writes an uninterpreted raw message. | |||
| func writeRaw(w *textWriter, b []byte) error { | |||
| if err := w.WriteByte('<'); err != nil { | |||
| return err | |||
| } | |||
| if !w.compact { | |||
| if err := w.WriteByte('\n'); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| w.indent() | |||
| if err := writeUnknownStruct(w, b); err != nil { | |||
| return err | |||
| } | |||
| w.unindent() | |||
| if err := w.WriteByte('>'); err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| // writeAny writes an arbitrary field. | |||
| func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { | |||
| v = reflect.Indirect(v) | |||
| // Floats have special cases. | |||
| if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { | |||
| x := v.Float() | |||
| var b []byte | |||
| switch { | |||
| case math.IsInf(x, 1): | |||
| b = posInf | |||
| case math.IsInf(x, -1): | |||
| b = negInf | |||
| case math.IsNaN(x): | |||
| b = nan | |||
| } | |||
| if b != nil { | |||
| _, err := w.Write(b) | |||
| return err | |||
| } | |||
| // Other values are handled below. | |||
| } | |||
| // We don't attempt to serialise every possible value type; only those | |||
| // that can occur in protocol buffers. | |||
| switch v.Kind() { | |||
| case reflect.Slice: | |||
| // Should only be a []byte; repeated fields are handled in writeStruct. | |||
| if err := writeString(w, string(v.Interface().([]byte))); err != nil { | |||
| return err | |||
| } | |||
| case reflect.String: | |||
| if err := writeString(w, v.String()); err != nil { | |||
| return err | |||
| } | |||
| case reflect.Struct: | |||
| // Required/optional group/message. | |||
| var bra, ket byte = '<', '>' | |||
| if props != nil && props.Wire == "group" { | |||
| bra, ket = '{', '}' | |||
| } | |||
| if err := w.WriteByte(bra); err != nil { | |||
| return err | |||
| } | |||
| if !w.compact { | |||
| if err := w.WriteByte('\n'); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| w.indent() | |||
| if etm, ok := v.Interface().(encoding.TextMarshaler); ok { | |||
| text, err := etm.MarshalText() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if _, err = w.Write(text); err != nil { | |||
| return err | |||
| } | |||
| } else if err := tm.writeStruct(w, v); err != nil { | |||
| return err | |||
| } | |||
| w.unindent() | |||
| if err := w.WriteByte(ket); err != nil { | |||
| return err | |||
| } | |||
| default: | |||
| _, err := fmt.Fprint(w, v.Interface()) | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| // equivalent to C's isprint. | |||
| func isprint(c byte) bool { | |||
| return c >= 0x20 && c < 0x7f | |||
| } | |||
| // writeString writes a string in the protocol buffer text format. | |||
| // It is similar to strconv.Quote except we don't use Go escape sequences, | |||
| // we treat the string as a byte sequence, and we use octal escapes. | |||
| // These differences are to maintain interoperability with the other | |||
| // languages' implementations of the text format. | |||
| func writeString(w *textWriter, s string) error { | |||
| // use WriteByte here to get any needed indent | |||
| if err := w.WriteByte('"'); err != nil { | |||
| return err | |||
| } | |||
| // Loop over the bytes, not the runes. | |||
| for i := 0; i < len(s); i++ { | |||
| var err error | |||
| // Divergence from C++: we don't escape apostrophes. | |||
| // There's no need to escape them, and the C++ parser | |||
| // copes with a naked apostrophe. | |||
| switch c := s[i]; c { | |||
| case '\n': | |||
| _, err = w.w.Write(backslashN) | |||
| case '\r': | |||
| _, err = w.w.Write(backslashR) | |||
| case '\t': | |||
| _, err = w.w.Write(backslashT) | |||
| case '"': | |||
| _, err = w.w.Write(backslashDQ) | |||
| case '\\': | |||
| _, err = w.w.Write(backslashBS) | |||
| default: | |||
| if isprint(c) { | |||
| err = w.w.WriteByte(c) | |||
| } else { | |||
| _, err = fmt.Fprintf(w.w, "\\%03o", c) | |||
| } | |||
| } | |||
| if err != nil { | |||
| return err | |||
| } | |||
| } | |||
| return w.WriteByte('"') | |||
| } | |||
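|	// As a concrete example (editor's addition): writing the Go string "café\n" | |||
|	// produces the token "caf\303\251\n", since the two UTF-8 bytes of 'é' fall | |||
|	// outside isprint and become octal escapes, while the newline uses the short | |||
|	// \n form; both choices match the other languages' text-format tokenizers. | |||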
| func writeUnknownStruct(w *textWriter, data []byte) (err error) { | |||
| if !w.compact { | |||
| if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| b := NewBuffer(data) | |||
| for b.index < len(b.buf) { | |||
| x, err := b.DecodeVarint() | |||
| if err != nil { | |||
| _, err := fmt.Fprintf(w, "/* %v */\n", err) | |||
| return err | |||
| } | |||
| wire, tag := x&7, x>>3 | |||
| if wire == WireEndGroup { | |||
| w.unindent() | |||
| if _, err := w.Write(endBraceNewline); err != nil { | |||
| return err | |||
| } | |||
| continue | |||
| } | |||
| if _, err := fmt.Fprint(w, tag); err != nil { | |||
| return err | |||
| } | |||
| if wire != WireStartGroup { | |||
| if err := w.WriteByte(':'); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| if !w.compact || wire == WireStartGroup { | |||
| if err := w.WriteByte(' '); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| switch wire { | |||
| case WireBytes: | |||
| buf, e := b.DecodeRawBytes(false) | |||
| if e == nil { | |||
| _, err = fmt.Fprintf(w, "%q", buf) | |||
| } else { | |||
| _, err = fmt.Fprintf(w, "/* %v */", e) | |||
| } | |||
| case WireFixed32: | |||
| x, err = b.DecodeFixed32() | |||
| err = writeUnknownInt(w, x, err) | |||
| case WireFixed64: | |||
| x, err = b.DecodeFixed64() | |||
| err = writeUnknownInt(w, x, err) | |||
| case WireStartGroup: | |||
| err = w.WriteByte('{') | |||
| w.indent() | |||
| case WireVarint: | |||
| x, err = b.DecodeVarint() | |||
| err = writeUnknownInt(w, x, err) | |||
| default: | |||
| _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) | |||
| } | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if err = w.WriteByte('\n'); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| func writeUnknownInt(w *textWriter, x uint64, err error) error { | |||
| if err == nil { | |||
| _, err = fmt.Fprint(w, x) | |||
| } else { | |||
| _, err = fmt.Fprintf(w, "/* %v */", err) | |||
| } | |||
| return err | |||
| } | |||
| type int32Slice []int32 | |||
| func (s int32Slice) Len() int { return len(s) } | |||
| func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } | |||
| func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } | |||
| // writeExtensions writes all the extensions in pv. | |||
| // pv is assumed to be a pointer to a protocol message struct that is extendable. | |||
| func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { | |||
| emap := extensionMaps[pv.Type().Elem()] | |||
| ep := pv.Interface().(extendableProto) | |||
| // Order the extensions by ID. | |||
| // This isn't strictly necessary, but it will give us | |||
| // canonical output, which will also make testing easier. | |||
| m := ep.ExtensionMap() | |||
| ids := make([]int32, 0, len(m)) | |||
| for id := range m { | |||
| ids = append(ids, id) | |||
| } | |||
| sort.Sort(int32Slice(ids)) | |||
| for _, extNum := range ids { | |||
| ext := m[extNum] | |||
| var desc *ExtensionDesc | |||
| if emap != nil { | |||
| desc = emap[extNum] | |||
| } | |||
| if desc == nil { | |||
| // Unknown extension. | |||
| if err := writeUnknownStruct(w, ext.enc); err != nil { | |||
| return err | |||
| } | |||
| continue | |||
| } | |||
| pb, err := GetExtension(ep, desc) | |||
| if err != nil { | |||
| return fmt.Errorf("failed getting extension: %v", err) | |||
| } | |||
| // Repeated extensions will appear as a slice. | |||
| if !desc.repeated() { | |||
| if err := tm.writeExtension(w, desc.Name, pb); err != nil { | |||
| return err | |||
| } | |||
| } else { | |||
| v := reflect.ValueOf(pb) | |||
| for i := 0; i < v.Len(); i++ { | |||
| if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { | |||
| if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { | |||
| return err | |||
| } | |||
| if !w.compact { | |||
| if err := w.WriteByte(' '); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { | |||
| return err | |||
| } | |||
| if err := w.WriteByte('\n'); err != nil { | |||
| return err | |||
| } | |||
| return nil | |||
| } | |||
| func (w *textWriter) writeIndent() { | |||
| if !w.complete { | |||
| return | |||
| } | |||
| remain := w.ind * 2 | |||
| for remain > 0 { | |||
| n := remain | |||
| if n > len(spaces) { | |||
| n = len(spaces) | |||
| } | |||
| w.w.Write(spaces[:n]) | |||
| remain -= n | |||
| } | |||
| w.complete = false | |||
| } | |||
| // TextMarshaler is a configurable text format marshaler. | |||
| type TextMarshaler struct { | |||
| Compact bool // use compact text format (one line). | |||
| ExpandAny bool // expand google.protobuf.Any messages of known types | |||
| } | |||
| // Marshal writes a given protocol buffer in text format. | |||
| // The only errors returned are from w. | |||
| func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { | |||
| val := reflect.ValueOf(pb) | |||
| if pb == nil || val.IsNil() { | |||
| w.Write([]byte("<nil>")) | |||
| return nil | |||
| } | |||
| var bw *bufio.Writer | |||
| ww, ok := w.(writer) | |||
| if !ok { | |||
| bw = bufio.NewWriter(w) | |||
| ww = bw | |||
| } | |||
| aw := &textWriter{ | |||
| w: ww, | |||
| complete: true, | |||
| compact: tm.Compact, | |||
| } | |||
| if etm, ok := pb.(encoding.TextMarshaler); ok { | |||
| text, err := etm.MarshalText() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if _, err = aw.Write(text); err != nil { | |||
| return err | |||
| } | |||
| if bw != nil { | |||
| return bw.Flush() | |||
| } | |||
| return nil | |||
| } | |||
| // Dereference the received pointer so we don't have outer < and >. | |||
| v := reflect.Indirect(val) | |||
| if err := tm.writeStruct(aw, v); err != nil { | |||
| return err | |||
| } | |||
| if bw != nil { | |||
| return bw.Flush() | |||
| } | |||
| return nil | |||
| } | |||
| // Text is the same as Marshal, but returns the string directly. | |||
| func (tm *TextMarshaler) Text(pb Message) string { | |||
| var buf bytes.Buffer | |||
| tm.Marshal(&buf, pb) | |||
| return buf.String() | |||
| } | |||
| var ( | |||
| defaultTextMarshaler = TextMarshaler{} | |||
| compactTextMarshaler = TextMarshaler{Compact: true} | |||
| ) | |||
| // TODO: consider removing some of the Marshal functions below. | |||
| // MarshalText writes a given protocol buffer in text format. | |||
| // The only errors returned are from w. | |||
| func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } | |||
| // MarshalTextString is the same as MarshalText, but returns the string directly. | |||
| func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } | |||
| // CompactText writes a given protocol buffer in compact text format (one line). | |||
| func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } | |||
| // CompactTextString is the same as CompactText, but returns the string directly. | |||
| func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } | |||
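|	// Editor's sketch, not part of the vendored file: given any message value | |||
|	// pb, the helpers above compose like this. | |||
|	// | |||
|	//	var buf bytes.Buffer | |||
|	//	if err := MarshalText(&buf, pb); err != nil { // multi-line form | |||
|	//		log.Fatal(err) | |||
|	//	} | |||
|	//	oneLine := CompactTextString(pb) // same content on a single line | |||
|	//	expanded := (&TextMarshaler{ExpandAny: true}).Text(pb) | |||
|	// | |||
|	// Since Marshal only returns errors from the destination writer, the | |||
|	// bytes.Buffer write above cannot fail in practice. | |||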
| @@ -0,0 +1,871 @@ | |||
| // Go support for Protocol Buffers - Google's data interchange format | |||
| // | |||
| // Copyright 2010 The Go Authors. All rights reserved. | |||
| // https://github.com/golang/protobuf | |||
| // | |||
| // Redistribution and use in source and binary forms, with or without | |||
| // modification, are permitted provided that the following conditions are | |||
| // met: | |||
| // | |||
| // * Redistributions of source code must retain the above copyright | |||
| // notice, this list of conditions and the following disclaimer. | |||
| // * Redistributions in binary form must reproduce the above | |||
| // copyright notice, this list of conditions and the following disclaimer | |||
| // in the documentation and/or other materials provided with the | |||
| // distribution. | |||
| // * Neither the name of Google Inc. nor the names of its | |||
| // contributors may be used to endorse or promote products derived from | |||
| // this software without specific prior written permission. | |||
| // | |||
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| package proto | |||
|	// Functions for parsing the text protocol buffer format. | |||
| // TODO: message sets. | |||
| import ( | |||
| "encoding" | |||
| "errors" | |||
| "fmt" | |||
| "reflect" | |||
| "strconv" | |||
| "strings" | |||
| "unicode/utf8" | |||
| ) | |||
| type ParseError struct { | |||
| Message string | |||
| Line int // 1-based line number | |||
| Offset int // 0-based byte offset from start of input | |||
| } | |||
| func (p *ParseError) Error() string { | |||
| if p.Line == 1 { | |||
| // show offset only for first line | |||
| return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) | |||
| } | |||
| return fmt.Sprintf("line %d: %v", p.Line, p.Message) | |||
| } | |||
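|	// For example (editor's addition), a stray '$' at byte 17 of the first line | |||
|	// reports "line 1.17: unexpected byte 0x24"; on later lines the offset is | |||
|	// omitted and only the line number is shown. | |||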
| type token struct { | |||
| value string | |||
| err *ParseError | |||
| line int // line number | |||
| offset int // byte number from start of input, not start of line | |||
| unquoted string // the unquoted version of value, if it was a quoted string | |||
| } | |||
| func (t *token) String() string { | |||
| if t.err == nil { | |||
| return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) | |||
| } | |||
| return fmt.Sprintf("parse error: %v", t.err) | |||
| } | |||
| type textParser struct { | |||
| s string // remaining input | |||
| done bool // whether the parsing is finished (success or error) | |||
| backed bool // whether back() was called | |||
| offset, line int | |||
| cur token | |||
| } | |||
| func newTextParser(s string) *textParser { | |||
| p := new(textParser) | |||
| p.s = s | |||
| p.line = 1 | |||
| p.cur.line = 1 | |||
| return p | |||
| } | |||
| func (p *textParser) errorf(format string, a ...interface{}) *ParseError { | |||
| pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} | |||
| p.cur.err = pe | |||
| p.done = true | |||
| return pe | |||
| } | |||
| // Numbers and identifiers are matched by [-+._A-Za-z0-9] | |||
| func isIdentOrNumberChar(c byte) bool { | |||
| switch { | |||
| case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': | |||
| return true | |||
| case '0' <= c && c <= '9': | |||
| return true | |||
| } | |||
| switch c { | |||
| case '-', '+', '.', '_': | |||
| return true | |||
| } | |||
| return false | |||
| } | |||
| func isWhitespace(c byte) bool { | |||
| switch c { | |||
| case ' ', '\t', '\n', '\r': | |||
| return true | |||
| } | |||
| return false | |||
| } | |||
| func isQuote(c byte) bool { | |||
| switch c { | |||
| case '"', '\'': | |||
| return true | |||
| } | |||
| return false | |||
| } | |||
| func (p *textParser) skipWhitespace() { | |||
| i := 0 | |||
| for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { | |||
| if p.s[i] == '#' { | |||
| // comment; skip to end of line or input | |||
| for i < len(p.s) && p.s[i] != '\n' { | |||
| i++ | |||
| } | |||
| if i == len(p.s) { | |||
| break | |||
| } | |||
| } | |||
| if p.s[i] == '\n' { | |||
| p.line++ | |||
| } | |||
| i++ | |||
| } | |||
| p.offset += i | |||
| p.s = p.s[i:len(p.s)] | |||
| if len(p.s) == 0 { | |||
| p.done = true | |||
| } | |||
| } | |||
| func (p *textParser) advance() { | |||
| // Skip whitespace | |||
| p.skipWhitespace() | |||
| if p.done { | |||
| return | |||
| } | |||
| // Start of non-whitespace | |||
| p.cur.err = nil | |||
| p.cur.offset, p.cur.line = p.offset, p.line | |||
| p.cur.unquoted = "" | |||
| switch p.s[0] { | |||
| case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': | |||
| // Single symbol | |||
| p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] | |||
| case '"', '\'': | |||
| // Quoted string | |||
| i := 1 | |||
| for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { | |||
| if p.s[i] == '\\' && i+1 < len(p.s) { | |||
| // skip escaped char | |||
| i++ | |||
| } | |||
| i++ | |||
| } | |||
| if i >= len(p.s) || p.s[i] != p.s[0] { | |||
| p.errorf("unmatched quote") | |||
| return | |||
| } | |||
| unq, err := unquoteC(p.s[1:i], rune(p.s[0])) | |||
| if err != nil { | |||
| p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) | |||
| return | |||
| } | |||
| p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] | |||
| p.cur.unquoted = unq | |||
| default: | |||
| i := 0 | |||
| for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { | |||
| i++ | |||
| } | |||
| if i == 0 { | |||
| p.errorf("unexpected byte %#x", p.s[0]) | |||
| return | |||
| } | |||
| p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] | |||
| } | |||
| p.offset += len(p.cur.value) | |||
| } | |||
| var ( | |||
| errBadUTF8 = errors.New("proto: bad UTF-8") | |||
| errBadHex = errors.New("proto: bad hexadecimal") | |||
| ) | |||
| func unquoteC(s string, quote rune) (string, error) { | |||
| // This is based on C++'s tokenizer.cc. | |||
| // Despite its name, this is *not* parsing C syntax. | |||
| // For instance, "\0" is an invalid quoted string. | |||
| // Avoid allocation in trivial cases. | |||
| simple := true | |||
| for _, r := range s { | |||
| if r == '\\' || r == quote { | |||
| simple = false | |||
| break | |||
| } | |||
| } | |||
| if simple { | |||
| return s, nil | |||
| } | |||
| buf := make([]byte, 0, 3*len(s)/2) | |||
| for len(s) > 0 { | |||
| r, n := utf8.DecodeRuneInString(s) | |||
| if r == utf8.RuneError && n == 1 { | |||
| return "", errBadUTF8 | |||
| } | |||
| s = s[n:] | |||
| if r != '\\' { | |||
| if r < utf8.RuneSelf { | |||
| buf = append(buf, byte(r)) | |||
| } else { | |||
| buf = append(buf, string(r)...) | |||
| } | |||
| continue | |||
| } | |||
| ch, tail, err := unescape(s) | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| buf = append(buf, ch...) | |||
| s = tail | |||
| } | |||
| return string(buf), nil | |||
| } | |||
| func unescape(s string) (ch string, tail string, err error) { | |||
| r, n := utf8.DecodeRuneInString(s) | |||
| if r == utf8.RuneError && n == 1 { | |||
| return "", "", errBadUTF8 | |||
| } | |||
| s = s[n:] | |||
| switch r { | |||
| case 'a': | |||
| return "\a", s, nil | |||
| case 'b': | |||
| return "\b", s, nil | |||
| case 'f': | |||
| return "\f", s, nil | |||
| case 'n': | |||
| return "\n", s, nil | |||
| case 'r': | |||
| return "\r", s, nil | |||
| case 't': | |||
| return "\t", s, nil | |||
| case 'v': | |||
| return "\v", s, nil | |||
| case '?': | |||
| return "?", s, nil // trigraph workaround | |||
| case '\'', '"', '\\': | |||
| return string(r), s, nil | |||
| case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': | |||
| if len(s) < 2 { | |||
| return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) | |||
| } | |||
| base := 8 | |||
| ss := s[:2] | |||
| s = s[2:] | |||
| if r == 'x' || r == 'X' { | |||
| base = 16 | |||
| } else { | |||
| ss = string(r) + ss | |||
| } | |||
| i, err := strconv.ParseUint(ss, base, 8) | |||
| if err != nil { | |||
| return "", "", err | |||
| } | |||
| return string([]byte{byte(i)}), s, nil | |||
| case 'u', 'U': | |||
| n := 4 | |||
| if r == 'U' { | |||
| n = 8 | |||
| } | |||
| if len(s) < n { | |||
| return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) | |||
| } | |||
| bs := make([]byte, n/2) | |||
| for i := 0; i < n; i += 2 { | |||
| a, ok1 := unhex(s[i]) | |||
| b, ok2 := unhex(s[i+1]) | |||
| if !ok1 || !ok2 { | |||
| return "", "", errBadHex | |||
| } | |||
| bs[i/2] = a<<4 | b | |||
| } | |||
| s = s[n:] | |||
| return string(bs), s, nil | |||
| } | |||
| return "", "", fmt.Errorf(`unknown escape \%c`, r) | |||
| } | |||
| // Adapted from src/pkg/strconv/quote.go. | |||
| func unhex(b byte) (v byte, ok bool) { | |||
| switch { | |||
| case '0' <= b && b <= '9': | |||
| return b - '0', true | |||
| case 'a' <= b && b <= 'f': | |||
| return b - 'a' + 10, true | |||
| case 'A' <= b && b <= 'F': | |||
| return b - 'A' + 10, true | |||
| } | |||
| return 0, false | |||
| } | |||
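|	// Editor's sketch, not part of the vendored file: under the rules above, | |||
|	// | |||
|	//	unquoteC(`\x41\101\n`, '"') | |||
|	// | |||
|	// returns "AA\n": \x41 decodes as one hex byte and \101 as one octal byte | |||
|	// (both 'A'), and \n becomes a newline. | |||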
| // Back off the parser by one token. Can only be done between calls to next(). | |||
| // It makes the next advance() a no-op. | |||
| func (p *textParser) back() { p.backed = true } | |||
| // Advances the parser and returns the new current token. | |||
| func (p *textParser) next() *token { | |||
| if p.backed || p.done { | |||
| p.backed = false | |||
| return &p.cur | |||
| } | |||
| p.advance() | |||
| if p.done { | |||
| p.cur.value = "" | |||
| } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { | |||
| // Look for multiple quoted strings separated by whitespace, | |||
| // and concatenate them. | |||
| cat := p.cur | |||
| for { | |||
| p.skipWhitespace() | |||
| if p.done || !isQuote(p.s[0]) { | |||
| break | |||
| } | |||
| p.advance() | |||
| if p.cur.err != nil { | |||
| return &p.cur | |||
| } | |||
| cat.value += " " + p.cur.value | |||
| cat.unquoted += p.cur.unquoted | |||
| } | |||
| p.done = false // parser may have seen EOF, but we want to return cat | |||
| p.cur = cat | |||
| } | |||
| return &p.cur | |||
| } | |||
| func (p *textParser) consumeToken(s string) error { | |||
| tok := p.next() | |||
| if tok.err != nil { | |||
| return tok.err | |||
| } | |||
| if tok.value != s { | |||
| p.back() | |||
| return p.errorf("expected %q, found %q", s, tok.value) | |||
| } | |||
| return nil | |||
| } | |||
| // Return a RequiredNotSetError indicating which required field was not set. | |||
| func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { | |||
| st := sv.Type() | |||
| sprops := GetProperties(st) | |||
| for i := 0; i < st.NumField(); i++ { | |||
| if !isNil(sv.Field(i)) { | |||
| continue | |||
| } | |||
| props := sprops.Prop[i] | |||
| if props.Required { | |||
| return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} | |||
| } | |||
| } | |||
| return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen | |||
| } | |||
| // Returns the index in the struct for the named field, as well as the parsed tag properties. | |||
| func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { | |||
| i, ok := sprops.decoderOrigNames[name] | |||
| if ok { | |||
| return i, sprops.Prop[i], true | |||
| } | |||
| return -1, nil, false | |||
| } | |||
| // Consume a ':' from the input stream (if the next token is a colon), | |||
| // returning an error if a colon is needed but not present. | |||
| func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { | |||
| tok := p.next() | |||
| if tok.err != nil { | |||
| return tok.err | |||
| } | |||
| if tok.value != ":" { | |||
| // Colon is optional when the field is a group or message. | |||
| needColon := true | |||
| switch props.Wire { | |||
| case "group": | |||
| needColon = false | |||
| case "bytes": | |||
| // A "bytes" field is either a message, a string, or a repeated field; | |||
| // those three become *T, *string and []T respectively, so we can check for | |||
| // this field being a pointer to a non-string. | |||
| if typ.Kind() == reflect.Ptr { | |||
| // *T or *string | |||
| if typ.Elem().Kind() == reflect.String { | |||
| break | |||
| } | |||
| } else if typ.Kind() == reflect.Slice { | |||
| // []T or []*T | |||
| if typ.Elem().Kind() != reflect.Ptr { | |||
| break | |||
| } | |||
| } else if typ.Kind() == reflect.String { | |||
| // The proto3 exception is for a string field, | |||
| // which requires a colon. | |||
| break | |||
| } | |||
| needColon = false | |||
| } | |||
| if needColon { | |||
| return p.errorf("expected ':', found %q", tok.value) | |||
| } | |||
| p.back() | |||
| } | |||
| return nil | |||
| } | |||
| func (p *textParser) readStruct(sv reflect.Value, terminator string) error { | |||
| st := sv.Type() | |||
| sprops := GetProperties(st) | |||
| reqCount := sprops.reqCount | |||
| var reqFieldErr error | |||
| fieldSet := make(map[string]bool) | |||
| // A struct is a sequence of "name: value", terminated by one of | |||
| // '>' or '}', or the end of the input. A name may also be | |||
| // "[extension]" or "[type/url]". | |||
| // | |||
| // The whole struct can also be an expanded Any message, like: | |||
| // [type/url] < ... struct contents ... > | |||
| for { | |||
| tok := p.next() | |||
| if tok.err != nil { | |||
| return tok.err | |||
| } | |||
| if tok.value == terminator { | |||
| break | |||
| } | |||
| if tok.value == "[" { | |||
| // Looks like an extension or an Any. | |||
| // | |||
| // TODO: Check whether we need to handle | |||
| // namespace rooted names (e.g. ".something.Foo"). | |||
| extName, err := p.consumeExtName() | |||
| if err != nil { | |||
| return err | |||
| } | |||
| if s := strings.LastIndex(extName, "/"); s >= 0 { | |||
| // If it contains a slash, it's an Any type URL. | |||
| messageName := extName[s+1:] | |||
| mt := MessageType(messageName) | |||
| if mt == nil { | |||
| return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) | |||
| } | |||
| tok = p.next() | |||
| if tok.err != nil { | |||
| return tok.err | |||
| } | |||
| // consume an optional colon | |||
| if tok.value == ":" { | |||
| tok = p.next() | |||
| if tok.err != nil { | |||
| return tok.err | |||
| } | |||
| } | |||
| var terminator string | |||
| switch tok.value { | |||
| case "<": | |||
| terminator = ">" | |||
| case "{": | |||
| terminator = "}" | |||
| default: | |||
| return p.errorf("expected '{' or '<', found %q", tok.value) | |||
| } | |||
| v := reflect.New(mt.Elem()) | |||
| if pe := p.readStruct(v.Elem(), terminator); pe != nil { | |||
| return pe | |||
| } | |||
| b, err := Marshal(v.Interface().(Message)) | |||
| if err != nil { | |||
| return p.errorf("failed to marshal message of type %q: %v", messageName, err) | |||
| } | |||
| sv.FieldByName("TypeUrl").SetString(extName) | |||
| sv.FieldByName("Value").SetBytes(b) | |||
| continue | |||
| } | |||
| var desc *ExtensionDesc | |||
| // This could be faster, but it's functional. | |||
| // TODO: Do something smarter than a linear scan. | |||
| for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { | |||
| if d.Name == extName { | |||
| desc = d | |||
| break | |||
| } | |||
| } | |||
| if desc == nil { | |||
| return p.errorf("unrecognized extension %q", extName) | |||
| } | |||
| props := &Properties{} | |||
| props.Parse(desc.Tag) | |||
| typ := reflect.TypeOf(desc.ExtensionType) | |||
| if err := p.checkForColon(props, typ); err != nil { | |||
| return err | |||
| } | |||
| rep := desc.repeated() | |||
| // Read the extension structure, and set it in | |||
| // the value we're constructing. | |||
| var ext reflect.Value | |||
| if !rep { | |||
| ext = reflect.New(typ).Elem() | |||
| } else { | |||
| ext = reflect.New(typ.Elem()).Elem() | |||
| } | |||
| if err := p.readAny(ext, props); err != nil { | |||
| if _, ok := err.(*RequiredNotSetError); !ok { | |||
| return err | |||
| } | |||
| reqFieldErr = err | |||
| } | |||
| ep := sv.Addr().Interface().(extendableProto) | |||
| if !rep { | |||
| SetExtension(ep, desc, ext.Interface()) | |||
| } else { | |||
| old, err := GetExtension(ep, desc) | |||
| var sl reflect.Value | |||
| if err == nil { | |||
| sl = reflect.ValueOf(old) // existing slice | |||
| } else { | |||
| sl = reflect.MakeSlice(typ, 0, 1) | |||
| } | |||
| sl = reflect.Append(sl, ext) | |||
| SetExtension(ep, desc, sl.Interface()) | |||
| } | |||
| if err := p.consumeOptionalSeparator(); err != nil { | |||
| return err | |||
| } | |||
| continue | |||
| } | |||
| // This is a normal, non-extension field. | |||
| name := tok.value | |||
| var dst reflect.Value | |||
| fi, props, ok := structFieldByName(sprops, name) | |||
| if ok { | |||
| dst = sv.Field(fi) | |||
| } else if oop, ok := sprops.OneofTypes[name]; ok { | |||
| // It is a oneof. | |||
| props = oop.Prop | |||
| nv := reflect.New(oop.Type.Elem()) | |||
| dst = nv.Elem().Field(0) | |||
| sv.Field(oop.Field).Set(nv) | |||
| } | |||
| if !dst.IsValid() { | |||
| return p.errorf("unknown field name %q in %v", name, st) | |||
| } | |||
| if dst.Kind() == reflect.Map { | |||
| // Consume any colon. | |||
| if err := p.checkForColon(props, dst.Type()); err != nil { | |||
| return err | |||
| } | |||
| // Construct the map if it doesn't already exist. | |||
| if dst.IsNil() { | |||
| dst.Set(reflect.MakeMap(dst.Type())) | |||
| } | |||
| key := reflect.New(dst.Type().Key()).Elem() | |||
| val := reflect.New(dst.Type().Elem()).Elem() | |||
| // The map entry should be this sequence of tokens: | |||
| // < key : KEY value : VALUE > | |||
| // Technically the "key" and "value" could come in any order, | |||
| // but in practice they won't. | |||
| tok := p.next() | |||
| var terminator string | |||
| switch tok.value { | |||
| case "<": | |||
| terminator = ">" | |||
| case "{": | |||
| terminator = "}" | |||
| default: | |||
| return p.errorf("expected '{' or '<', found %q", tok.value) | |||
| } | |||
| if err := p.consumeToken("key"); err != nil { | |||
| return err | |||
| } | |||
| if err := p.consumeToken(":"); err != nil { | |||
| return err | |||
| } | |||
| if err := p.readAny(key, props.mkeyprop); err != nil { | |||
| return err | |||
| } | |||
| if err := p.consumeOptionalSeparator(); err != nil { | |||
| return err | |||
| } | |||
| if err := p.consumeToken("value"); err != nil { | |||
| return err | |||
| } | |||
| if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { | |||
| return err | |||
| } | |||
| if err := p.readAny(val, props.mvalprop); err != nil { | |||
| return err | |||
| } | |||
| if err := p.consumeOptionalSeparator(); err != nil { | |||
| return err | |||
| } | |||
| if err := p.consumeToken(terminator); err != nil { | |||
| return err | |||
| } | |||
| dst.SetMapIndex(key, val) | |||
| continue | |||
| } | |||
| // Check that it's not already set if it's not a repeated field. | |||
| if !props.Repeated && fieldSet[name] { | |||
| return p.errorf("non-repeated field %q was repeated", name) | |||
| } | |||
| if err := p.checkForColon(props, dst.Type()); err != nil { | |||
| return err | |||
| } | |||
| // Parse into the field. | |||
| fieldSet[name] = true | |||
| if err := p.readAny(dst, props); err != nil { | |||
| if _, ok := err.(*RequiredNotSetError); !ok { | |||
| return err | |||
| } | |||
| reqFieldErr = err | |||
| } else if props.Required { | |||
| reqCount-- | |||
| } | |||
| if err := p.consumeOptionalSeparator(); err != nil { | |||
| return err | |||
| } | |||
| } | |||
| if reqCount > 0 { | |||
| return p.missingRequiredFieldError(sv) | |||
| } | |||
| return reqFieldErr | |||
| } | |||
| // consumeExtName consumes extension name or expanded Any type URL and the | |||
| // following ']'. It returns the name or URL consumed. | |||
| func (p *textParser) consumeExtName() (string, error) { | |||
| tok := p.next() | |||
| if tok.err != nil { | |||
| return "", tok.err | |||
| } | |||
| // If extension name or type url is quoted, it's a single token. | |||
| if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { | |||
| name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) | |||
| if err != nil { | |||
| return "", err | |||
| } | |||
| return name, p.consumeToken("]") | |||
| } | |||
| // Consume everything up to "]" | |||
| var parts []string | |||
| for tok.value != "]" { | |||
| parts = append(parts, tok.value) | |||
| tok = p.next() | |||
| if tok.err != nil { | |||
| return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) | |||
| } | |||
| } | |||
| return strings.Join(parts, ""), nil | |||
| } | |||
| // consumeOptionalSeparator consumes an optional semicolon or comma. | |||
| // It is used in readStruct to provide backward compatibility. | |||
| func (p *textParser) consumeOptionalSeparator() error { | |||
| tok := p.next() | |||
| if tok.err != nil { | |||
| return tok.err | |||
| } | |||
| if tok.value != ";" && tok.value != "," { | |||
| p.back() | |||
| } | |||
| return nil | |||
| } | |||
| func (p *textParser) readAny(v reflect.Value, props *Properties) error { | |||
| tok := p.next() | |||
| if tok.err != nil { | |||
| return tok.err | |||
| } | |||
| if tok.value == "" { | |||
| return p.errorf("unexpected EOF") | |||
| } | |||
| switch fv := v; fv.Kind() { | |||
| case reflect.Slice: | |||
| at := v.Type() | |||
| if at.Elem().Kind() == reflect.Uint8 { | |||
| // Special case for []byte | |||
| if tok.value[0] != '"' && tok.value[0] != '\'' { | |||
| // Deliberately written out here, as the error after | |||
| // this switch statement would write "invalid []byte: ...", | |||
| // which is not as user-friendly. | |||
| return p.errorf("invalid string: %v", tok.value) | |||
| } | |||
| bytes := []byte(tok.unquoted) | |||
| fv.Set(reflect.ValueOf(bytes)) | |||
| return nil | |||
| } | |||
| // Repeated field. | |||
| if tok.value == "[" { | |||
| // Repeated field with list notation, like [1,2,3]. | |||
| for { | |||
| fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) | |||
| err := p.readAny(fv.Index(fv.Len()-1), props) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| tok := p.next() | |||
| if tok.err != nil { | |||
| return tok.err | |||
| } | |||
| if tok.value == "]" { | |||
| break | |||
| } | |||
| if tok.value != "," { | |||
| return p.errorf("Expected ']' or ',' found %q", tok.value) | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| // One value of the repeated field. | |||
| p.back() | |||
| fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) | |||
| return p.readAny(fv.Index(fv.Len()-1), props) | |||
| case reflect.Bool: | |||
| // Either "true", "false", 1 or 0. | |||
| switch tok.value { | |||
| case "true", "1": | |||
| fv.SetBool(true) | |||
| return nil | |||
| case "false", "0": | |||
| fv.SetBool(false) | |||
| return nil | |||
| } | |||
| case reflect.Float32, reflect.Float64: | |||
| v := tok.value | |||
| // Ignore 'f' for compatibility with output generated by C++, but don't | |||
| // remove 'f' when the value is "-inf" or "inf". | |||
| if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { | |||
| v = v[:len(v)-1] | |||
| } | |||
| if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { | |||
| fv.SetFloat(f) | |||
| return nil | |||
| } | |||
| case reflect.Int32: | |||
| if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { | |||
| fv.SetInt(x) | |||
| return nil | |||
| } | |||
| if len(props.Enum) == 0 { | |||
| break | |||
| } | |||
| m, ok := enumValueMaps[props.Enum] | |||
| if !ok { | |||
| break | |||
| } | |||
| x, ok := m[tok.value] | |||
| if !ok { | |||
| break | |||
| } | |||
| fv.SetInt(int64(x)) | |||
| return nil | |||
| case reflect.Int64: | |||
| if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { | |||
| fv.SetInt(x) | |||
| return nil | |||
| } | |||
| case reflect.Ptr: | |||
| // A basic field (indirected through pointer), or a repeated message/group | |||
| p.back() | |||
| fv.Set(reflect.New(fv.Type().Elem())) | |||
| return p.readAny(fv.Elem(), props) | |||
| case reflect.String: | |||
| if tok.value[0] == '"' || tok.value[0] == '\'' { | |||
| fv.SetString(tok.unquoted) | |||
| return nil | |||
| } | |||
| case reflect.Struct: | |||
| var terminator string | |||
| switch tok.value { | |||
| case "{": | |||
| terminator = "}" | |||
| case "<": | |||
| terminator = ">" | |||
| default: | |||
| return p.errorf("expected '{' or '<', found %q", tok.value) | |||
| } | |||
| // TODO: Handle nested messages which implement encoding.TextUnmarshaler. | |||
| return p.readStruct(fv, terminator) | |||
| case reflect.Uint32: | |||
| if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { | |||
| fv.SetUint(uint64(x)) | |||
| return nil | |||
| } | |||
| case reflect.Uint64: | |||
| if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { | |||
| fv.SetUint(x) | |||
| return nil | |||
| } | |||
| } | |||
| return p.errorf("invalid %v: %v", v.Type(), tok.value) | |||
| } | |||
|	// UnmarshalText reads a protocol buffer in text format. UnmarshalText resets pb | |||
| // before starting to unmarshal, so any existing data in pb is always removed. | |||
| // If a required field is not set and no other error occurs, | |||
| // UnmarshalText returns *RequiredNotSetError. | |||
| func UnmarshalText(s string, pb Message) error { | |||
| if um, ok := pb.(encoding.TextUnmarshaler); ok { | |||
| err := um.UnmarshalText([]byte(s)) | |||
| return err | |||
| } | |||
| pb.Reset() | |||
| v := reflect.ValueOf(pb) | |||
| if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { | |||
| return pe | |||
| } | |||
| return nil | |||
| } | |||
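|	// Editor's sketch, not part of the vendored file: a text round trip, where | |||
|	// pb and out are two values of the same generated message type. | |||
|	// | |||
|	//	text := MarshalTextString(pb) | |||
|	//	if err := UnmarshalText(text, out); err != nil { | |||
|	//		log.Fatal(err) | |||
|	//	} | |||
|	// | |||
|	// If the only problem is an unset required field, UnmarshalText returns a | |||
|	// *RequiredNotSetError after filling in everything else, so callers can | |||
|	// choose to tolerate that case. | |||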
| @@ -0,0 +1,15 @@ | |||
| # This is the official list of Snappy-Go authors for copyright purposes. | |||
| # This file is distinct from the CONTRIBUTORS files. | |||
| # See the latter for an explanation. | |||
| # Names should be added to this file as | |||
| # Name or Organization <email address> | |||
| # The email address is not required for organizations. | |||
| # Please keep the list sorted. | |||
| Damian Gryski <dgryski@gmail.com> | |||
| Google Inc. | |||
| Jan Mercl <0xjnml@gmail.com> | |||
| Rodolfo Carvalho <rhcarvalho@gmail.com> | |||
| Sebastien Binet <seb.binet@gmail.com> | |||
| @@ -0,0 +1,37 @@ | |||
| # This is the official list of people who can contribute | |||
| # (and typically have contributed) code to the Snappy-Go repository. | |||
| # The AUTHORS file lists the copyright holders; this file | |||
| # lists people. For example, Google employees are listed here | |||
| # but not in AUTHORS, because Google holds the copyright. | |||
| # | |||
| # The submission process automatically checks to make sure | |||
| # that people submitting code are listed in this file (by email address). | |||
| # | |||
| # Names should be added to this file only after verifying that | |||
| # the individual or the individual's organization has agreed to | |||
| # the appropriate Contributor License Agreement, found here: | |||
| # | |||
| # http://code.google.com/legal/individual-cla-v1.0.html | |||
| # http://code.google.com/legal/corporate-cla-v1.0.html | |||
| # | |||
| # The agreement for individuals can be filled out on the web. | |||
| # | |||
| # When adding J Random Contributor's name to this file, | |||
| # either J's name or J's organization's name should be | |||
| # added to the AUTHORS file, depending on whether the | |||
| # individual or corporate CLA was used. | |||
| # Names should be added to this file like so: | |||
| # Name <email address> | |||
| # Please keep the list sorted. | |||
| Damian Gryski <dgryski@gmail.com> | |||
| Jan Mercl <0xjnml@gmail.com> | |||
| Kai Backman <kaib@golang.org> | |||
| Marc-Antoine Ruel <maruel@chromium.org> | |||
| Nigel Tao <nigeltao@golang.org> | |||
| Rob Pike <r@golang.org> | |||
| Rodolfo Carvalho <rhcarvalho@gmail.com> | |||
| Russ Cox <rsc@golang.org> | |||
| Sebastien Binet <seb.binet@gmail.com> | |||
| @@ -0,0 +1,27 @@ | |||
| Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. | |||
| Redistribution and use in source and binary forms, with or without | |||
| modification, are permitted provided that the following conditions are | |||
| met: | |||
| * Redistributions of source code must retain the above copyright | |||
| notice, this list of conditions and the following disclaimer. | |||
| * Redistributions in binary form must reproduce the above | |||
| copyright notice, this list of conditions and the following disclaimer | |||
| in the documentation and/or other materials provided with the | |||
| distribution. | |||
| * Neither the name of Google Inc. nor the names of its | |||
| contributors may be used to endorse or promote products derived from | |||
| this software without specific prior written permission. | |||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| @@ -0,0 +1,7 @@ | |||
| The Snappy compression format in the Go programming language. | |||
| To download and install from source: | |||
| $ go get github.com/golang/snappy | |||
| Unless otherwise noted, the Snappy-Go source files are distributed | |||
| under the BSD-style license found in the LICENSE file. | |||
| @@ -0,0 +1,237 @@ | |||
| // Copyright 2011 The Snappy-Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package snappy | |||
| import ( | |||
| "encoding/binary" | |||
| "errors" | |||
| "io" | |||
| ) | |||
| var ( | |||
| // ErrCorrupt reports that the input is invalid. | |||
| ErrCorrupt = errors.New("snappy: corrupt input") | |||
| // ErrTooLarge reports that the uncompressed length is too large. | |||
| ErrTooLarge = errors.New("snappy: decoded block is too large") | |||
| // ErrUnsupported reports that the input isn't supported. | |||
| ErrUnsupported = errors.New("snappy: unsupported input") | |||
| errUnsupportedCopy4Tag = errors.New("snappy: unsupported COPY_4 tag") | |||
| errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") | |||
| ) | |||
| // DecodedLen returns the length of the decoded block. | |||
| func DecodedLen(src []byte) (int, error) { | |||
| v, _, err := decodedLen(src) | |||
| return v, err | |||
| } | |||
| // decodedLen returns the length of the decoded block and the number of bytes | |||
| // that the length header occupied. | |||
| func decodedLen(src []byte) (blockLen, headerLen int, err error) { | |||
| v, n := binary.Uvarint(src) | |||
| if n <= 0 || v > 0xffffffff { | |||
| return 0, 0, ErrCorrupt | |||
| } | |||
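| // wordSize is 64 on 64-bit platforms and 32 on 32-bit ones: | |||
| // (^uint(0) >> 32) & 1 is non-zero only when uint has 64 bits. | |||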
| const wordSize = 32 << (^uint(0) >> 32 & 1) | |||
| if wordSize == 32 && v > 0x7fffffff { | |||
| return 0, 0, ErrTooLarge | |||
| } | |||
| return int(v), n, nil | |||
| } | |||
| const ( | |||
| decodeErrCodeCorrupt = 1 | |||
| decodeErrCodeUnsupportedLiteralLength = 2 | |||
| decodeErrCodeUnsupportedCopy4Tag = 3 | |||
| ) | |||
| // Decode returns the decoded form of src. The returned slice may be a sub- | |||
| // slice of dst if dst was large enough to hold the entire decoded block. | |||
| // Otherwise, a newly allocated slice will be returned. | |||
| // | |||
| // The dst and src must not overlap. It is valid to pass a nil dst. | |||
| func Decode(dst, src []byte) ([]byte, error) { | |||
| dLen, s, err := decodedLen(src) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| if dLen <= len(dst) { | |||
| dst = dst[:dLen] | |||
| } else { | |||
| dst = make([]byte, dLen) | |||
| } | |||
| switch decode(dst, src[s:]) { | |||
| case 0: | |||
| return dst, nil | |||
| case decodeErrCodeUnsupportedLiteralLength: | |||
| return nil, errUnsupportedLiteralLength | |||
| case decodeErrCodeUnsupportedCopy4Tag: | |||
| return nil, errUnsupportedCopy4Tag | |||
| } | |||
| return nil, ErrCorrupt | |||
| } | |||
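| Decode is the block-format half of the package; paired with Encode (defined later in this vendor drop), a roundtrip looks like this sketch: | |||
| package main | |||
| import ( | |||
| "bytes" | |||
| "fmt" | |||
| "log" | |||
| "github.com/golang/snappy" | |||
| ) | |||
| func main() { | |||
| src := []byte("hello, hello, hello, world") | |||
| enc := snappy.Encode(nil, src) // a nil dst makes Encode allocate | |||
| dec, err := snappy.Decode(nil, enc) // likewise for Decode | |||
| if err != nil { | |||
| log.Fatal(err) | |||
| } | |||
| fmt.Println(bytes.Equal(src, dec)) // true | |||
| } | |||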
| // NewReader returns a new Reader that decompresses from r, using the framing | |||
| // format described at | |||
| // https://github.com/google/snappy/blob/master/framing_format.txt | |||
| func NewReader(r io.Reader) *Reader { | |||
| return &Reader{ | |||
| r: r, | |||
| decoded: make([]byte, maxBlockSize), | |||
| buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), | |||
| } | |||
| } | |||
| // Reader is an io.Reader that can read Snappy-compressed bytes. | |||
| type Reader struct { | |||
| r io.Reader | |||
| err error | |||
| decoded []byte | |||
| buf []byte | |||
| // decoded[i:j] contains decoded bytes that have not yet been passed on. | |||
| i, j int | |||
| readHeader bool | |||
| } | |||
| // Reset discards any buffered data, resets all state, and switches the Snappy | |||
| // reader to read from r. This permits reusing a Reader rather than allocating | |||
| // a new one. | |||
| func (r *Reader) Reset(reader io.Reader) { | |||
| r.r = reader | |||
| r.err = nil | |||
| r.i = 0 | |||
| r.j = 0 | |||
| r.readHeader = false | |||
| } | |||
| func (r *Reader) readFull(p []byte) (ok bool) { | |||
| if _, r.err = io.ReadFull(r.r, p); r.err != nil { | |||
| if r.err == io.ErrUnexpectedEOF { | |||
| r.err = ErrCorrupt | |||
| } | |||
| return false | |||
| } | |||
| return true | |||
| } | |||
| // Read satisfies the io.Reader interface. | |||
| func (r *Reader) Read(p []byte) (int, error) { | |||
| if r.err != nil { | |||
| return 0, r.err | |||
| } | |||
| for { | |||
| if r.i < r.j { | |||
| n := copy(p, r.decoded[r.i:r.j]) | |||
| r.i += n | |||
| return n, nil | |||
| } | |||
| if !r.readFull(r.buf[:4]) { | |||
| return 0, r.err | |||
| } | |||
| chunkType := r.buf[0] | |||
| if !r.readHeader { | |||
| if chunkType != chunkTypeStreamIdentifier { | |||
| r.err = ErrCorrupt | |||
| return 0, r.err | |||
| } | |||
| r.readHeader = true | |||
| } | |||
| chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 | |||
| if chunkLen > len(r.buf) { | |||
| r.err = ErrUnsupported | |||
| return 0, r.err | |||
| } | |||
| // The chunk types are specified at | |||
| // https://github.com/google/snappy/blob/master/framing_format.txt | |||
| switch chunkType { | |||
| case chunkTypeCompressedData: | |||
| // Section 4.2. Compressed data (chunk type 0x00). | |||
| if chunkLen < checksumSize { | |||
| r.err = ErrCorrupt | |||
| return 0, r.err | |||
| } | |||
| buf := r.buf[:chunkLen] | |||
| if !r.readFull(buf) { | |||
| return 0, r.err | |||
| } | |||
| checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 | |||
| buf = buf[checksumSize:] | |||
| n, err := DecodedLen(buf) | |||
| if err != nil { | |||
| r.err = err | |||
| return 0, r.err | |||
| } | |||
| if n > len(r.decoded) { | |||
| r.err = ErrCorrupt | |||
| return 0, r.err | |||
| } | |||
| if _, err := Decode(r.decoded, buf); err != nil { | |||
| r.err = err | |||
| return 0, r.err | |||
| } | |||
| if crc(r.decoded[:n]) != checksum { | |||
| r.err = ErrCorrupt | |||
| return 0, r.err | |||
| } | |||
| r.i, r.j = 0, n | |||
| continue | |||
| case chunkTypeUncompressedData: | |||
| // Section 4.3. Uncompressed data (chunk type 0x01). | |||
| if chunkLen < checksumSize { | |||
| r.err = ErrCorrupt | |||
| return 0, r.err | |||
| } | |||
| buf := r.buf[:checksumSize] | |||
| if !r.readFull(buf) { | |||
| return 0, r.err | |||
| } | |||
| checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 | |||
| // Read directly into r.decoded instead of via r.buf. | |||
| n := chunkLen - checksumSize | |||
| if !r.readFull(r.decoded[:n]) { | |||
| return 0, r.err | |||
| } | |||
| if crc(r.decoded[:n]) != checksum { | |||
| r.err = ErrCorrupt | |||
| return 0, r.err | |||
| } | |||
| r.i, r.j = 0, n | |||
| continue | |||
| case chunkTypeStreamIdentifier: | |||
| // Section 4.1. Stream identifier (chunk type 0xff). | |||
| if chunkLen != len(magicBody) { | |||
| r.err = ErrCorrupt | |||
| return 0, r.err | |||
| } | |||
| if !r.readFull(r.buf[:len(magicBody)]) { | |||
| return 0, r.err | |||
| } | |||
| for i := 0; i < len(magicBody); i++ { | |||
| if r.buf[i] != magicBody[i] { | |||
| r.err = ErrCorrupt | |||
| return 0, r.err | |||
| } | |||
| } | |||
| continue | |||
| } | |||
| if chunkType <= 0x7f { | |||
| // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). | |||
| r.err = ErrUnsupported | |||
| return 0, r.err | |||
| } | |||
| // Section 4.4 Padding (chunk type 0xfe). | |||
| // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). | |||
| if !r.readFull(r.buf[:chunkLen]) { | |||
| return 0, r.err | |||
| } | |||
| } | |||
| } | |||
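| Putting Reader together with the buffered Writer defined in encode.go below, a small framed-stream roundtrip might look like this sketch: | |||
| package main | |||
| import ( | |||
| "bytes" | |||
| "io" | |||
| "log" | |||
| "os" | |||
| "github.com/golang/snappy" | |||
| ) | |||
| func main() { | |||
| var buf bytes.Buffer | |||
| w := snappy.NewBufferedWriter(&buf) | |||
| if _, err := w.Write([]byte("framed snappy stream")); err != nil { | |||
| log.Fatal(err) | |||
| } | |||
| if err := w.Close(); err != nil { // Close flushes the final chunk | |||
| log.Fatal(err) | |||
| } | |||
| r := snappy.NewReader(&buf) // reads the stream identifier, then chunks | |||
| if _, err := io.Copy(os.Stdout, r); err != nil { | |||
| log.Fatal(err) | |||
| } | |||
| } | |||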
| @@ -0,0 +1,10 @@ | |||
| // Copyright 2016 The Snappy-Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package snappy | |||
| // decode has the same semantics as in decode_other.go. | |||
| // | |||
| //go:noescape | |||
| func decode(dst, src []byte) int | |||
| @@ -0,0 +1,472 @@ | |||
| // Copyright 2016 The Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| #include "textflag.h" | |||
| // func decode(dst, src []byte) int | |||
| // | |||
| // The asm code generally follows the pure Go code in decode_other.go, except | |||
| // where marked with a "!!!". | |||
| // | |||
| // All local variables fit into registers. The non-zero stack size is only to | |||
| // spill registers and push args when issuing a CALL. The register allocation: | |||
| // - AX scratch | |||
| // - BX scratch | |||
| // - CX length or x | |||
| // - DX offset | |||
| // - SI &src[s] | |||
| // - DI &dst[d] | |||
| // + R8 dst_base | |||
| // + R9 dst_len | |||
| // + R10 dst_base + dst_len | |||
| // + R11 src_base | |||
| // + R12 src_len | |||
| // + R13 src_base + src_len | |||
| // - R14 used by doCopy | |||
| // - R15 used by doCopy | |||
| // | |||
| // The registers R8-R13 (marked with a "+") are set at the start of the | |||
| // function, and after a CALL returns, and are not otherwise modified. | |||
| // | |||
| // The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. | |||
| // The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. | |||
| TEXT ·decode(SB), NOSPLIT, $48-56 | |||
| // Initialize SI, DI and R8-R13. | |||
| MOVQ dst_base+0(FP), R8 | |||
| MOVQ dst_len+8(FP), R9 | |||
| MOVQ R8, DI | |||
| MOVQ R8, R10 | |||
| ADDQ R9, R10 | |||
| MOVQ src_base+24(FP), R11 | |||
| MOVQ src_len+32(FP), R12 | |||
| MOVQ R11, SI | |||
| MOVQ R11, R13 | |||
| ADDQ R12, R13 | |||
| loop: | |||
| // for s < len(src) | |||
| CMPQ SI, R13 | |||
| JEQ end | |||
| // CX = uint32(src[s]) | |||
| // | |||
| // switch src[s] & 0x03 | |||
| MOVBLZX (SI), CX | |||
| MOVL CX, BX | |||
| ANDL $3, BX | |||
| CMPL BX, $1 | |||
| JAE tagCopy | |||
| // ---------------------------------------- | |||
| // The code below handles literal tags. | |||
| // case tagLiteral: | |||
| // x := uint32(src[s] >> 2) | |||
| // switch | |||
| SHRL $2, CX | |||
| CMPL CX, $60 | |||
| JAE tagLit60Plus | |||
| // case x < 60: | |||
| // s++ | |||
| INCQ SI | |||
| doLit: | |||
| // This is the end of the inner "switch", when we have a literal tag. | |||
| // | |||
| // We assume that CX == x and x fits in a uint32, where x is the variable | |||
| // used in the pure Go decode_other.go code. | |||
| // length = int(x) + 1 | |||
| // | |||
| // Unlike the pure Go code, we don't need to check if length <= 0 because | |||
| // CX can hold 64 bits, so the increment cannot overflow. | |||
| INCQ CX | |||
| // Prepare to check if copying length bytes will run past the end of dst or | |||
| // src. | |||
| // | |||
| // AX = len(dst) - d | |||
| // BX = len(src) - s | |||
| MOVQ R10, AX | |||
| SUBQ DI, AX | |||
| MOVQ R13, BX | |||
| SUBQ SI, BX | |||
| // !!! Try a faster technique for short (16 or fewer bytes) copies. | |||
| // | |||
| // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { | |||
| // goto callMemmove // Fall back on calling runtime·memmove. | |||
| // } | |||
| // | |||
| // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s | |||
| // against 21 instead of 16, because it cannot assume that all of its input | |||
| // is contiguous in memory and so it needs to leave enough source bytes to | |||
| // read the next tag without refilling buffers, but Go's Decode assumes | |||
| // contiguousness (the src argument is a []byte). | |||
| CMPQ CX, $16 | |||
| JGT callMemmove | |||
| CMPQ AX, $16 | |||
| JLT callMemmove | |||
| CMPQ BX, $16 | |||
| JLT callMemmove | |||
| // !!! Implement the copy from src to dst as a 16-byte load and store. | |||
| // (Decode's documentation says that dst and src must not overlap.) | |||
| // | |||
| // This always copies 16 bytes, instead of only length bytes, but that's | |||
| // OK. If the input is a valid Snappy encoding then subsequent iterations | |||
| // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a | |||
| // non-nil error), so the overrun will be ignored. | |||
| // | |||
| // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or | |||
| // 16-byte loads and stores. This technique probably wouldn't be as | |||
| // effective on architectures that are fussier about alignment. | |||
| MOVOU 0(SI), X0 | |||
| MOVOU X0, 0(DI) | |||
| // d += length | |||
| // s += length | |||
| ADDQ CX, DI | |||
| ADDQ CX, SI | |||
| JMP loop | |||
| callMemmove: | |||
| // if length > len(dst)-d || length > len(src)-s { etc } | |||
| CMPQ CX, AX | |||
| JGT errCorrupt | |||
| CMPQ CX, BX | |||
| JGT errCorrupt | |||
| // copy(dst[d:], src[s:s+length]) | |||
| // | |||
| // This means calling runtime·memmove(&dst[d], &src[s], length), so we push | |||
| // DI, SI and CX as arguments. Coincidentally, we also need to spill those | |||
| // three registers to the stack, to save local variables across the CALL. | |||
| MOVQ DI, 0(SP) | |||
| MOVQ SI, 8(SP) | |||
| MOVQ CX, 16(SP) | |||
| MOVQ DI, 24(SP) | |||
| MOVQ SI, 32(SP) | |||
| MOVQ CX, 40(SP) | |||
| CALL runtime·memmove(SB) | |||
| // Restore local variables: unspill registers from the stack and | |||
| // re-calculate R8-R13. | |||
| MOVQ 24(SP), DI | |||
| MOVQ 32(SP), SI | |||
| MOVQ 40(SP), CX | |||
| MOVQ dst_base+0(FP), R8 | |||
| MOVQ dst_len+8(FP), R9 | |||
| MOVQ R8, R10 | |||
| ADDQ R9, R10 | |||
| MOVQ src_base+24(FP), R11 | |||
| MOVQ src_len+32(FP), R12 | |||
| MOVQ R11, R13 | |||
| ADDQ R12, R13 | |||
| // d += length | |||
| // s += length | |||
| ADDQ CX, DI | |||
| ADDQ CX, SI | |||
| JMP loop | |||
| tagLit60Plus: | |||
| // !!! This fragment does the | |||
| // | |||
| // s += x - 58; if uint(s) > uint(len(src)) { etc } | |||
| // | |||
| // checks. In the asm version, we code it once instead of once per switch case. | |||
| ADDQ CX, SI | |||
| SUBQ $58, SI | |||
| MOVQ SI, BX | |||
| SUBQ R11, BX | |||
| CMPQ BX, R12 | |||
| JA errCorrupt | |||
| // case x == 60: | |||
| CMPL CX, $61 | |||
| JEQ tagLit61 | |||
| JA tagLit62Plus | |||
| // x = uint32(src[s-1]) | |||
| MOVBLZX -1(SI), CX | |||
| JMP doLit | |||
| tagLit61: | |||
| // case x == 61: | |||
| // x = uint32(src[s-2]) | uint32(src[s-1])<<8 | |||
| MOVWLZX -2(SI), CX | |||
| JMP doLit | |||
| tagLit62Plus: | |||
| CMPL CX, $62 | |||
| JA tagLit63 | |||
| // case x == 62: | |||
| // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 | |||
| MOVWLZX -3(SI), CX | |||
| MOVBLZX -1(SI), BX | |||
| SHLL $16, BX | |||
| ORL BX, CX | |||
| JMP doLit | |||
| tagLit63: | |||
| // case x == 63: | |||
| // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 | |||
| MOVL -4(SI), CX | |||
| JMP doLit | |||
| // The code above handles literal tags. | |||
| // ---------------------------------------- | |||
| // The code below handles copy tags. | |||
| tagCopy2: | |||
| // case tagCopy2: | |||
| // s += 3 | |||
| ADDQ $3, SI | |||
| // if uint(s) > uint(len(src)) { etc } | |||
| MOVQ SI, BX | |||
| SUBQ R11, BX | |||
| CMPQ BX, R12 | |||
| JA errCorrupt | |||
| // length = 1 + int(src[s-3])>>2 | |||
| SHRQ $2, CX | |||
| INCQ CX | |||
| // offset = int(src[s-2]) | int(src[s-1])<<8 | |||
| MOVWQZX -2(SI), DX | |||
| JMP doCopy | |||
| tagCopy: | |||
| // We have a copy tag. We assume that: | |||
| // - BX == src[s] & 0x03 | |||
| // - CX == src[s] | |||
| CMPQ BX, $2 | |||
| JEQ tagCopy2 | |||
| JA errUC4T | |||
| // case tagCopy1: | |||
| // s += 2 | |||
| ADDQ $2, SI | |||
| // if uint(s) > uint(len(src)) { etc } | |||
| MOVQ SI, BX | |||
| SUBQ R11, BX | |||
| CMPQ BX, R12 | |||
| JA errCorrupt | |||
| // offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) | |||
| MOVQ CX, DX | |||
| ANDQ $0xe0, DX | |||
| SHLQ $3, DX | |||
| MOVBQZX -1(SI), BX | |||
| ORQ BX, DX | |||
| // length = 4 + int(src[s-2])>>2&0x7 | |||
| SHRQ $2, CX | |||
| ANDQ $7, CX | |||
| ADDQ $4, CX | |||
| doCopy: | |||
| // This is the end of the outer "switch", when we have a copy tag. | |||
| // | |||
| // We assume that: | |||
| // - CX == length && CX > 0 | |||
| // - DX == offset | |||
| // if offset <= 0 { etc } | |||
| CMPQ DX, $0 | |||
| JLE errCorrupt | |||
| // if d < offset { etc } | |||
| MOVQ DI, BX | |||
| SUBQ R8, BX | |||
| CMPQ BX, DX | |||
| JLT errCorrupt | |||
| // if length > len(dst)-d { etc } | |||
| MOVQ R10, BX | |||
| SUBQ DI, BX | |||
| CMPQ CX, BX | |||
| JGT errCorrupt | |||
| // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length | |||
| // | |||
| // Set: | |||
| // - R14 = len(dst)-d | |||
| // - R15 = &dst[d-offset] | |||
| MOVQ R10, R14 | |||
| SUBQ DI, R14 | |||
| MOVQ DI, R15 | |||
| SUBQ DX, R15 | |||
| // !!! Try a faster technique for short (16 or fewer bytes) forward copies. | |||
| // | |||
| // First, try using two 8-byte load/stores, similar to the doLit technique | |||
| // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is | |||
| // still OK if offset >= 8. Note that this has to be two 8-byte load/stores | |||
| // and not one 16-byte load/store, and the first store has to be before the | |||
| // second load, due to the overlap if offset is in the range [8, 16). | |||
| // | |||
| // if length > 16 || offset < 8 || len(dst)-d < 16 { | |||
| // goto slowForwardCopy | |||
| // } | |||
| // copy 16 bytes | |||
| // d += length | |||
| CMPQ CX, $16 | |||
| JGT slowForwardCopy | |||
| CMPQ DX, $8 | |||
| JLT slowForwardCopy | |||
| CMPQ R14, $16 | |||
| JLT slowForwardCopy | |||
| MOVQ 0(R15), AX | |||
| MOVQ AX, 0(DI) | |||
| MOVQ 8(R15), BX | |||
| MOVQ BX, 8(DI) | |||
| ADDQ CX, DI | |||
| JMP loop | |||
| slowForwardCopy: | |||
| // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we | |||
| // can still try 8-byte load stores, provided we can overrun up to 10 extra | |||
| // bytes. As above, the overrun will be fixed up by subsequent iterations | |||
| // of the outermost loop. | |||
| // | |||
| // The C++ snappy code calls this technique IncrementalCopyFastPath. Its | |||
| // commentary says: | |||
| // | |||
| // ---- | |||
| // | |||
| // The main part of this loop is a simple copy of eight bytes at a time | |||
| // until we've copied (at least) the requested amount of bytes. However, | |||
| // if d and d-offset are less than eight bytes apart (indicating a | |||
| // repeating pattern of length < 8), we first need to expand the pattern in | |||
| // order to get the correct results. For instance, if the buffer looks like | |||
| // this, with the eight-byte <d-offset> and <d> patterns marked as | |||
| // intervals: | |||
| // | |||
| // abxxxxxxxxxxxx | |||
| // [------] d-offset | |||
| // [------] d | |||
| // | |||
| // a single eight-byte copy from <d-offset> to <d> will repeat the pattern | |||
| // once, after which we can move <d> two bytes without moving <d-offset>: | |||
| // | |||
| // ababxxxxxxxxxx | |||
| // [------] d-offset | |||
| // [------] d | |||
| // | |||
| // and repeat the exercise until the two no longer overlap. | |||
| // | |||
| // This allows us to do very well in the special case of one single byte | |||
| // repeated many times, without taking a big hit for more general cases. | |||
| // | |||
| // The worst case of extra writing past the end of the match occurs when | |||
| // offset == 1 and length == 1; the last copy will read from byte positions | |||
| // [0..7] and write to [4..11], whereas it was only supposed to write to | |||
| // position 1. Thus, ten excess bytes. | |||
| // | |||
| // ---- | |||
| // | |||
| // That "10 byte overrun" worst case is confirmed by Go's | |||
| // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy | |||
| // and finishSlowForwardCopy algorithm. | |||
| // | |||
| // if length > len(dst)-d-10 { | |||
| // goto verySlowForwardCopy | |||
| // } | |||
| SUBQ $10, R14 | |||
| CMPQ CX, R14 | |||
| JGT verySlowForwardCopy | |||
| makeOffsetAtLeast8: | |||
| // !!! As above, expand the pattern so that offset >= 8 and we can use | |||
| // 8-byte load/stores. | |||
| // | |||
| // for offset < 8 { | |||
| // copy 8 bytes from dst[d-offset:] to dst[d:] | |||
| // length -= offset | |||
| // d += offset | |||
| // offset += offset | |||
| // // The two previous lines together means that d-offset, and therefore | |||
| // // R15, is unchanged. | |||
| // } | |||
| CMPQ DX, $8 | |||
| JGE fixUpSlowForwardCopy | |||
| MOVQ (R15), BX | |||
| MOVQ BX, (DI) | |||
| SUBQ DX, CX | |||
| ADDQ DX, DI | |||
| ADDQ DX, DX | |||
| JMP makeOffsetAtLeast8 | |||
| fixUpSlowForwardCopy: | |||
| // !!! Add length (which might be negative now) to d (implied by DI being | |||
| // &dst[d]) so that d ends up at the right place when we jump back to the | |||
| // top of the loop. Before we do that, though, we save DI to AX so that, if | |||
| // length is positive, copying the remaining length bytes will write to the | |||
| // right place. | |||
| MOVQ DI, AX | |||
| ADDQ CX, DI | |||
| finishSlowForwardCopy: | |||
| // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative | |||
| // length means that we overrun, but as above, that will be fixed up by | |||
| // subsequent iterations of the outermost loop. | |||
| CMPQ CX, $0 | |||
| JLE loop | |||
| MOVQ (R15), BX | |||
| MOVQ BX, (AX) | |||
| ADDQ $8, R15 | |||
| ADDQ $8, AX | |||
| SUBQ $8, CX | |||
| JMP finishSlowForwardCopy | |||
| verySlowForwardCopy: | |||
| // verySlowForwardCopy is a simple implementation of forward copy. In C | |||
| // parlance, this is a do/while loop instead of a while loop, since we know | |||
| // that length > 0. In Go syntax: | |||
| // | |||
| // for { | |||
| // dst[d] = dst[d - offset] | |||
| // d++ | |||
| // length-- | |||
| // if length == 0 { | |||
| // break | |||
| // } | |||
| // } | |||
| MOVB (R15), BX | |||
| MOVB BX, (DI) | |||
| INCQ R15 | |||
| INCQ DI | |||
| DECQ CX | |||
| JNZ verySlowForwardCopy | |||
| JMP loop | |||
| // The code above handles copy tags. | |||
| // ---------------------------------------- | |||
| end: | |||
| // This is the end of the "for s < len(src)". | |||
| // | |||
| // if d != len(dst) { etc } | |||
| CMPQ DI, R10 | |||
| JNE errCorrupt | |||
| // return 0 | |||
| MOVQ $0, ret+48(FP) | |||
| RET | |||
| errCorrupt: | |||
| // return decodeErrCodeCorrupt | |||
| MOVQ $1, ret+48(FP) | |||
| RET | |||
| errUC4T: | |||
| // return decodeErrCodeUnsupportedCopy4Tag | |||
| MOVQ $3, ret+48(FP) | |||
| RET | |||
| @@ -0,0 +1,96 @@ | |||
| // Copyright 2016 The Snappy-Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // +build !amd64 | |||
| package snappy | |||
| // decode writes the decoding of src to dst. It assumes that the varint-encoded | |||
| // length of the decompressed bytes has already been read, and that len(dst) | |||
| // equals that length. | |||
| // | |||
| // It returns 0 on success or a decodeErrCodeXxx error code on failure. | |||
| func decode(dst, src []byte) int { | |||
| var d, s, offset, length int | |||
| for s < len(src) { | |||
| switch src[s] & 0x03 { | |||
| case tagLiteral: | |||
| x := uint32(src[s] >> 2) | |||
| switch { | |||
| case x < 60: | |||
| s++ | |||
| case x == 60: | |||
| s += 2 | |||
| if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. | |||
| return decodeErrCodeCorrupt | |||
| } | |||
| x = uint32(src[s-1]) | |||
| case x == 61: | |||
| s += 3 | |||
| if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. | |||
| return decodeErrCodeCorrupt | |||
| } | |||
| x = uint32(src[s-2]) | uint32(src[s-1])<<8 | |||
| case x == 62: | |||
| s += 4 | |||
| if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. | |||
| return decodeErrCodeCorrupt | |||
| } | |||
| x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 | |||
| case x == 63: | |||
| s += 5 | |||
| if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. | |||
| return decodeErrCodeCorrupt | |||
| } | |||
| x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 | |||
| } | |||
| length = int(x) + 1 | |||
| if length <= 0 { | |||
| return decodeErrCodeUnsupportedLiteralLength | |||
| } | |||
| if length > len(dst)-d || length > len(src)-s { | |||
| return decodeErrCodeCorrupt | |||
| } | |||
| copy(dst[d:], src[s:s+length]) | |||
| d += length | |||
| s += length | |||
| continue | |||
| case tagCopy1: | |||
| s += 2 | |||
| if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. | |||
| return decodeErrCodeCorrupt | |||
| } | |||
| length = 4 + int(src[s-2])>>2&0x7 | |||
| offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) | |||
| case tagCopy2: | |||
| s += 3 | |||
| if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. | |||
| return decodeErrCodeCorrupt | |||
| } | |||
| length = 1 + int(src[s-3])>>2 | |||
| offset = int(src[s-2]) | int(src[s-1])<<8 | |||
| case tagCopy4: | |||
| return decodeErrCodeUnsupportedCopy4Tag | |||
| } | |||
| if offset <= 0 || d < offset || length > len(dst)-d { | |||
| return decodeErrCodeCorrupt | |||
| } | |||
| // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike | |||
| // the built-in copy function, this byte-by-byte copy always runs | |||
| // forwards, even if the slices overlap. Conceptually, this is: | |||
| // | |||
| // d += forwardCopy(dst[d:d+length], dst[d-offset:]) | |||
| for end := d + length; d != end; d++ { | |||
| dst[d] = dst[d-offset] | |||
| } | |||
| } | |||
| if d != len(dst) { | |||
| return decodeErrCodeCorrupt | |||
| } | |||
| return 0 | |||
| } | |||
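| As a worked example of the copy-tag arithmetic above, the two bytes {0xED, 0x2A} decode by hand to a tagCopy1 with length 7 and offset 1834: | |||
| package main | |||
| import "fmt" | |||
| func main() { | |||
| b0, b1 := byte(0xED), byte(0x2A) | |||
| fmt.Println(b0 & 0x03)                 // 1 -> tagCopy1 | |||
| fmt.Println(4 + int(b0)>>2&0x7)        // length = 4 + 3 = 7 | |||
| fmt.Println(int(b0)&0xe0<<3 | int(b1)) // offset = 0x700 | 0x2A = 1834 | |||
| } | |||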
| @@ -0,0 +1,403 @@ | |||
| // Copyright 2011 The Snappy-Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| package snappy | |||
| import ( | |||
| "encoding/binary" | |||
| "errors" | |||
| "io" | |||
| ) | |||
| // maxOffset limits how far copy back-references can go, the same as the C++ | |||
| // code. | |||
| const maxOffset = 1 << 15 | |||
| // emitLiteral writes a literal chunk and returns the number of bytes written. | |||
| func emitLiteral(dst, lit []byte) int { | |||
| i, n := 0, uint(len(lit)-1) | |||
| switch { | |||
| case n < 60: | |||
| dst[0] = uint8(n)<<2 | tagLiteral | |||
| i = 1 | |||
| case n < 1<<8: | |||
| dst[0] = 60<<2 | tagLiteral | |||
| dst[1] = uint8(n) | |||
| i = 2 | |||
| case n < 1<<16: | |||
| dst[0] = 61<<2 | tagLiteral | |||
| dst[1] = uint8(n) | |||
| dst[2] = uint8(n >> 8) | |||
| i = 3 | |||
| case n < 1<<24: | |||
| dst[0] = 62<<2 | tagLiteral | |||
| dst[1] = uint8(n) | |||
| dst[2] = uint8(n >> 8) | |||
| dst[3] = uint8(n >> 16) | |||
| i = 4 | |||
| case int64(n) < 1<<32: | |||
| dst[0] = 63<<2 | tagLiteral | |||
| dst[1] = uint8(n) | |||
| dst[2] = uint8(n >> 8) | |||
| dst[3] = uint8(n >> 16) | |||
| dst[4] = uint8(n >> 24) | |||
| i = 5 | |||
| default: | |||
| panic("snappy: source buffer is too long") | |||
| } | |||
| if copy(dst[i:], lit) != len(lit) { | |||
| panic("snappy: destination buffer is too short") | |||
| } | |||
| return i + len(lit) | |||
| } | |||
| // emitCopy writes a copy chunk and returns the number of bytes written. | |||
| func emitCopy(dst []byte, offset, length int32) int { | |||
| i := 0 | |||
| for length > 0 { | |||
| x := length - 4 | |||
| if 0 <= x && x < 1<<3 && offset < 1<<11 { | |||
| dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1 | |||
| dst[i+1] = uint8(offset) | |||
| i += 2 | |||
| break | |||
| } | |||
| x = length | |||
| if x > 1<<6 { | |||
| x = 1 << 6 | |||
| } | |||
| dst[i+0] = uint8(x-1)<<2 | tagCopy2 | |||
| dst[i+1] = uint8(offset) | |||
| dst[i+2] = uint8(offset >> 8) | |||
| i += 3 | |||
| length -= x | |||
| } | |||
| return i | |||
| } | |||
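| A worked trace of the bytes these two emitters produce: a 3-byte literal needs one tag byte (n = 2 < 60), and a copy of length 7 from offset 6 fits the two-byte tagCopy1 form (x = 3 < 8, offset < 2048): | |||
| package main | |||
| import "fmt" | |||
| func main() { | |||
| lit := byte(2<<2 | 0x00) // tagLiteral for "abc"; 'a','b','c' follow | |||
| c0 := uint8(6>>8)&0x07<<5 | uint8(3)<<2 | 0x01 // tagCopy1 | |||
| c1 := uint8(6) | |||
| fmt.Printf("% x\n", []byte{lit, c0, c1}) // 08 0d 06 | |||
| } | |||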
| // Encode returns the encoded form of src. The returned slice may be a sub- | |||
| // slice of dst if dst was large enough to hold the entire encoded block. | |||
| // Otherwise, a newly allocated slice will be returned. | |||
| // | |||
| // It is valid to pass a nil dst. | |||
| func Encode(dst, src []byte) []byte { | |||
| if n := MaxEncodedLen(len(src)); n < 0 { | |||
| panic(ErrTooLarge) | |||
| } else if len(dst) < n { | |||
| dst = make([]byte, n) | |||
| } | |||
| // The block starts with the varint-encoded length of the decompressed bytes. | |||
| d := binary.PutUvarint(dst, uint64(len(src))) | |||
| for len(src) > 0 { | |||
| p := src | |||
| src = nil | |||
| if len(p) > maxBlockSize { | |||
| p, src = p[:maxBlockSize], p[maxBlockSize:] | |||
| } | |||
| d += encodeBlock(dst[d:], p) | |||
| } | |||
| return dst[:d] | |||
| } | |||
| // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It | |||
| // assumes that the varint-encoded length of the decompressed bytes has already | |||
| // been written. | |||
| // | |||
| // It also assumes that: | |||
| // len(dst) >= MaxEncodedLen(len(src)) && | |||
| // 0 < len(src) && len(src) <= maxBlockSize | |||
| func encodeBlock(dst, src []byte) (d int) { | |||
| // Return early if src is short. | |||
| if len(src) <= 4 { | |||
| return emitLiteral(dst, src) | |||
| } | |||
| // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. | |||
| const maxTableSize = 1 << 14 | |||
| shift, tableSize := uint(32-8), 1<<8 | |||
| for tableSize < maxTableSize && tableSize < len(src) { | |||
| shift-- | |||
| tableSize *= 2 | |||
| } | |||
| var table [maxTableSize]int32 | |||
| // Iterate over the source bytes. | |||
| var ( | |||
| s int32 // The iterator position. | |||
| t int32 // The last position with the same hash as s. | |||
| lit int32 // The start position of any pending literal bytes. | |||
| // Copied from the C++ snappy implementation: | |||
| // | |||
| // Heuristic match skipping: If 32 bytes are scanned with no matches | |||
| // found, start looking only at every other byte. If 32 more bytes are | |||
| // scanned, look at every third byte, etc.. When a match is found, | |||
| // immediately go back to looking at every byte. This is a small loss | |||
| // (~5% performance, ~0.1% density) for compressible data due to more | |||
| // bookkeeping, but for non-compressible data (such as JPEG) it's a | |||
| // huge win since the compressor quickly "realizes" the data is | |||
| // incompressible and doesn't bother looking for matches everywhere. | |||
| // | |||
| // The "skip" variable keeps track of how many bytes there are since | |||
| // the last match; dividing it by 32 (ie. right-shifting by five) gives | |||
| // the number of bytes to move ahead for each iteration. | |||
| skip uint32 = 32 | |||
| ) | |||
| for uint32(s+3) < uint32(len(src)) { // The uint32 conversions catch overflow from the +3. | |||
| // Update the hash table. | |||
| b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] | |||
| h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 | |||
| p := &table[(h*0x1e35a7bd)>>shift] | |||
| // We need to store values in [-1, inf) in table. To save | |||
| // some initialization time, (re)use the table's zero value | |||
| // and shift the values against this zero: add 1 on writes, | |||
| // subtract 1 on reads. | |||
| t, *p = *p-1, s+1 | |||
| // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. | |||
| if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { | |||
| s += int32(skip >> 5) | |||
| skip++ | |||
| continue | |||
| } | |||
| skip = 32 | |||
| // Otherwise, we have a match. First, emit any pending literal bytes. | |||
| if lit != s { | |||
| d += emitLiteral(dst[d:], src[lit:s]) | |||
| } | |||
| // Extend the match to be as long as possible. | |||
| s0 := s | |||
| s, t = s+4, t+4 | |||
| for int(s) < len(src) && src[s] == src[t] { | |||
| s++ | |||
| t++ | |||
| } | |||
| // Emit the copied bytes. | |||
| d += emitCopy(dst[d:], s-t, s-s0) | |||
| lit = s | |||
| } | |||
| // Emit any final pending literal bytes and return. | |||
| if int(lit) != len(src) { | |||
| d += emitLiteral(dst[d:], src[lit:]) | |||
| } | |||
| return d | |||
| } | |||
| // MaxEncodedLen returns the maximum length of a snappy block, given its | |||
| // uncompressed length. | |||
| // | |||
| // It will return a negative value if srcLen is too large to encode. | |||
| func MaxEncodedLen(srcLen int) int { | |||
| n := uint64(srcLen) | |||
| if n > 0xffffffff { | |||
| return -1 | |||
| } | |||
| // Compressed data can be defined as: | |||
| // compressed := item* literal* | |||
| // item := literal* copy | |||
| // | |||
| // The trailing literal sequence has a space blowup of at most 62/60 | |||
| // since a literal of length 60 needs one tag byte + one extra byte | |||
| // for length information. | |||
| // | |||
| // Item blowup is trickier to measure. Suppose the "copy" op copies | |||
| // 4 bytes of data. Because of a special check in the encoding code, | |||
| // we produce a 4-byte copy only if the offset is < 65536. Therefore | |||
| // the copy op takes 3 bytes to encode, and this type of item leads | |||
| // to at most the 62/60 blowup for representing literals. | |||
| // | |||
| // Suppose the "copy" op copies 5 bytes of data. If the offset is big | |||
| // enough, it will take 5 bytes to encode the copy op. Therefore the | |||
| // worst case here is a one-byte literal followed by a five-byte copy. | |||
| // That is, 6 bytes of input turn into 7 bytes of "compressed" data. | |||
| // | |||
| // This last factor dominates the blowup, so the final estimate is: | |||
| n = 32 + n + n/6 | |||
| if n > 0xffffffff { | |||
| return -1 | |||
| } | |||
| return int(n) | |||
| } | |||
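| Evaluating that bound at the 65536-byte maximum block size reproduces the maxEncodedLenOfMaxBlockSize constant hard-coded later in snappy.go: 32 + 65536 + 65536/6 = 76490. | |||
| package main | |||
| import ( | |||
| "fmt" | |||
| "github.com/golang/snappy" | |||
| ) | |||
| func main() { | |||
| fmt.Println(snappy.MaxEncodedLen(65536)) // 76490 | |||
| } | |||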
| var errClosed = errors.New("snappy: Writer is closed") | |||
| // NewWriter returns a new Writer that compresses to w. | |||
| // | |||
| // The Writer returned does not buffer writes. There is no need to Flush or | |||
| // Close such a Writer. | |||
| // | |||
| // Deprecated: the Writer returned is not suitable for many small writes, only | |||
| // for few large writes. Use NewBufferedWriter instead, which is efficient | |||
| // regardless of the frequency and shape of the writes, and remember to Close | |||
| // that Writer when done. | |||
| func NewWriter(w io.Writer) *Writer { | |||
| return &Writer{ | |||
| w: w, | |||
| obuf: make([]byte, obufLen), | |||
| } | |||
| } | |||
| // NewBufferedWriter returns a new Writer that compresses to w, using the | |||
| // framing format described at | |||
| // https://github.com/google/snappy/blob/master/framing_format.txt | |||
| // | |||
| // The Writer returned buffers writes. Users must call Close to guarantee all | |||
| // data has been forwarded to the underlying io.Writer. They may also call | |||
| // Flush zero or more times before calling Close. | |||
| func NewBufferedWriter(w io.Writer) *Writer { | |||
| return &Writer{ | |||
| w: w, | |||
| ibuf: make([]byte, 0, maxBlockSize), | |||
| obuf: make([]byte, obufLen), | |||
| } | |||
| } | |||
| // Writer is an io.Writer that can write Snappy-compressed bytes. | |||
| type Writer struct { | |||
| w io.Writer | |||
| err error | |||
| // ibuf is a buffer for the incoming (uncompressed) bytes. | |||
| // | |||
| // Its use is optional. For backwards compatibility, Writers created by the | |||
| // NewWriter function have ibuf == nil, do not buffer incoming bytes, and | |||
| // therefore do not need to be Flush'ed or Close'd. | |||
| ibuf []byte | |||
| // obuf is a buffer for the outgoing (compressed) bytes. | |||
| obuf []byte | |||
| // wroteStreamHeader is whether we have written the stream header. | |||
| wroteStreamHeader bool | |||
| } | |||
| // Reset discards the writer's state and switches the Snappy writer to write to | |||
| // w. This permits reusing a Writer rather than allocating a new one. | |||
| func (w *Writer) Reset(writer io.Writer) { | |||
| w.w = writer | |||
| w.err = nil | |||
| if w.ibuf != nil { | |||
| w.ibuf = w.ibuf[:0] | |||
| } | |||
| w.wroteStreamHeader = false | |||
| } | |||
| // Write satisfies the io.Writer interface. | |||
| func (w *Writer) Write(p []byte) (nRet int, errRet error) { | |||
| if w.ibuf == nil { | |||
| // Do not buffer incoming bytes. This does not perform or compress well | |||
| // if the caller of Writer.Write writes many small slices. This | |||
| // behavior is therefore deprecated, but still supported for backwards | |||
| // compatibility with code that doesn't explicitly Flush or Close. | |||
| return w.write(p) | |||
| } | |||
| // The remainder of this method is based on bufio.Writer.Write from the | |||
| // standard library. | |||
| for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { | |||
| var n int | |||
| if len(w.ibuf) == 0 { | |||
| // Large write, empty buffer. | |||
| // Write directly from p to avoid copy. | |||
| n, _ = w.write(p) | |||
| } else { | |||
| n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) | |||
| w.ibuf = w.ibuf[:len(w.ibuf)+n] | |||
| w.Flush() | |||
| } | |||
| nRet += n | |||
| p = p[n:] | |||
| } | |||
| if w.err != nil { | |||
| return nRet, w.err | |||
| } | |||
| n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) | |||
| w.ibuf = w.ibuf[:len(w.ibuf)+n] | |||
| nRet += n | |||
| return nRet, nil | |||
| } | |||
| func (w *Writer) write(p []byte) (nRet int, errRet error) { | |||
| if w.err != nil { | |||
| return 0, w.err | |||
| } | |||
| for len(p) > 0 { | |||
| obufStart := len(magicChunk) | |||
| if !w.wroteStreamHeader { | |||
| w.wroteStreamHeader = true | |||
| copy(w.obuf, magicChunk) | |||
| obufStart = 0 | |||
| } | |||
| var uncompressed []byte | |||
| if len(p) > maxBlockSize { | |||
| uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] | |||
| } else { | |||
| uncompressed, p = p, nil | |||
| } | |||
| checksum := crc(uncompressed) | |||
| // Compress the buffer, discarding the result if the improvement | |||
| // isn't at least 12.5%. | |||
| compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) | |||
| chunkType := uint8(chunkTypeCompressedData) | |||
| chunkLen := 4 + len(compressed) | |||
| obufEnd := obufHeaderLen + len(compressed) | |||
| if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { | |||
| chunkType = chunkTypeUncompressedData | |||
| chunkLen = 4 + len(uncompressed) | |||
| obufEnd = obufHeaderLen | |||
| } | |||
| // Fill in the per-chunk header that comes before the body. | |||
| w.obuf[len(magicChunk)+0] = chunkType | |||
| w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) | |||
| w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) | |||
| w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) | |||
| w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) | |||
| w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) | |||
| w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) | |||
| w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) | |||
| if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { | |||
| w.err = err | |||
| return nRet, err | |||
| } | |||
| if chunkType == chunkTypeUncompressedData { | |||
| if _, err := w.w.Write(uncompressed); err != nil { | |||
| w.err = err | |||
| return nRet, err | |||
| } | |||
| } | |||
| nRet += len(uncompressed) | |||
| } | |||
| return nRet, nil | |||
| } | |||
| // Flush flushes the Writer to its underlying io.Writer. | |||
| func (w *Writer) Flush() error { | |||
| if w.err != nil { | |||
| return w.err | |||
| } | |||
| if len(w.ibuf) == 0 { | |||
| return nil | |||
| } | |||
| w.write(w.ibuf) | |||
| w.ibuf = w.ibuf[:0] | |||
| return w.err | |||
| } | |||
| // Close calls Flush and then closes the Writer. | |||
| func (w *Writer) Close() error { | |||
| w.Flush() | |||
| ret := w.err | |||
| if w.err == nil { | |||
| w.err = errClosed | |||
| } | |||
| return ret | |||
| } | |||
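| Because Reset clears the sticky error that Close sets, a single Writer can be reused across many outputs; a sketch, where compressTo and the snappyutil package name are illustrative rather than part of this file: | |||
| package snappyutil | |||
| import ( | |||
| "io" | |||
| "github.com/golang/snappy" | |||
| ) | |||
| // compressTo frames p onto dst, reusing w across calls via Reset. | |||
| func compressTo(w *snappy.Writer, dst io.Writer, p []byte) error { | |||
| w.Reset(dst) // clears any sticky error, including the one Close sets | |||
| if _, err := w.Write(p); err != nil { | |||
| return err | |||
| } | |||
| return w.Close() // flushes the final chunk, then poisons w until Reset | |||
| } | |||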
| @@ -0,0 +1,84 @@ | |||
| // Copyright 2011 The Snappy-Go Authors. All rights reserved. | |||
| // Use of this source code is governed by a BSD-style | |||
| // license that can be found in the LICENSE file. | |||
| // Package snappy implements the snappy block-based compression format. | |||
| // It aims for very high speeds and reasonable compression. | |||
| // | |||
| // The C++ snappy implementation is at https://github.com/google/snappy | |||
| package snappy // import "github.com/golang/snappy" | |||
| import ( | |||
| "hash/crc32" | |||
| ) | |||
| /* | |||
| Each encoded block begins with the varint-encoded length of the decoded data, | |||
| followed by a sequence of chunks. Chunks begin and end on byte boundaries. The | |||
| first byte of each chunk is broken into its 2 least and 6 most significant bits | |||
| called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. | |||
| Zero means a literal tag. All other values mean a copy tag. | |||
| For literal tags: | |||
| - If m < 60, the next 1 + m bytes are literal bytes. | |||
| - Otherwise, let n be the little-endian unsigned integer denoted by the next | |||
| m - 59 bytes. The next 1 + n bytes after that are literal bytes. | |||
| For copy tags, length bytes are copied from offset bytes ago, in the style of | |||
| Lempel-Ziv compression algorithms. In particular: | |||
| - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). | |||
| The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 | |||
| of the offset. The next byte is bits 0-7 of the offset. | |||
| - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). | |||
| The length is 1 + m. The offset is the little-endian unsigned integer | |||
| denoted by the next 2 bytes. | |||
| - For l == 3, this tag is a legacy format that is no longer supported. | |||
| */ | |||
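| To make the format concrete, the smallest interesting block can be built by hand from the description above: the varint length 3, one literal tag byte with m = 2, then the three literal bytes. It decodes cleanly: | |||
| package main | |||
| import ( | |||
| "fmt" | |||
| "github.com/golang/snappy" | |||
| ) | |||
| func main() { | |||
| block := []byte{0x03, 2<<2 | 0x00, 'a', 'b', 'c'} | |||
| dec, err := snappy.Decode(nil, block) | |||
| fmt.Printf("%s %v\n", dec, err) // abc <nil> | |||
| } | |||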
| const ( | |||
| tagLiteral = 0x00 | |||
| tagCopy1 = 0x01 | |||
| tagCopy2 = 0x02 | |||
| tagCopy4 = 0x03 | |||
| ) | |||
| const ( | |||
| checksumSize = 4 | |||
| chunkHeaderSize = 4 | |||
| magicChunk = "\xff\x06\x00\x00" + magicBody | |||
| magicBody = "sNaPpY" | |||
| // maxBlockSize is the maximum size of the input to encodeBlock. It is not | |||
| // part of the wire format per se, but some parts of the encoder assume | |||
| // that an offset fits into a uint16. | |||
| // | |||
| // Also, for the framing format (Writer type instead of Encode function), | |||
| // https://github.com/google/snappy/blob/master/framing_format.txt says | |||
| // that "the uncompressed data in a chunk must be no longer than 65536 | |||
| // bytes". | |||
| maxBlockSize = 65536 | |||
| // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is | |||
| // hard coded to be a const instead of a variable, so that obufLen can also | |||
| // be a const. Their equivalence is confirmed by | |||
| // TestMaxEncodedLenOfMaxBlockSize. | |||
| maxEncodedLenOfMaxBlockSize = 76490 | |||
| obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize | |||
| obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize | |||
| ) | |||
| const ( | |||
| chunkTypeCompressedData = 0x00 | |||
| chunkTypeUncompressedData = 0x01 | |||
| chunkTypePadding = 0xfe | |||
| chunkTypeStreamIdentifier = 0xff | |||
| ) | |||
| var crcTable = crc32.MakeTable(crc32.Castagnoli) | |||
| // crc implements the checksum specified in section 3 of | |||
| // https://github.com/google/snappy/blob/master/framing_format.txt | |||
| func crc(b []byte) uint32 { | |||
| c := crc32.Update(0, crcTable, b) | |||
| return uint32(c>>15|c<<17) + 0xa282ead8 | |||
| } | |||
| @@ -0,0 +1,24 @@ | |||
| Copyright (c) 2013 Richard Musiol. All rights reserved. | |||
| Redistribution and use in source and binary forms, with or without | |||
| modification, are permitted provided that the following conditions are | |||
| met: | |||
| * Redistributions of source code must retain the above copyright | |||
| notice, this list of conditions and the following disclaimer. | |||
| * Redistributions in binary form must reproduce the above | |||
| copyright notice, this list of conditions and the following disclaimer | |||
| in the documentation and/or other materials provided with the | |||
| distribution. | |||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |||
| @@ -0,0 +1,168 @@ | |||
| // Package js provides functions for interacting with native JavaScript APIs. Calls to these functions are treated specially by GopherJS and translated directly to their corresponding JavaScript syntax. | |||
| // | |||
| // Use MakeWrapper to expose methods to JavaScript. When passing values directly, the following type conversions are performed: | |||
| // | |||
| // | Go type | JavaScript type | Conversions back to interface{} | | |||
| // | --------------------- | --------------------- | ------------------------------- | | |||
| // | bool | Boolean | bool | | |||
| // | integers and floats | Number | float64 | | |||
| // | string | String | string | | |||
| // | []int8 | Int8Array | []int8 | | |||
| // | []int16 | Int16Array | []int16 | | |||
| // | []int32, []int | Int32Array | []int | | |||
| // | []uint8 | Uint8Array | []uint8 | | |||
| // | []uint16 | Uint16Array | []uint16 | | |||
| // | []uint32, []uint | Uint32Array | []uint | | |||
| // | []float32 | Float32Array | []float32 | | |||
| // | []float64 | Float64Array | []float64 | | |||
| // | all other slices | Array | []interface{} | | |||
| // | arrays | see slice type | see slice type | | |||
| // | functions | Function | func(...interface{}) *js.Object | | |||
| // | time.Time | Date | time.Time | | |||
| // | - | instanceof Node | *js.Object | | |||
| // | maps, structs | instanceof Object | map[string]interface{} | | |||
| // | |||
| // Additionally, for a struct containing a *js.Object field, only the content of the field will be passed to JavaScript and vice versa. | |||
| package js | |||
| // Object is a container for a native JavaScript object. Calls to its methods are treated specially by GopherJS and translated directly to their JavaScript syntax. A nil pointer to Object is equal to JavaScript's "null". Object can not be used as a map key. | |||
| type Object struct{ object *Object } | |||
| // Get returns the object's property with the given key. | |||
| func (o *Object) Get(key string) *Object { return o.object.Get(key) } | |||
| // Set assigns the value to the object's property with the given key. | |||
| func (o *Object) Set(key string, value interface{}) { o.object.Set(key, value) } | |||
| // Delete removes the object's property with the given key. | |||
| func (o *Object) Delete(key string) { o.object.Delete(key) } | |||
| // Length returns the object's "length" property, converted to int. | |||
| func (o *Object) Length() int { return o.object.Length() } | |||
| // Index returns the i'th element of an array. | |||
| func (o *Object) Index(i int) *Object { return o.object.Index(i) } | |||
| // SetIndex sets the i'th element of an array. | |||
| func (o *Object) SetIndex(i int, value interface{}) { o.object.SetIndex(i, value) } | |||
| // Call calls the object's method with the given name. | |||
| func (o *Object) Call(name string, args ...interface{}) *Object { return o.object.Call(name, args...) } | |||
| // Invoke calls the object itself. This will fail if it is not a function. | |||
| func (o *Object) Invoke(args ...interface{}) *Object { return o.object.Invoke(args...) } | |||
| // New creates a new instance of this type object. This will fail if it is not a function (constructor). | |||
| func (o *Object) New(args ...interface{}) *Object { return o.object.New(args...) } | |||
| // Bool returns the object converted to bool according to JavaScript type conversions. | |||
| func (o *Object) Bool() bool { return o.object.Bool() } | |||
| // String returns the object converted to string according to JavaScript type conversions. | |||
| func (o *Object) String() string { return o.object.String() } | |||
| // Int returns the object converted to int according to JavaScript type conversions (parseInt). | |||
| func (o *Object) Int() int { return o.object.Int() } | |||
| // Int64 returns the object converted to int64 according to JavaScript type conversions (parseInt). | |||
| func (o *Object) Int64() int64 { return o.object.Int64() } | |||
| // Uint64 returns the object converted to uint64 according to JavaScript type conversions (parseInt). | |||
| func (o *Object) Uint64() uint64 { return o.object.Uint64() } | |||
| // Float returns the object converted to float64 according to JavaScript type conversions (parseFloat). | |||
| func (o *Object) Float() float64 { return o.object.Float() } | |||
| // Interface returns the object converted to interface{}. See GopherJS' README for details. | |||
| func (o *Object) Interface() interface{} { return o.object.Interface() } | |||
| // Unsafe returns the object as an uintptr, which can be converted via unsafe.Pointer. Not intended for public use. | |||
| func (o *Object) Unsafe() uintptr { return o.object.Unsafe() } | |||
| // Error encapsulates JavaScript errors. Those are turned into a Go panic and may be recovered, giving an *Error that holds the JavaScript error object. | |||
| type Error struct { | |||
| *Object | |||
| } | |||
| // Error returns the message of the encapsulated JavaScript error object. | |||
| func (err *Error) Error() string { | |||
| return "JavaScript error: " + err.Get("message").String() | |||
| } | |||
| // Stack returns the stack property of the encapsulated JavaScript error object. | |||
| func (err *Error) Stack() string { | |||
| return err.Get("stack").String() | |||
| } | |||
| // Global gives JavaScript's global object ("window" for browsers and "GLOBAL" for Node.js). | |||
| var Global *Object | |||
| // Module gives the value of the "module" variable set by Node.js. Hint: Set a module export with 'js.Module.Get("exports").Set("exportName", ...)'. | |||
| var Module *Object | |||
| // Undefined gives the JavaScript value "undefined". | |||
| var Undefined *Object | |||
| // Debugger gets compiled to JavaScript's "debugger;" statement. | |||
| func Debugger() {} | |||
| // InternalObject returns the internal JavaScript object that represents i. Not intended for public use. | |||
| func InternalObject(i interface{}) *Object { | |||
| return nil | |||
| } | |||
| // MakeFunc wraps a function and gives access to the values of JavaScript's "this" and "arguments" keywords. | |||
| func MakeFunc(fn func(this *Object, arguments []*Object) interface{}) *Object { | |||
| return Global.Call("$makeFunc", InternalObject(fn)) | |||
| } | |||
| // Keys returns the keys of the given JavaScript object. | |||
| func Keys(o *Object) []string { | |||
| if o == nil || o == Undefined { | |||
| return nil | |||
| } | |||
| a := Global.Get("Object").Call("keys", o) | |||
| s := make([]string, a.Length()) | |||
| for i := 0; i < a.Length(); i++ { | |||
| s[i] = a.Index(i).String() | |||
| } | |||
| return s | |||
| } | |||
| // MakeWrapper creates a JavaScript object which has wrappers for the exported methods of i. Use explicit getter and setter methods to expose struct fields to JavaScript. | |||
| func MakeWrapper(i interface{}) *Object { | |||
| v := InternalObject(i) | |||
| o := Global.Get("Object").New() | |||
| o.Set("__internal_object__", v) | |||
| methods := v.Get("constructor").Get("methods") | |||
| for i := 0; i < methods.Length(); i++ { | |||
| m := methods.Index(i) | |||
| if m.Get("pkg").String() != "" { // not exported | |||
| continue | |||
| } | |||
| o.Set(m.Get("name").String(), func(args ...*Object) *Object { | |||
| return Global.Call("$externalizeFunction", v.Get(m.Get("prop").String()), m.Get("typ"), true).Call("apply", v, args) | |||
| }) | |||
| } | |||
| return o | |||
| } | |||
| // NewArrayBuffer creates a JavaScript ArrayBuffer from a byte slice. | |||
| func NewArrayBuffer(b []byte) *Object { | |||
| slice := InternalObject(b) | |||
| offset := slice.Get("$offset").Int() | |||
| length := slice.Get("$length").Int() | |||
| return slice.Get("$array").Get("buffer").Call("slice", offset, offset+length) | |||
| } | |||
| // M is a simple map type. It is intended as a shorthand for JavaScript objects (before conversion). | |||
| type M map[string]interface{} | |||
| // S is a simple slice type. It is intended as a shorthand for JavaScript arrays (before conversion). | |||
| type S []interface{} | |||
| func init() { | |||
| // avoid dead code elimination | |||
| e := Error{} | |||
| _ = e | |||
| } | |||
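| A sketch of MakeWrapper in use, following the pattern this package documents; it only does something meaningful under a GopherJS build: | |||
| package main | |||
| import "github.com/gopherjs/gopherjs/js" | |||
| // Pet is exposed to JavaScript through MakeWrapper; only exported | |||
| // methods are visible, so the name field gets explicit accessors. | |||
| type Pet struct{ name string } | |||
| func (p *Pet) Name() string     { return p.name } | |||
| func (p *Pet) SetName(n string) { p.name = n } | |||
| func main() { | |||
| // JavaScript side: pet.Name(), pet.SetName("..."). | |||
| js.Global.Set("pet", js.MakeWrapper(&Pet{name: "gopher"})) | |||
| } | |||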
| @@ -0,0 +1,191 @@ | |||
| All files in this repository are licensed as follows. If you contribute | |||
| to this repository, it is assumed that you license your contribution | |||
| under the same license unless you state otherwise. | |||
| All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. | |||
| This software is licensed under the LGPLv3, included below. | |||
| As a special exception to the GNU Lesser General Public License version 3 | |||
| ("LGPL3"), the copyright holders of this Library give you permission to | |||
| convey to a third party a Combined Work that links statically or dynamically | |||
| to this Library without providing any Minimal Corresponding Source or | |||
| Minimal Application Code as set out in 4d or providing the installation | |||
| information set out in section 4e, provided that you comply with the other | |||
| provisions of LGPL3 and provided that you meet, for the Application the | |||
| terms and conditions of the license(s) which apply to the Application. | |||
| Except as stated in this special exception, the provisions of LGPL3 will | |||
| continue to comply in full to this Library. If you modify this Library, you | |||
| may apply this exception to your version of this Library, but you are not | |||
| obliged to do so. If you do not wish to do so, delete this exception | |||
| statement from your version. This exception does not (and cannot) modify any | |||
| license terms which apply to the Application, with which you must still | |||
| comply. | |||
| GNU LESSER GENERAL PUBLIC LICENSE | |||
| Version 3, 29 June 2007 | |||
| Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> | |||
| Everyone is permitted to copy and distribute verbatim copies | |||
| of this license document, but changing it is not allowed. | |||
| This version of the GNU Lesser General Public License incorporates | |||
| the terms and conditions of version 3 of the GNU General Public | |||
| License, supplemented by the additional permissions listed below. | |||
| 0. Additional Definitions. | |||
| As used herein, "this License" refers to version 3 of the GNU Lesser | |||
| General Public License, and the "GNU GPL" refers to version 3 of the GNU | |||
| General Public License. | |||
| "The Library" refers to a covered work governed by this License, | |||
| other than an Application or a Combined Work as defined below. | |||
| An "Application" is any work that makes use of an interface provided | |||
| by the Library, but which is not otherwise based on the Library. | |||
| Defining a subclass of a class defined by the Library is deemed a mode | |||
| of using an interface provided by the Library. | |||
| A "Combined Work" is a work produced by combining or linking an | |||
| Application with the Library. The particular version of the Library | |||
| with which the Combined Work was made is also called the "Linked | |||
| Version". | |||
| The "Minimal Corresponding Source" for a Combined Work means the | |||
| Corresponding Source for the Combined Work, excluding any source code | |||
| for portions of the Combined Work that, considered in isolation, are | |||
| based on the Application, and not on the Linked Version. | |||
| The "Corresponding Application Code" for a Combined Work means the | |||
| object code and/or source code for the Application, including any data | |||
| and utility programs needed for reproducing the Combined Work from the | |||
| Application, but excluding the System Libraries of the Combined Work. | |||
| 1. Exception to Section 3 of the GNU GPL. | |||
| You may convey a covered work under sections 3 and 4 of this License | |||
| without being bound by section 3 of the GNU GPL. | |||
| 2. Conveying Modified Versions. | |||
| If you modify a copy of the Library, and, in your modifications, a | |||
| facility refers to a function or data to be supplied by an Application | |||
| that uses the facility (other than as an argument passed when the | |||
| facility is invoked), then you may convey a copy of the modified | |||
| version: | |||
| a) under this License, provided that you make a good faith effort to | |||
| ensure that, in the event an Application does not supply the | |||
| function or data, the facility still operates, and performs | |||
| whatever part of its purpose remains meaningful, or | |||
| b) under the GNU GPL, with none of the additional permissions of | |||
| this License applicable to that copy. | |||
| 3. Object Code Incorporating Material from Library Header Files. | |||
| The object code form of an Application may incorporate material from | |||
| a header file that is part of the Library. You may convey such object | |||
| code under terms of your choice, provided that, if the incorporated | |||
| material is not limited to numerical parameters, data structure | |||
| layouts and accessors, or small macros, inline functions and templates | |||
| (ten or fewer lines in length), you do both of the following: | |||
| a) Give prominent notice with each copy of the object code that the | |||
| Library is used in it and that the Library and its use are | |||
| covered by this License. | |||
| b) Accompany the object code with a copy of the GNU GPL and this license | |||
| document. | |||
| 4. Combined Works. | |||
| You may convey a Combined Work under terms of your choice that, | |||
| taken together, effectively do not restrict modification of the | |||
| portions of the Library contained in the Combined Work and reverse | |||
| engineering for debugging such modifications, if you also do each of | |||
| the following: | |||
| a) Give prominent notice with each copy of the Combined Work that | |||
| the Library is used in it and that the Library and its use are | |||
| covered by this License. | |||
| b) Accompany the Combined Work with a copy of the GNU GPL and this license | |||
| document. | |||
| c) For a Combined Work that displays copyright notices during | |||
| execution, include the copyright notice for the Library among | |||
| these notices, as well as a reference directing the user to the | |||
| copies of the GNU GPL and this license document. | |||
| d) Do one of the following: | |||
| 0) Convey the Minimal Corresponding Source under the terms of this | |||
| License, and the Corresponding Application Code in a form | |||
| suitable for, and under terms that permit, the user to | |||
| recombine or relink the Application with a modified version of | |||
| the Linked Version to produce a modified Combined Work, in the | |||
| manner specified by section 6 of the GNU GPL for conveying | |||
| Corresponding Source. | |||
| 1) Use a suitable shared library mechanism for linking with the | |||
| Library. A suitable mechanism is one that (a) uses at run time | |||
| a copy of the Library already present on the user's computer | |||
| system, and (b) will operate properly with a modified version | |||
| of the Library that is interface-compatible with the Linked | |||
| Version. | |||
| e) Provide Installation Information, but only if you would otherwise | |||
| be required to provide such information under section 6 of the | |||
| GNU GPL, and only to the extent that such information is | |||
| necessary to install and execute a modified version of the | |||
| Combined Work produced by recombining or relinking the | |||
| Application with a modified version of the Linked Version. (If | |||
| you use option 4d0, the Installation Information must accompany | |||
| the Minimal Corresponding Source and Corresponding Application | |||
| Code. If you use option 4d1, you must provide the Installation | |||
| Information in the manner specified by section 6 of the GNU GPL | |||
| for conveying Corresponding Source.) | |||
| 5. Combined Libraries. | |||
| You may place library facilities that are a work based on the | |||
| Library side by side in a single library together with other library | |||
| facilities that are not Applications and are not covered by this | |||
| License, and convey such a combined library under terms of your | |||
| choice, if you do both of the following: | |||
| a) Accompany the combined library with a copy of the same work based | |||
| on the Library, uncombined with any other library facilities, | |||
| conveyed under the terms of this License. | |||
| b) Give prominent notice with the combined library that part of it | |||
| is a work based on the Library, and explaining where to find the | |||
| accompanying uncombined form of the same work. | |||
| 6. Revised Versions of the GNU Lesser General Public License. | |||
| The Free Software Foundation may publish revised and/or new versions | |||
| of the GNU Lesser General Public License from time to time. Such new | |||
| versions will be similar in spirit to the present version, but may | |||
| differ in detail to address new problems or concerns. | |||
| Each version is given a distinguishing version number. If the | |||
| Library as you received it specifies that a certain numbered version | |||
| of the GNU Lesser General Public License "or any later version" | |||
| applies to it, you have the option of following the terms and | |||
| conditions either of that published version or of any later version | |||
| published by the Free Software Foundation. If the Library as you | |||
| received it does not specify a version number of the GNU Lesser | |||
| General Public License, you may choose any version of the GNU Lesser | |||
| General Public License ever published by the Free Software Foundation. | |||
| If the Library as you received it specifies that a proxy can decide | |||
| whether future versions of the GNU Lesser General Public License shall | |||
| apply, that proxy's public statement of acceptance of any version is | |||
| permanent authorization for you to choose that version for the | |||
| Library. | |||
| @@ -0,0 +1,11 @@ | |||
| default: check | |||
| check: | |||
| go test && go test -compiler gccgo | |||
| docs: | |||
| godoc2md github.com/juju/errors > README.md | |||
| sed -i 's|\[godoc-link-here\]|[](https://godoc.org/github.com/juju/errors)|' README.md | |||
| .PHONY: default check docs | |||
| @@ -0,0 +1,536 @@ | |||
| # errors | |||
| import "github.com/juju/errors" | |||
| [](https://godoc.org/github.com/juju/errors) | |||
| The juju/errors package provides an easy way to annotate errors without losing the | |||
| original error context. | |||
| The exported `New` and `Errorf` functions are designed to replace the | |||
| `errors.New` and `fmt.Errorf` functions respectively. The same underlying | |||
| error is there, but the package also records the location at which the error | |||
| was created. | |||
| A primary use case for this library is to add extra context any time an | |||
| error is returned from a function. | |||
| if err := SomeFunc(); err != nil { | |||
| return err | |||
| } | |||
| This instead becomes: | |||
| if err := SomeFunc(); err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| which just records the file and line number of the Trace call, or | |||
| if err := SomeFunc(); err != nil { | |||
| return errors.Annotate(err, "more context") | |||
| } | |||
| which also adds an annotation to the error. | |||
| When you want to check to see if an error is of a particular type, a helper | |||
| function is normally exported by the package that returned the error, like the | |||
| `os` package does. The underlying cause of the error is available using the | |||
| `Cause` function. | |||
| os.IsNotExist(errors.Cause(err)) | |||
| The result of the `Error()` call on an annotated error is the annotations joined | |||
| with colons, then the result of the `Error()` method for the underlying error | |||
| that was the cause. | |||
| err := errors.Errorf("original") | |||
| err = errors.Annotatef(err, "context") | |||
| err = errors.Annotatef(err, "more context") | |||
| err.Error() -> "more context: context: original" | |||
| Obviously, recording the file and line is not very useful if you | |||
| cannot get them back out again. | |||
| errors.ErrorStack(err) | |||
| will return something like: | |||
| first error | |||
| github.com/juju/errors/annotation_test.go:193: | |||
| github.com/juju/errors/annotation_test.go:194: annotation | |||
| github.com/juju/errors/annotation_test.go:195: | |||
| github.com/juju/errors/annotation_test.go:196: more context | |||
| github.com/juju/errors/annotation_test.go:197: | |||
| The first error was generated by an external system, so there was no location | |||
| associated. The second, fourth, and last lines were generated with Trace calls, | |||
| and the other two through Annotate. | |||
| Sometimes when responding to an error you want to return a more specific error | |||
| for the situation. | |||
| if err := FindField(field); err != nil { | |||
| return errors.Wrap(err, errors.NotFoundf(field)) | |||
| } | |||
| This returns an error where the complete error stack is still available, and | |||
| `errors.Cause()` will return the `NotFound` error. | |||
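| Putting the pieces above together, a small self-contained sketch (not taken from the package's own documentation) that creates, traces, annotates and finally prints an error: | |||
| ``` go | |||
| package main | |||
| import ( | |||
| 	"fmt" | |||
| 	"github.com/juju/errors" | |||
| ) | |||
| func readConfig() error { | |||
| 	return errors.New("config file missing") // location recorded here | |||
| } | |||
| func main() { | |||
| 	err := readConfig() | |||
| 	err = errors.Trace(err)                      // records file:line only | |||
| 	err = errors.Annotate(err, "startup failed") // records file:line plus a message | |||
| 	fmt.Println(err)                    // "startup failed: config file missing" | |||
| 	fmt.Println(errors.ErrorStack(err)) // one line per recorded location | |||
| } | |||
| ``` | |||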
| ## func AlreadyExistsf | |||
| ``` go | |||
| func AlreadyExistsf(format string, args ...interface{}) error | |||
| ``` | |||
| AlreadyExistsf returns an error which satisfies IsAlreadyExists(). | |||
| ## func Annotate | |||
| ``` go | |||
| func Annotate(other error, message string) error | |||
| ``` | |||
| Annotate is used to add extra context to an existing error. The location of | |||
| the Annotate call is recorded with the annotations. The file and line are | |||
| also recorded. | |||
| For example: | |||
| if err := SomeFunc(); err != nil { | |||
| return errors.Annotate(err, "failed to frombulate") | |||
| } | |||
| ## func Annotatef | |||
| ``` go | |||
| func Annotatef(other error, format string, args ...interface{}) error | |||
| ``` | |||
| Annotatef is used to add extra context to an existing error. The location of | |||
| the Annotatef call is recorded with the annotations. The file and line are | |||
| also recorded. | |||
| For example: | |||
| if err := SomeFunc(); err != nil { | |||
| return errors.Annotatef(err, "failed to frombulate the %s", arg) | |||
| } | |||
| ## func Cause | |||
| ``` go | |||
| func Cause(err error) error | |||
| ``` | |||
| Cause returns the cause of the given error. This will be either the | |||
| original error, or the result of a Wrap or Mask call. | |||
| Cause is the usual way to diagnose errors that may have been wrapped by | |||
| the other errors functions. | |||
| ## func DeferredAnnotatef | |||
| ``` go | |||
| func DeferredAnnotatef(err *error, format string, args ...interface{}) | |||
| ``` | |||
| DeferredAnnotatef annotates the given error (when it is not nil) with the given | |||
| format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef | |||
| does nothing. This method is used in a defer statement in order to annotate any | |||
| resulting error with the same message. | |||
| For example: | |||
| defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg) | |||
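| A fuller sketch (illustrative; `frombulate` is a hypothetical helper) showing the named return value that the deferred call relies on: | |||
| ``` go | |||
| func process(arg string) (err error) { | |||
| 	// Annotates err on the way out, but only when it is non-nil. | |||
| 	defer errors.DeferredAnnotatef(&err, "failed to frombulate the %s", arg) | |||
| 	return frombulate(arg) | |||
| } | |||
| ``` | |||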
| ## func Details | |||
| ``` go | |||
| func Details(err error) string | |||
| ``` | |||
| Details returns information about the stack of errors wrapped by err, in | |||
| the format: | |||
| [{filename:99: error one} {otherfile:55: cause of error one}] | |||
| This is a terse alternative to ErrorStack as it returns a single line. | |||
| ## func ErrorStack | |||
| ``` go | |||
| func ErrorStack(err error) string | |||
| ``` | |||
| ErrorStack returns a string representation of the annotated error. If the | |||
| error passed as the parameter is not an annotated error, the result is | |||
| simply the result of the Error() method on that error. | |||
| If the error is an annotated error, a multi-line string is returned where | |||
| each line represents one entry in the annotation stack. The full filename | |||
| from the call stack is used in the output. | |||
| first error | |||
| github.com/juju/errors/annotation_test.go:193: | |||
| github.com/juju/errors/annotation_test.go:194: annotation | |||
| github.com/juju/errors/annotation_test.go:195: | |||
| github.com/juju/errors/annotation_test.go:196: more context | |||
| github.com/juju/errors/annotation_test.go:197: | |||
| ## func Errorf | |||
| ``` go | |||
| func Errorf(format string, args ...interface{}) error | |||
| ``` | |||
| Errorf creates a new annotated error and records the location at which the | |||
| error is created. It is a drop-in replacement for fmt.Errorf. | |||
| For example: | |||
| return errors.Errorf("validation failed: %s", message) | |||
| ## func IsAlreadyExists | |||
| ``` go | |||
| func IsAlreadyExists(err error) bool | |||
| ``` | |||
| IsAlreadyExists reports whether the error was created with | |||
| AlreadyExistsf() or NewAlreadyExists(). | |||
| ## func IsNotFound | |||
| ``` go | |||
| func IsNotFound(err error) bool | |||
| ``` | |||
| IsNotFound reports whether err was created with NotFoundf() or | |||
| NewNotFound(). | |||
| ## func IsNotImplemented | |||
| ``` go | |||
| func IsNotImplemented(err error) bool | |||
| ``` | |||
| IsNotImplemented reports whether err was created with | |||
| NotImplementedf() or NewNotImplemented(). | |||
| ## func IsNotSupported | |||
| ``` go | |||
| func IsNotSupported(err error) bool | |||
| ``` | |||
| IsNotSupported reports whether the error was created with | |||
| NotSupportedf() or NewNotSupported(). | |||
| ## func IsNotValid | |||
| ``` go | |||
| func IsNotValid(err error) bool | |||
| ``` | |||
| IsNotValid reports whether the error was created with NotValidf() or | |||
| NewNotValid(). | |||
| ## func IsUnauthorized | |||
| ``` go | |||
| func IsUnauthorized(err error) bool | |||
| ``` | |||
| IsUnauthorized reports whether err was created with Unauthorizedf() or | |||
| NewUnauthorized(). | |||
| ## func Mask | |||
| ``` go | |||
| func Mask(other error) error | |||
| ``` | |||
| Mask hides the underlying error type, and records the location of the masking. | |||
| ## func Maskf | |||
| ``` go | |||
| func Maskf(other error, format string, args ...interface{}) error | |||
| ``` | |||
| Maskf masks the given error with the given format string and arguments (like | |||
| fmt.Sprintf), returning a new error that maintains the error stack, but | |||
| hides the underlying error type. The error string still contains the full | |||
| annotations. If you want to hide the annotations, call Wrap. | |||
| ## func New | |||
| ``` go | |||
| func New(message string) error | |||
| ``` | |||
| New is a drop-in replacement for the standard library errors package that | |||
| records the location at which the error is created. | |||
| For example: | |||
| return errors.New("validation failed") | |||
| ## func NewAlreadyExists | |||
| ``` go | |||
| func NewAlreadyExists(err error, msg string) error | |||
| ``` | |||
| NewAlreadyExists returns an error which wraps err and satisfies | |||
| IsAlreadyExists(). | |||
| ## func NewNotFound | |||
| ``` go | |||
| func NewNotFound(err error, msg string) error | |||
| ``` | |||
| NewNotFound returns an error which wraps err that satisfies | |||
| IsNotFound(). | |||
| ## func NewNotImplemented | |||
| ``` go | |||
| func NewNotImplemented(err error, msg string) error | |||
| ``` | |||
| NewNotImplemented returns an error which wraps err and satisfies | |||
| IsNotImplemented(). | |||
| ## func NewNotSupported | |||
| ``` go | |||
| func NewNotSupported(err error, msg string) error | |||
| ``` | |||
| NewNotSupported returns an error which wraps err and satisfies | |||
| IsNotSupported(). | |||
| ## func NewNotValid | |||
| ``` go | |||
| func NewNotValid(err error, msg string) error | |||
| ``` | |||
| NewNotValid returns an error which wraps err and satisfies IsNotValid(). | |||
| ## func NewUnauthorized | |||
| ``` go | |||
| func NewUnauthorized(err error, msg string) error | |||
| ``` | |||
| NewUnauthorized returns an error which wraps err and satisfies | |||
| IsUnauthorized(). | |||
| ## func NotFoundf | |||
| ``` go | |||
| func NotFoundf(format string, args ...interface{}) error | |||
| ``` | |||
| NotFoundf returns an error which satisfies IsNotFound(). | |||
| ## func NotImplementedf | |||
| ``` go | |||
| func NotImplementedf(format string, args ...interface{}) error | |||
| ``` | |||
| NotImplementedf returns an error which satisfies IsNotImplemented(). | |||
| ## func NotSupportedf | |||
| ``` go | |||
| func NotSupportedf(format string, args ...interface{}) error | |||
| ``` | |||
| NotSupportedf returns an error which satisfies IsNotSupported(). | |||
| ## func NotValidf | |||
| ``` go | |||
| func NotValidf(format string, args ...interface{}) error | |||
| ``` | |||
| NotValidf returns an error which satisfies IsNotValid(). | |||
| ## func Trace | |||
| ``` go | |||
| func Trace(other error) error | |||
| ``` | |||
| Trace adds the location of the Trace call to the stack. The Cause of the | |||
| resulting error is the same as the error parameter. If the other error is | |||
| nil, the result will be nil. | |||
| For example: | |||
| if err := SomeFunc(); err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| ## func Unauthorizedf | |||
| ``` go | |||
| func Unauthorizedf(format string, args ...interface{}) error | |||
| ``` | |||
| Unauthorizedf returns an error which satisfies IsUnauthorized(). | |||
| ## func Wrap | |||
| ``` go | |||
| func Wrap(other, newDescriptive error) error | |||
| ``` | |||
| Wrap changes the Cause of the error. The location of the Wrap call is also | |||
| stored in the error stack. | |||
| For example: | |||
| if err := SomeFunc(); err != nil { | |||
| newErr := &packageError{"more context", private_value} | |||
| return errors.Wrap(err, newErr) | |||
| } | |||
| ## func Wrapf | |||
| ``` go | |||
| func Wrapf(other, newDescriptive error, format string, args ...interface{}) error | |||
| ``` | |||
| Wrapf changes the Cause of the error, and adds an annotation. The location | |||
| of the Wrap call is also stored in the error stack. | |||
| For example: | |||
| if err := SomeFunc(); err != nil { | |||
| return errors.Wrapf(err, simpleErrorType, "invalid value %q", value) | |||
| } | |||
| ## type Err | |||
| ``` go | |||
| type Err struct { | |||
| // contains filtered or unexported fields | |||
| } | |||
| ``` | |||
| Err holds a description of an error along with information about | |||
| where the error was created. | |||
| It may be embedded in custom error types to add extra information that | |||
| this errors package can understand. | |||
| ### func NewErr | |||
| ``` go | |||
| func NewErr(format string, args ...interface{}) Err | |||
| ``` | |||
| NewErr is used to return an Err for the purpose of embedding in other | |||
| structures. The location is not specified, and needs to be set with a call | |||
| to SetLocation. | |||
| For example: | |||
| type FooError struct { | |||
| errors.Err | |||
| code int | |||
| } | |||
| func NewFooError(code int) error { | |||
| err := &FooError{errors.NewErr("foo"), code} | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
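| A natural companion (a sketch, mirroring the package's own `Is*` helpers) is a checker that unwraps with `Cause` before the type assertion: | |||
| ``` go | |||
| func IsFooError(err error) bool { | |||
| 	_, ok := errors.Cause(err).(*FooError) | |||
| 	return ok | |||
| } | |||
| ``` | |||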
| ### func (\*Err) Cause | |||
| ``` go | |||
| func (e *Err) Cause() error | |||
| ``` | |||
| The Cause of an error is the most recent error in the error stack that | |||
| meets one of these criteria: the original error that was raised; the new | |||
| error that was passed into the Wrap function; the most recently masked | |||
| error; or nil if the error itself is considered the Cause. Normally this | |||
| method is not invoked directly, but instead through the Cause stand alone | |||
| function. | |||
| ### func (\*Err) Error | |||
| ``` go | |||
| func (e *Err) Error() string | |||
| ``` | |||
| Error implements error.Error. | |||
| ### func (\*Err) Location | |||
| ``` go | |||
| func (e *Err) Location() (filename string, line int) | |||
| ``` | |||
| Location is the file and line of where the error was most recently | |||
| created or annotated. | |||
| ### func (\*Err) Message | |||
| ``` go | |||
| func (e *Err) Message() string | |||
| ``` | |||
| Message returns the message stored with the most recent location. This is | |||
| the empty string if the most recent call was Trace, or the message stored | |||
| with Annotate or Mask. | |||
| ### func (\*Err) SetLocation | |||
| ``` go | |||
| func (e *Err) SetLocation(callDepth int) | |||
| ``` | |||
| SetLocation records the source location of the error at callDepth stack | |||
| frames above the call. | |||
| ### func (\*Err) StackTrace | |||
| ``` go | |||
| func (e *Err) StackTrace() []string | |||
| ``` | |||
| StackTrace returns one string for each location recorded in the stack of | |||
| errors. The first value is the originating error, with a line for each | |||
| other annotation or tracing of the error. | |||
| ### func (\*Err) Underlying | |||
| ``` go | |||
| func (e *Err) Underlying() error | |||
| ``` | |||
| Underlying returns the previous error in the error stack, if any. Clients | |||
| should rarely, if ever, call this method: it is used to build the error | |||
| stack and should not be introspected. More specifically, clients should not | |||
| depend on anything but the `Cause` of an error. | |||
| - - - | |||
| Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) | |||
| @@ -0,0 +1,81 @@ | |||
| // Copyright 2013, 2014 Canonical Ltd. | |||
| // Licensed under the LGPLv3, see LICENCE file for details. | |||
| /* | |||
| [godoc-link-here] | |||
| The juju/errors package provides an easy way to annotate errors without losing the | |||
| original error context. | |||
| The exported `New` and `Errorf` functions are designed to replace the | |||
| `errors.New` and `fmt.Errorf` functions respectively. The same underlying | |||
| error is there, but the package also records the location at which the error | |||
| was created. | |||
| A primary use case for this library is to add extra context any time an | |||
| error is returned from a function. | |||
| if err := SomeFunc(); err != nil { | |||
| return err | |||
| } | |||
| This instead becomes: | |||
| if err := SomeFunc(); err != nil { | |||
| return errors.Trace(err) | |||
| } | |||
| which just records the file and line number of the Trace call, or | |||
| if err := SomeFunc(); err != nil { | |||
| return errors.Annotate(err, "more context") | |||
| } | |||
| which also adds an annotation to the error. | |||
| When you want to check to see if an error is of a particular type, a helper | |||
| function is normally exported by the package that returned the error, like the | |||
| `os` package does. The underlying cause of the error is available using the | |||
| `Cause` function. | |||
| os.IsNotExist(errors.Cause(err)) | |||
| The result of the `Error()` call on an annotated error is the annotations joined | |||
| with colons, then the result of the `Error()` method for the underlying error | |||
| that was the cause. | |||
| err := errors.Errorf("original") | |||
| err = errors.Annotatef(err, "context") | |||
| err = errors.Annotatef(err, "more context") | |||
| err.Error() -> "more context: context: original" | |||
| Obviously, recording the file and line is not very useful if you | |||
| cannot get them back out again. | |||
| errors.ErrorStack(err) | |||
| will return something like: | |||
| first error | |||
| github.com/juju/errors/annotation_test.go:193: | |||
| github.com/juju/errors/annotation_test.go:194: annotation | |||
| github.com/juju/errors/annotation_test.go:195: | |||
| github.com/juju/errors/annotation_test.go:196: more context | |||
| github.com/juju/errors/annotation_test.go:197: | |||
| The first error was generated by an external system, so there was no location | |||
| associated. The second, fourth, and last lines were generated with Trace calls, | |||
| and the other two through Annotate. | |||
| Sometimes when responding to an error you want to return a more specific error | |||
| for the situation. | |||
| if err := FindField(field); err != nil { | |||
| return errors.Wrap(err, errors.NotFoundf(field)) | |||
| } | |||
| This returns an error where the complete error stack is still available, and | |||
| `errors.Cause()` will return the `NotFound` error. | |||
| */ | |||
| package errors | |||
| @@ -0,0 +1,145 @@ | |||
| // Copyright 2014 Canonical Ltd. | |||
| // Licensed under the LGPLv3, see LICENCE file for details. | |||
| package errors | |||
| import ( | |||
| "fmt" | |||
| "reflect" | |||
| "runtime" | |||
| ) | |||
| // Err holds a description of an error along with information about | |||
| // where the error was created. | |||
| // | |||
| // It may be embedded in custom error types to add extra information that | |||
| // this errors package can understand. | |||
| type Err struct { | |||
| // message holds an annotation of the error. | |||
| message string | |||
| // cause holds the cause of the error as returned | |||
| // by the Cause method. | |||
| cause error | |||
| // previous holds the previous error in the error stack, if any. | |||
| previous error | |||
| // file and line hold the source code location where the error was | |||
| // created. | |||
| file string | |||
| line int | |||
| } | |||
| // NewErr is used to return an Err for the purpose of embedding in other | |||
| // structures. The location is not specified, and needs to be set with a call | |||
| // to SetLocation. | |||
| // | |||
| // For example: | |||
| // type FooError struct { | |||
| // errors.Err | |||
| // code int | |||
| // } | |||
| // | |||
| // func NewFooError(code int) error { | |||
| // err := &FooError{errors.NewErr("foo"), code} | |||
| // err.SetLocation(1) | |||
| // return err | |||
| // } | |||
| func NewErr(format string, args ...interface{}) Err { | |||
| return Err{ | |||
| message: fmt.Sprintf(format, args...), | |||
| } | |||
| } | |||
| // NewErrWithCause is used to return an Err that records another error as its cause, for embedding in other | |||
| // structures. The location is not specified, and needs to be set with a call | |||
| // to SetLocation. | |||
| // | |||
| // For example: | |||
| // type FooError struct { | |||
| // errors.Err | |||
| // code int | |||
| // } | |||
| // | |||
| // func (e *FooError) Annotate(format string, args ...interface{}) error { | |||
| // err := &FooError{errors.NewErrWithCause(e.Err, format, args...), e.code} | |||
| // err.SetLocation(1) | |||
| // return err | |||
| // } | |||
| func NewErrWithCause(other error, format string, args ...interface{}) Err { | |||
| return Err{ | |||
| message: fmt.Sprintf(format, args...), | |||
| cause: Cause(other), | |||
| previous: other, | |||
| } | |||
| } | |||
| // Location is the file and line of where the error was most recently | |||
| // created or annotated. | |||
| func (e *Err) Location() (filename string, line int) { | |||
| return e.file, e.line | |||
| } | |||
| // Underlying returns the previous error in the error stack, if any. Clients | |||
| // should rarely, if ever, call this method: it is used to build the error | |||
| // stack and should not be introspected. More specifically, clients should not | |||
| // depend on anything but the `Cause` of an error. | |||
| func (e *Err) Underlying() error { | |||
| return e.previous | |||
| } | |||
| // The Cause of an error is the most recent error in the error stack that | |||
| // meets one of these criteria: the original error that was raised; the new | |||
| // error that was passed into the Wrap function; the most recently masked | |||
| // error; or nil if the error itself is considered the Cause. Normally this | |||
| // method is not invoked directly, but instead through the Cause stand alone | |||
| // function. | |||
| func (e *Err) Cause() error { | |||
| return e.cause | |||
| } | |||
| // Message returns the message stored with the most recent location. This is | |||
| // the empty string if the most recent call was Trace, or the message stored | |||
| // with Annotate or Mask. | |||
| func (e *Err) Message() string { | |||
| return e.message | |||
| } | |||
| // Error implements error.Error. | |||
| func (e *Err) Error() string { | |||
| // We want to walk up the stack of errors showing the annotations | |||
| // as long as the cause is the same. | |||
| err := e.previous | |||
| if !sameError(Cause(err), e.cause) && e.cause != nil { | |||
| err = e.cause | |||
| } | |||
| switch { | |||
| case err == nil: | |||
| return e.message | |||
| case e.message == "": | |||
| return err.Error() | |||
| } | |||
| return fmt.Sprintf("%s: %v", e.message, err) | |||
| } | |||
| // SetLocation records the source location of the error at callDepth stack | |||
| // frames above the call. | |||
| func (e *Err) SetLocation(callDepth int) { | |||
| _, file, line, _ := runtime.Caller(callDepth + 1) | |||
| e.file = trimGoPath(file) | |||
| e.line = line | |||
| } | |||
| // StackTrace returns one string for each location recorded in the stack of | |||
| // errors. The first value is the originating error, with a line for each | |||
| // other annotation or tracing of the error. | |||
| func (e *Err) StackTrace() []string { | |||
| return errorStack(e) | |||
| } | |||
| // Ideally we'd have a way to check identity, but deep equals will do. | |||
| func sameError(e1, e2 error) bool { | |||
| return reflect.DeepEqual(e1, e2) | |||
| } | |||
| @@ -0,0 +1,284 @@ | |||
| // Copyright 2014 Canonical Ltd. | |||
| // Licensed under the LGPLv3, see LICENCE file for details. | |||
| package errors | |||
| import ( | |||
| "fmt" | |||
| ) | |||
| // wrap is a helper to construct an Err. | |||
| func wrap(err error, format, suffix string, args ...interface{}) Err { | |||
| newErr := Err{ | |||
| message: fmt.Sprintf(format+suffix, args...), | |||
| previous: err, | |||
| } | |||
| newErr.SetLocation(2) | |||
| return newErr | |||
| } | |||
| // notFound represents an error when something has not been found. | |||
| type notFound struct { | |||
| Err | |||
| } | |||
| // NotFoundf returns an error which satisfies IsNotFound(). | |||
| func NotFoundf(format string, args ...interface{}) error { | |||
| return ¬Found{wrap(nil, format, " not found", args...)} | |||
| } | |||
| // NewNotFound returns an error which wraps err that satisfies | |||
| // IsNotFound(). | |||
| func NewNotFound(err error, msg string) error { | |||
| return ¬Found{wrap(err, msg, "")} | |||
| } | |||
| // IsNotFound reports whether err was created with NotFoundf() or | |||
| // NewNotFound(). | |||
| func IsNotFound(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*notFound) | |||
| return ok | |||
| } | |||
| // userNotFound represents an error when a nonexistent user is looked up. | |||
| type userNotFound struct { | |||
| Err | |||
| } | |||
| // UserNotFoundf returns an error which satisfies IsUserNotFound(). | |||
| func UserNotFoundf(format string, args ...interface{}) error { | |||
| return &userNotFound{wrap(nil, format, " user not found", args...)} | |||
| } | |||
| // NewUserNotFound returns an error which wraps err and satisfies | |||
| // IsUserNotFound(). | |||
| func NewUserNotFound(err error, msg string) error { | |||
| return &userNotFound{wrap(err, msg, "")} | |||
| } | |||
| // IsUserNotFound reports whether err was created with UserNotFoundf() or | |||
| // NewUserNotFound(). | |||
| func IsUserNotFound(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*userNotFound) | |||
| return ok | |||
| } | |||
| // unauthorized represents an error when an operation is unauthorized. | |||
| type unauthorized struct { | |||
| Err | |||
| } | |||
| // Unauthorizedf returns an error which satisfies IsUnauthorized(). | |||
| func Unauthorizedf(format string, args ...interface{}) error { | |||
| return &unauthorized{wrap(nil, format, "", args...)} | |||
| } | |||
| // NewUnauthorized returns an error which wraps err and satisfies | |||
| // IsUnauthorized(). | |||
| func NewUnauthorized(err error, msg string) error { | |||
| return &unauthorized{wrap(err, msg, "")} | |||
| } | |||
| // IsUnauthorized reports whether err was created with Unauthorizedf() or | |||
| // NewUnauthorized(). | |||
| func IsUnauthorized(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*unauthorized) | |||
| return ok | |||
| } | |||
| // notImplemented represents an error when something is not | |||
| // implemented. | |||
| type notImplemented struct { | |||
| Err | |||
| } | |||
| // NotImplementedf returns an error which satisfies IsNotImplemented(). | |||
| func NotImplementedf(format string, args ...interface{}) error { | |||
| return ¬Implemented{wrap(nil, format, " not implemented", args...)} | |||
| } | |||
| // NewNotImplemented returns an error which wraps err and satisfies | |||
| // IsNotImplemented(). | |||
| func NewNotImplemented(err error, msg string) error { | |||
| return ¬Implemented{wrap(err, msg, "")} | |||
| } | |||
| // IsNotImplemented reports whether err was created with | |||
| // NotImplementedf() or NewNotImplemented(). | |||
| func IsNotImplemented(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*notImplemented) | |||
| return ok | |||
| } | |||
| // alreadyExists represents an error when something already exists. | |||
| type alreadyExists struct { | |||
| Err | |||
| } | |||
| // AlreadyExistsf returns an error which satisfies IsAlreadyExists(). | |||
| func AlreadyExistsf(format string, args ...interface{}) error { | |||
| return &alreadyExists{wrap(nil, format, " already exists", args...)} | |||
| } | |||
| // NewAlreadyExists returns an error which wraps err and satisfies | |||
| // IsAlreadyExists(). | |||
| func NewAlreadyExists(err error, msg string) error { | |||
| return &alreadyExists{wrap(err, msg, "")} | |||
| } | |||
| // IsAlreadyExists reports whether the error was created with | |||
| // AlreadyExistsf() or NewAlreadyExists(). | |||
| func IsAlreadyExists(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*alreadyExists) | |||
| return ok | |||
| } | |||
| // notSupported represents an error when something is not supported. | |||
| type notSupported struct { | |||
| Err | |||
| } | |||
| // NotSupportedf returns an error which satisfies IsNotSupported(). | |||
| func NotSupportedf(format string, args ...interface{}) error { | |||
| return ¬Supported{wrap(nil, format, " not supported", args...)} | |||
| } | |||
| // NewNotSupported returns an error which wraps err and satisfies | |||
| // IsNotSupported(). | |||
| func NewNotSupported(err error, msg string) error { | |||
| return ¬Supported{wrap(err, msg, "")} | |||
| } | |||
| // IsNotSupported reports whether the error was created with | |||
| // NotSupportedf() or NewNotSupported(). | |||
| func IsNotSupported(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*notSupported) | |||
| return ok | |||
| } | |||
| // notValid represents an error when something is not valid. | |||
| type notValid struct { | |||
| Err | |||
| } | |||
| // NotValidf returns an error which satisfies IsNotValid(). | |||
| func NotValidf(format string, args ...interface{}) error { | |||
| return ¬Valid{wrap(nil, format, " not valid", args...)} | |||
| } | |||
| // NewNotValid returns an error which wraps err and satisfies IsNotValid(). | |||
| func NewNotValid(err error, msg string) error { | |||
| return ¬Valid{wrap(err, msg, "")} | |||
| } | |||
| // IsNotValid reports whether the error was created with NotValidf() or | |||
| // NewNotValid(). | |||
| func IsNotValid(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*notValid) | |||
| return ok | |||
| } | |||
| // notProvisioned represents an error when something is not yet provisioned. | |||
| type notProvisioned struct { | |||
| Err | |||
| } | |||
| // NotProvisionedf returns an error which satisfies IsNotProvisioned(). | |||
| func NotProvisionedf(format string, args ...interface{}) error { | |||
| return ¬Provisioned{wrap(nil, format, " not provisioned", args...)} | |||
| } | |||
| // NewNotProvisioned returns an error which wraps err that satisfies | |||
| // IsNotProvisioned(). | |||
| func NewNotProvisioned(err error, msg string) error { | |||
| return ¬Provisioned{wrap(err, msg, "")} | |||
| } | |||
| // IsNotProvisioned reports whether err was created with NotProvisionedf() or | |||
| // NewNotProvisioned(). | |||
| func IsNotProvisioned(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*notProvisioned) | |||
| return ok | |||
| } | |||
| // notAssigned represents an error when something is not yet assigned to | |||
| // something else. | |||
| type notAssigned struct { | |||
| Err | |||
| } | |||
| // NotAssignedf returns an error which satisfies IsNotAssigned(). | |||
| func NotAssignedf(format string, args ...interface{}) error { | |||
| return ¬Assigned{wrap(nil, format, " not assigned", args...)} | |||
| } | |||
| // NewNotAssigned returns an error which wraps err that satisfies | |||
| // IsNotAssigned(). | |||
| func NewNotAssigned(err error, msg string) error { | |||
| return ¬Assigned{wrap(err, msg, "")} | |||
| } | |||
| // IsNotAssigned reports whether err was created with NotAssignedf() or | |||
| // NewNotAssigned(). | |||
| func IsNotAssigned(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*notAssigned) | |||
| return ok | |||
| } | |||
| // badRequest represents an error when a request has bad parameters. | |||
| type badRequest struct { | |||
| Err | |||
| } | |||
| // BadRequestf returns an error which satisfies IsBadRequest(). | |||
| func BadRequestf(format string, args ...interface{}) error { | |||
| return &badRequest{wrap(nil, format, "", args...)} | |||
| } | |||
| // NewBadRequest returns an error which wraps err that satisfies | |||
| // IsBadRequest(). | |||
| func NewBadRequest(err error, msg string) error { | |||
| return &badRequest{wrap(err, msg, "")} | |||
| } | |||
| // IsBadRequest reports whether err was created with BadRequestf() or | |||
| // NewBadRequest(). | |||
| func IsBadRequest(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*badRequest) | |||
| return ok | |||
| } | |||
| // methodNotAllowed represents an error when an HTTP request | |||
| // is made with an inappropriate method. | |||
| type methodNotAllowed struct { | |||
| Err | |||
| } | |||
| // MethodNotAllowedf returns an error which satisfies IsMethodNotAllowed(). | |||
| func MethodNotAllowedf(format string, args ...interface{}) error { | |||
| return &methodNotAllowed{wrap(nil, format, "", args...)} | |||
| } | |||
| // NewMethodNotAllowed returns an error which wraps err that satisfies | |||
| // IsMethodNotAllowed(). | |||
| func NewMethodNotAllowed(err error, msg string) error { | |||
| return &methodNotAllowed{wrap(err, msg, "")} | |||
| } | |||
| // IsMethodNotAllowed reports whether err was created with MethodNotAllowedf() or | |||
| // NewMethodNotAllowed(). | |||
| func IsMethodNotAllowed(err error) bool { | |||
| err = Cause(err) | |||
| _, ok := err.(*methodNotAllowed) | |||
| return ok | |||
| } | |||
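| // A sketch (not part of the original file) of how a further error kind would | |||
| // follow the same pattern: an unexported struct embedding Err, an "f" | |||
| // constructor, a wrapping constructor, and an Is* checker that unwraps with | |||
| // Cause. The "timeout" kind here is hypothetical. | |||
| // | |||
| //	type timeout struct { | |||
| //		Err | |||
| //	} | |||
| // | |||
| //	func Timeoutf(format string, args ...interface{}) error { | |||
| //		return &timeout{wrap(nil, format, " timed out", args...)} | |||
| //	} | |||
| // | |||
| //	func NewTimeout(err error, msg string) error { | |||
| //		return &timeout{wrap(err, msg, "")} | |||
| //	} | |||
| // | |||
| //	func IsTimeout(err error) bool { | |||
| //		_, ok := Cause(err).(*timeout) | |||
| //		return ok | |||
| //	} | |||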
| @@ -0,0 +1,330 @@ | |||
| // Copyright 2014 Canonical Ltd. | |||
| // Licensed under the LGPLv3, see LICENCE file for details. | |||
| package errors | |||
| import ( | |||
| "fmt" | |||
| "strings" | |||
| ) | |||
| // New is a drop-in replacement for the standard library errors package that | |||
| // records the location at which the error is created. | |||
| // | |||
| // For example: | |||
| // return errors.New("validation failed") | |||
| // | |||
| func New(message string) error { | |||
| err := &Err{message: message} | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Errorf creates a new annotated error and records the location at which the | |||
| // error is created. It is a drop-in replacement for fmt.Errorf. | |||
| // | |||
| // For example: | |||
| // return errors.Errorf("validation failed: %s", message) | |||
| // | |||
| func Errorf(format string, args ...interface{}) error { | |||
| err := &Err{message: fmt.Sprintf(format, args...)} | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Trace adds the location of the Trace call to the stack. The Cause of the | |||
| // resulting error is the same as the error parameter. If the other error is | |||
| // nil, the result will be nil. | |||
| // | |||
| // For example: | |||
| // if err := SomeFunc(); err != nil { | |||
| // return errors.Trace(err) | |||
| // } | |||
| // | |||
| func Trace(other error) error { | |||
| if other == nil { | |||
| return nil | |||
| } | |||
| err := &Err{previous: other, cause: Cause(other)} | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Annotate is used to add extra context to an existing error. The location of | |||
| // the Annotate call is recorded with the annotations. The file and line are | |||
| // also recorded. | |||
| // | |||
| // For example: | |||
| // if err := SomeFunc(); err != nil { | |||
| // return errors.Annotate(err, "failed to frombulate") | |||
| // } | |||
| // | |||
| func Annotate(other error, message string) error { | |||
| if other == nil { | |||
| return nil | |||
| } | |||
| err := &Err{ | |||
| previous: other, | |||
| cause: Cause(other), | |||
| message: message, | |||
| } | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Annotatef is used to add extra context to an existing error. The location of | |||
| // the Annotatef call is recorded with the annotations. The file and line are | |||
| // also recorded. | |||
| // | |||
| // For example: | |||
| // if err := SomeFunc(); err != nil { | |||
| // return errors.Annotatef(err, "failed to frombulate the %s", arg) | |||
| // } | |||
| // | |||
| func Annotatef(other error, format string, args ...interface{}) error { | |||
| if other == nil { | |||
| return nil | |||
| } | |||
| err := &Err{ | |||
| previous: other, | |||
| cause: Cause(other), | |||
| message: fmt.Sprintf(format, args...), | |||
| } | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // DeferredAnnotatef annotates the given error (when it is not nil) with the given | |||
| // format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef | |||
| // does nothing. This method is used in a defer statement in order to annotate any | |||
| // resulting error with the same message. | |||
| // | |||
| // For example: | |||
| // | |||
| // defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg) | |||
| // | |||
| func DeferredAnnotatef(err *error, format string, args ...interface{}) { | |||
| if *err == nil { | |||
| return | |||
| } | |||
| newErr := &Err{ | |||
| message: fmt.Sprintf(format, args...), | |||
| cause: Cause(*err), | |||
| previous: *err, | |||
| } | |||
| newErr.SetLocation(1) | |||
| *err = newErr | |||
| } | |||
| // Wrap changes the Cause of the error. The location of the Wrap call is also | |||
| // stored in the error stack. | |||
| // | |||
| // For example: | |||
| // if err := SomeFunc(); err != nil { | |||
| // newErr := &packageError{"more context", private_value} | |||
| // return errors.Wrap(err, newErr) | |||
| // } | |||
| // | |||
| func Wrap(other, newDescriptive error) error { | |||
| err := &Err{ | |||
| previous: other, | |||
| cause: newDescriptive, | |||
| } | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Wrapf changes the Cause of the error, and adds an annotation. The location | |||
| // of the Wrap call is also stored in the error stack. | |||
| // | |||
| // For example: | |||
| // if err := SomeFunc(); err != nil { | |||
| // return errors.Wrapf(err, simpleErrorType, "invalid value %q", value) | |||
| // } | |||
| // | |||
| func Wrapf(other, newDescriptive error, format string, args ...interface{}) error { | |||
| err := &Err{ | |||
| message: fmt.Sprintf(format, args...), | |||
| previous: other, | |||
| cause: newDescriptive, | |||
| } | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Maskf masks the given error with the given format string and arguments (like | |||
| // fmt.Sprintf), returning a new error that maintains the error stack, but | |||
| // hides the underlying error type. The error string still contains the full | |||
| // annotations. If you want to hide the annotations, call Wrap. | |||
| func Maskf(other error, format string, args ...interface{}) error { | |||
| if other == nil { | |||
| return nil | |||
| } | |||
| err := &Err{ | |||
| message: fmt.Sprintf(format, args...), | |||
| previous: other, | |||
| } | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Mask hides the underlying error type, and records the location of the masking. | |||
| func Mask(other error) error { | |||
| if other == nil { | |||
| return nil | |||
| } | |||
| err := &Err{ | |||
| previous: other, | |||
| } | |||
| err.SetLocation(1) | |||
| return err | |||
| } | |||
| // Cause returns the cause of the given error. This will be either the | |||
| // original error, or the result of a Wrap or Mask call. | |||
| // | |||
| // Cause is the usual way to diagnose errors that may have been wrapped by | |||
| // the other errors functions. | |||
| func Cause(err error) error { | |||
| var diag error | |||
| if err, ok := err.(causer); ok { | |||
| diag = err.Cause() | |||
| } | |||
| if diag != nil { | |||
| return diag | |||
| } | |||
| return err | |||
| } | |||
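| // A minimal sketch (assumed) of the identity that Cause preserves through | |||
| // tracing and annotation: | |||
| // | |||
| //	base := New("boom") | |||
| //	err := Annotate(Trace(base), "context") | |||
| //	Cause(err) == base // true | |||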
| type causer interface { | |||
| Cause() error | |||
| } | |||
| type wrapper interface { | |||
| // Message returns the top level error message, | |||
| // not including the message from the Previous | |||
| // error. | |||
| Message() string | |||
| // Underlying returns the Previous error, or nil | |||
| // if there is none. | |||
| Underlying() error | |||
| } | |||
| type locationer interface { | |||
| Location() (string, int) | |||
| } | |||
| var ( | |||
| _ wrapper = (*Err)(nil) | |||
| _ locationer = (*Err)(nil) | |||
| _ causer = (*Err)(nil) | |||
| ) | |||
| // Details returns information about the stack of errors wrapped by err, in | |||
| // the format: | |||
| // | |||
| // [{filename:99: error one} {otherfile:55: cause of error one}] | |||
| // | |||
| // This is a terse alternative to ErrorStack as it returns a single line. | |||
| func Details(err error) string { | |||
| if err == nil { | |||
| return "[]" | |||
| } | |||
| var s []byte | |||
| s = append(s, '[') | |||
| for { | |||
| s = append(s, '{') | |||
| if err, ok := err.(locationer); ok { | |||
| file, line := err.Location() | |||
| if file != "" { | |||
| s = append(s, fmt.Sprintf("%s:%d", file, line)...) | |||
| s = append(s, ": "...) | |||
| } | |||
| } | |||
| if cerr, ok := err.(wrapper); ok { | |||
| s = append(s, cerr.Message()...) | |||
| err = cerr.Underlying() | |||
| } else { | |||
| s = append(s, err.Error()...) | |||
| err = nil | |||
| } | |||
| s = append(s, '}') | |||
| if err == nil { | |||
| break | |||
| } | |||
| s = append(s, ' ') | |||
| } | |||
| s = append(s, ']') | |||
| return string(s) | |||
| } | |||
| // ErrorStack returns a string representation of the annotated error. If the | |||
| // error passed as the parameter is not an annotated error, the result is | |||
| // simply the result of the Error() method on that error. | |||
| // | |||
| // If the error is an annotated error, a multi-line string is returned where | |||
| // each line represents one entry in the annotation stack. The full filename | |||
| // from the call stack is used in the output. | |||
| // | |||
| // first error | |||
| // github.com/juju/errors/annotation_test.go:193: | |||
| // github.com/juju/errors/annotation_test.go:194: annotation | |||
| // github.com/juju/errors/annotation_test.go:195: | |||
| // github.com/juju/errors/annotation_test.go:196: more context | |||
| // github.com/juju/errors/annotation_test.go:197: | |||
| func ErrorStack(err error) string { | |||
| return strings.Join(errorStack(err), "\n") | |||
| } | |||
| func errorStack(err error) []string { | |||
| if err == nil { | |||
| return nil | |||
| } | |||
| // We want the first error first | |||
| var lines []string | |||
| for { | |||
| var buff []byte | |||
| if err, ok := err.(locationer); ok { | |||
| file, line := err.Location() | |||
| // Strip off the leading GOPATH/src path elements. | |||
| file = trimGoPath(file) | |||
| if file != "" { | |||
| buff = append(buff, fmt.Sprintf("%s:%d", file, line)...) | |||
| buff = append(buff, ": "...) | |||
| } | |||
| } | |||
| if cerr, ok := err.(wrapper); ok { | |||
| message := cerr.Message() | |||
| buff = append(buff, message...) | |||
| // If there is a cause for this error, and it is different to the cause | |||
| // of the underlying error, then output the error string in the stack trace. | |||
| var cause error | |||
| if err1, ok := err.(causer); ok { | |||
| cause = err1.Cause() | |||
| } | |||
| err = cerr.Underlying() | |||
| if cause != nil && !sameError(Cause(err), cause) { | |||
| if message != "" { | |||
| buff = append(buff, ": "...) | |||
| } | |||
| buff = append(buff, cause.Error()...) | |||
| } | |||
| } else { | |||
| buff = append(buff, err.Error()...) | |||
| err = nil | |||
| } | |||
| lines = append(lines, string(buff)) | |||
| if err == nil { | |||
| break | |||
| } | |||
| } | |||
| // reverse the lines to get the original error, which was at the end of | |||
| // the list, back to the start. | |||
| var result []string | |||
| for i := len(lines); i > 0; i-- { | |||
| result = append(result, lines[i-1]) | |||
| } | |||
| return result | |||
| } | |||
| @@ -0,0 +1,38 @@ | |||
| // Copyright 2013, 2014 Canonical Ltd. | |||
| // Licensed under the LGPLv3, see LICENCE file for details. | |||
| package errors | |||
| import ( | |||
| "runtime" | |||
| "strings" | |||
| ) | |||
| // prefixSize is used internally to trim the user specific path from the | |||
| // front of the returned filenames from the runtime call stack. | |||
| var prefixSize int | |||
| // goPath is the deduced path based on the location of this file as compiled. | |||
| var goPath string | |||
| func init() { | |||
| _, file, _, ok := runtime.Caller(0) | |||
| if file == "?" { | |||
| return | |||
| } | |||
| if ok { | |||
| // We know that the end of the file should be: | |||
| // github.com/juju/errors/path.go | |||
| size := len(file) | |||
| suffix := len("github.com/juju/errors/path.go") | |||
| goPath = file[:size-suffix] | |||
| prefixSize = len(goPath) | |||
| } | |||
| } | |||
| func trimGoPath(filename string) string { | |||
| if strings.HasPrefix(filename, goPath) { | |||
| return filename[prefixSize:] | |||
| } | |||
| return filename | |||
| } | |||
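| // Sketch of the intended effect (paths illustrative): with goPath deduced as | |||
| // "/home/user/go/src/", a filename from runtime.Caller such as | |||
| // "/home/user/go/src/github.com/juju/errors/functions.go" trims to | |||
| // "github.com/juju/errors/functions.go"; filenames outside goPath are | |||
| // returned unchanged. | |||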