vendorify
diff --git a/go/vendor/github.com/globalsign/mgo/LICENSE b/go/vendor/github.com/globalsign/mgo/LICENSE
new file mode 100644
index 0000000..770c767
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/LICENSE
@@ -0,0 +1,25 @@
+mgo - MongoDB driver for Go
+
+Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/go/vendor/github.com/globalsign/mgo/bson/LICENSE b/go/vendor/github.com/globalsign/mgo/bson/LICENSE
new file mode 100644
index 0000000..8903260
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/bson/LICENSE
@@ -0,0 +1,25 @@
+BSON library for Go
+
+Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/go/vendor/github.com/globalsign/mgo/bson/README.md b/go/vendor/github.com/globalsign/mgo/bson/README.md
new file mode 100644
index 0000000..5c5819e
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/bson/README.md
@@ -0,0 +1,12 @@
+[![GoDoc](https://godoc.org/github.com/globalsign/mgo/bson?status.svg)](https://godoc.org/github.com/globalsign/mgo/bson)
+
+An Implementation of BSON for Go
+--------------------------------
+
+Package bson is an implementation of the [BSON specification](http://bsonspec.org) for Go.
+
+While the BSON package implements the BSON spec as faithfully as possible, there
+is some MongoDB-specific behaviour (such as the map keys `$in`, `$all`, etc.) in the
+`bson` package. The priority is backwards compatibility with the `mgo`
+driver, though fixes for obviously buggy behaviour are welcome (as are new
+features, behind feature flags).
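+
+A minimal usage sketch (the `Marshal` and `Unmarshal` functions and the `M` map
+type shown here are part of this package; the document fields are illustrative
+only):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/globalsign/mgo/bson"
+)
+
+func main() {
+	// Marshal a map into a BSON document.
+	data, err := bson.Marshal(bson.M{"name": "example", "count": 1})
+	if err != nil {
+		panic(err)
+	}
+
+	// Unmarshal it back into a struct; by default keys are matched against
+	// the lowercased field names.
+	var out struct {
+		Name  string
+		Count int
+	}
+	if err := bson.Unmarshal(data, &out); err != nil {
+		panic(err)
+	}
+	fmt.Println(out.Name, out.Count)
+}
+```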
diff --git a/go/vendor/github.com/globalsign/mgo/bson/bson.go b/go/vendor/github.com/globalsign/mgo/bson/bson.go
new file mode 100644
index 0000000..eb87ef6
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/bson/bson.go
@@ -0,0 +1,836 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package bson is an implementation of the BSON specification for Go:
+//
+//     http://bsonspec.org
+//
+// It was created as part of the mgo MongoDB driver for Go, but is standalone
+// and may be used on its own without the driver.
+package bson
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/rand"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+//go:generate go run bson_corpus_spec_test_generator.go
+
+// --------------------------------------------------------------------------
+// The public API.
+
+// Element types constants from BSON specification.
+const (
+	ElementFloat64                byte = 0x01
+	ElementString                 byte = 0x02
+	ElementDocument               byte = 0x03
+	ElementArray                  byte = 0x04
+	ElementBinary                 byte = 0x05
+	Element06                     byte = 0x06
+	ElementObjectId               byte = 0x07
+	ElementBool                   byte = 0x08
+	ElementDatetime               byte = 0x09
+	ElementNil                    byte = 0x0A
+	ElementRegEx                  byte = 0x0B
+	ElementDBPointer              byte = 0x0C
+	ElementJavaScriptWithoutScope byte = 0x0D
+	ElementSymbol                 byte = 0x0E
+	ElementJavaScriptWithScope    byte = 0x0F
+	ElementInt32                  byte = 0x10
+	ElementTimestamp              byte = 0x11
+	ElementInt64                  byte = 0x12
+	ElementDecimal128             byte = 0x13
+	ElementMinKey                 byte = 0xFF
+	ElementMaxKey                 byte = 0x7F
+
+	BinaryGeneric     byte = 0x00
+	BinaryFunction    byte = 0x01
+	BinaryBinaryOld   byte = 0x02
+	BinaryUUIDOld     byte = 0x03
+	BinaryUUID        byte = 0x04
+	BinaryMD5         byte = 0x05
+	BinaryUserDefined byte = 0x80
+)
+
+// Getter interface: a value implementing the bson.Getter interface will have its GetBSON
+// method called when the given value has to be marshalled, and the result
+// of this method will be marshaled in place of the actual object.
+//
+// If GetBSON returns a non-nil error, the marshalling procedure
+// will stop and error out with the provided value.
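+//
+// For example (an illustrative sketch mirroring the Setter example below):
+//
+//     type MyString string
+//
+//     func (s MyString) GetBSON() (interface{}, error) {
+//         return string(s), nil
+//     }
+//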
+type Getter interface {
+	GetBSON() (interface{}, error)
+}
+
+// Setter interface: a value implementing the bson.Setter interface will receive the BSON
+// value via the SetBSON method during unmarshaling, and the object
+// itself will not be changed as usual.
+//
+// If setting the value works, the method should return nil or alternatively
+// bson.ErrSetZero to set the respective field to its zero value (nil for
+// pointer types). If SetBSON returns a value of type bson.TypeError, the
+// BSON value will be omitted from a map or slice being decoded and the
+// unmarshalling will continue. If it returns any other non-nil error, the
+// unmarshalling procedure will stop and error out with the provided value.
+//
+// This interface is generally useful in pointer receivers, since the method
+// will want to change the receiver. A type field that implements the Setter
+// interface doesn't have to be a pointer, though.
+//
+// Unlike the usual behavior, unmarshalling onto a value that implements a
+// Setter interface will NOT reset the value to its zero state. This allows
+// the value to decide by itself how to be unmarshalled.
+//
+// For example:
+//
+//     type MyString string
+//
+//     func (s *MyString) SetBSON(raw bson.Raw) error {
+//         return raw.Unmarshal(s)
+//     }
+//
+type Setter interface {
+	SetBSON(raw Raw) error
+}
+
+// ErrSetZero may be returned from a SetBSON method to have the value set to
+// its respective zero value. When used in pointer values, this will set the
+// field to nil rather than to the pre-allocated value.
+var ErrSetZero = errors.New("set to zero")
+
+// M is a convenient alias for a map[string]interface{} map, useful for
+// dealing with BSON in a native way.  For instance:
+//
+//     bson.M{"a": 1, "b": true}
+//
+// There's no special handling for this type in addition to what's done anyway
+// for an equivalent map type.  Elements in the map will be dumped in an
+// undefined order. See also the bson.D type for an ordered alternative.
+type M map[string]interface{}
+
+// D represents a BSON document containing ordered elements. For example:
+//
+//     bson.D{{"a", 1}, {"b", true}}
+//
+// In some situations, such as when creating indexes for MongoDB, the order in
+// which the elements are defined is important.  If the order is not important,
+// using a map is generally more comfortable. See bson.M and bson.RawD.
+type D []DocElem
+
+// DocElem is an element of the bson.D document representation.
+type DocElem struct {
+	Name  string
+	Value interface{}
+}
+
+// Map returns a map out of the ordered element name/value pairs in d.
+func (d D) Map() (m M) {
+	m = make(M, len(d))
+	for _, item := range d {
+		m[item.Name] = item.Value
+	}
+	return m
+}
+
+// The Raw type represents raw unprocessed BSON documents and elements.
+// Kind is the kind of element as defined per the BSON specification, and
+// Data is the raw unprocessed data for the respective element.
+// Using this type it is possible to unmarshal or marshal values partially.
+//
+// Relevant documentation:
+//
+//     http://bsonspec.org/#/specification
+//
+type Raw struct {
+	Kind byte
+	Data []byte
+}
+
+// RawD represents a BSON document containing raw unprocessed elements.
+// This low-level representation may be useful when lazily processing
+// documents of uncertain content, or when manipulating the raw content
+// documents in general.
+type RawD []RawDocElem
+
+// RawDocElem elements of RawD type.
+type RawDocElem struct {
+	Name  string
+	Value Raw
+}
+
+// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
+// long. MongoDB objects by default have such a property set in their "_id"
+// property.
+//
+// http://www.mongodb.org/display/DOCS/Object+Ids
+type ObjectId string
+
+// ObjectIdHex returns an ObjectId from the provided hex representation.
+// Calling this function with an invalid hex representation will
+// cause a runtime panic. See the IsObjectIdHex function.
+func ObjectIdHex(s string) ObjectId {
+	d, err := hex.DecodeString(s)
+	if err != nil || len(d) != 12 {
+		panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s))
+	}
+	return ObjectId(d)
+}
+
+// IsObjectIdHex returns whether s is a valid hex representation of
+// an ObjectId. See the ObjectIdHex function.
+func IsObjectIdHex(s string) bool {
+	if len(s) != 24 {
+		return false
+	}
+	_, err := hex.DecodeString(s)
+	return err == nil
+}
+
+// objectIdCounter is atomically incremented when generating a new ObjectId
+// using the NewObjectId() function. It's used as the counter part of an id.
+var objectIdCounter = readRandomUint32()
+
+// readRandomUint32 returns a random objectIdCounter.
+func readRandomUint32() uint32 {
+	var b [4]byte
+	_, err := io.ReadFull(rand.Reader, b[:])
+	if err != nil {
+		panic(fmt.Errorf("cannot read random object id: %v", err))
+	}
+	return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24))
+}
+
+// machineId stores machine id generated once and used in subsequent calls
+// to NewObjectId function.
+var machineId = readMachineId()
+var processId = os.Getpid()
+
+// readMachineId generates and returns a machine id.
+// If this function fails to get the hostname and also fails to read random
+// bytes as a fallback, it will cause a runtime error.
+func readMachineId() []byte {
+	var sum [3]byte
+	id := sum[:]
+	hostname, err1 := os.Hostname()
+	if err1 != nil {
+		_, err2 := io.ReadFull(rand.Reader, id)
+		if err2 != nil {
+			panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
+		}
+		return id
+	}
+	hw := md5.New()
+	hw.Write([]byte(hostname))
+	copy(id, hw.Sum(nil))
+	return id
+}
+
+// NewObjectId returns a new unique ObjectId.
+func NewObjectId() ObjectId {
+	var b [12]byte
+	// Timestamp, 4 bytes, big endian
+	binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
+	// Machine, first 3 bytes of md5(hostname)
+	b[4] = machineId[0]
+	b[5] = machineId[1]
+	b[6] = machineId[2]
+	// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
+	b[7] = byte(processId >> 8)
+	b[8] = byte(processId)
+	// Increment, 3 bytes, big endian
+	i := atomic.AddUint32(&objectIdCounter, 1)
+	b[9] = byte(i >> 16)
+	b[10] = byte(i >> 8)
+	b[11] = byte(i)
+	return ObjectId(b[:])
+}
+
+// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
+// with the provided number of seconds from epoch UTC, and all other parts
+// filled with zeroes. It's not safe to insert a document with an id generated
+// by this method; it is useful only for queries to find documents with ids
+// generated before or after the specified timestamp.
+func NewObjectIdWithTime(t time.Time) ObjectId {
+	var b [12]byte
+	binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
+	return ObjectId(string(b[:]))
+}
+
+// String returns a hex string representation of the id.
+// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
+func (id ObjectId) String() string {
+	return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
+}
+
+// Hex returns a hex representation of the ObjectId.
+func (id ObjectId) Hex() string {
+	return hex.EncodeToString([]byte(id))
+}
+
+// MarshalJSON turns a bson.ObjectId into a json.Marshaler.
+func (id ObjectId) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
+}
+
+var nullBytes = []byte("null")
+
+// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaler.
+func (id *ObjectId) UnmarshalJSON(data []byte) error {
+	if len(data) > 0 && (data[0] == '{' || data[0] == 'O') {
+		var v struct {
+			Id   json.RawMessage `json:"$oid"`
+			Func struct {
+				Id json.RawMessage
+			} `json:"$oidFunc"`
+		}
+		err := jdec(data, &v)
+		if err == nil {
+			if len(v.Id) > 0 {
+				data = []byte(v.Id)
+			} else {
+				data = []byte(v.Func.Id)
+			}
+		}
+	}
+	if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
+		*id = ""
+		return nil
+	}
+	if len(data) != 26 || data[0] != '"' || data[25] != '"' {
+		return fmt.Errorf("invalid ObjectId in JSON: %s", string(data))
+	}
+	var buf [12]byte
+	_, err := hex.Decode(buf[:], data[1:25])
+	if err != nil {
+		return fmt.Errorf("invalid ObjectId in JSON: %s (%s)", string(data), err)
+	}
+	*id = ObjectId(string(buf[:]))
+	return nil
+}
+
+// MarshalText turns bson.ObjectId into an encoding.TextMarshaler.
+func (id ObjectId) MarshalText() ([]byte, error) {
+	return []byte(fmt.Sprintf("%x", string(id))), nil
+}
+
+// UnmarshalText turns *bson.ObjectId into an encoding.TextUnmarshaler.
+func (id *ObjectId) UnmarshalText(data []byte) error {
+	if len(data) == 1 && data[0] == ' ' || len(data) == 0 {
+		*id = ""
+		return nil
+	}
+	if len(data) != 24 {
+		return fmt.Errorf("invalid ObjectId: %s", data)
+	}
+	var buf [12]byte
+	_, err := hex.Decode(buf[:], data[:])
+	if err != nil {
+		return fmt.Errorf("invalid ObjectId: %s (%s)", data, err)
+	}
+	*id = ObjectId(string(buf[:]))
+	return nil
+}
+
+// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
+func (id ObjectId) Valid() bool {
+	return len(id) == 12
+}
+
+// byteSlice returns byte slice of id from start to end.
+// Calling this function with an invalid id will cause a runtime panic.
+func (id ObjectId) byteSlice(start, end int) []byte {
+	if len(id) != 12 {
+		panic(fmt.Sprintf("invalid ObjectId: %q", string(id)))
+	}
+	return []byte(string(id)[start:end])
+}
+
+// Time returns the timestamp part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Time() time.Time {
+	// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
+	secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
+	return time.Unix(secs, 0)
+}
+
+// Machine returns the 3-byte machine id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Machine() []byte {
+	return id.byteSlice(4, 7)
+}
+
+// Pid returns the process id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Pid() uint16 {
+	return binary.BigEndian.Uint16(id.byteSlice(7, 9))
+}
+
+// Counter returns the incrementing value part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Counter() int32 {
+	b := id.byteSlice(9, 12)
+	// Counter is stored as big-endian 3-byte value
+	return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
+}
+
+// The Symbol type is similar to a string and is used in languages with a
+// distinct symbol type.
+type Symbol string
+
+// Now returns the current time with millisecond precision. MongoDB stores
+// timestamps with the same precision, so a Time returned from this method
+// will not change after a roundtrip to the database. That's the only reason
+// why this function exists. Using the time.Now function also works fine
+// otherwise.
+func Now() time.Time {
+	return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
+}
+
+// MongoTimestamp is a special internal type used by MongoDB that for some
+// strange reason has its own datatype defined in BSON.
+type MongoTimestamp int64
+
+// Time returns the time part of ts which is stored with second precision.
+func (ts MongoTimestamp) Time() time.Time {
+	return time.Unix(int64(uint64(ts)>>32), 0)
+}
+
+// Counter returns the counter part of ts.
+func (ts MongoTimestamp) Counter() uint32 {
+	return uint32(ts)
+}
+
+// NewMongoTimestamp creates a timestamp using the given
+// date `t` (with second precision) and counter `c` (unique for `t`).
+//
+// Returns an error if time `t` is not between 1970-01-01T00:00:00Z
+// and 2106-02-07T06:28:15Z (inclusive).
+//
+// Note that two MongoTimestamps should never have the same (time, counter) combination:
+// the caller must ensure the counter `c` is increased if creating multiple MongoTimestamp
+// values for the same time `t` (ignoring fractions of seconds).
+func NewMongoTimestamp(t time.Time, c uint32) (MongoTimestamp, error) {
+	u := t.Unix()
+	if u < 0 || u > math.MaxUint32 {
+		return -1, errors.New("invalid value for time")
+	}
+
+	i := int64(u<<32 | int64(c))
+
+	return MongoTimestamp(i), nil
+}
+
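+// As an illustrative sketch only, two distinct timestamps within the same
+// second can be generated by bumping the counter:
+//
+//     now := time.Now()
+//     ts1, _ := bson.NewMongoTimestamp(now, 1)
+//     ts2, _ := bson.NewMongoTimestamp(now, 2)
+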
+type orderKey int64
+
+// MaxKey is a special value that compares higher than all other possible BSON
+// values in a MongoDB database.
+var MaxKey = orderKey(1<<63 - 1)
+
+// MinKey is a special value that compares lower than all other possible BSON
+// values in a MongoDB database.
+var MinKey = orderKey(-1 << 63)
+
+type undefined struct{}
+
+// Undefined represents the undefined BSON value.
+var Undefined undefined
+
+// Binary is a representation for non-standard binary values.  Any kind should
+// work, but the following are known as of this writing:
+//
+//   0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
+//   0x01 - Function (!?)
+//   0x02 - Obsolete generic.
+//   0x03 - Obsolete UUID.
+//   0x04 - UUID
+//   0x05 - MD5
+//   0x80 - User defined.
+//
+type Binary struct {
+	Kind byte
+	Data []byte
+}
+
+// RegEx represents a regular expression.  The Options field may contain
+// individual characters defining the way in which the pattern should be
+// applied, and must be sorted. Valid options as of this writing are 'i' for
+// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
+// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
+// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
+// unicode. The value of the Options parameter is not verified before being
+// marshaled into the BSON format.
+type RegEx struct {
+	Pattern string
+	Options string
+}
+
+// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
+// will be marshaled as a mapping from identifiers to values that may be
+// used when evaluating the provided Code.
+type JavaScript struct {
+	Code  string
+	Scope interface{}
+}
+
+// DBPointer refers to a document id in a namespace.
+//
+// This type is deprecated in the BSON specification and should not be used
+// except for backwards compatibility with ancient applications.
+type DBPointer struct {
+	Namespace string
+	Id        ObjectId
+}
+
+const initialBufferSize = 64
+
+func handleErr(err *error) {
+	if r := recover(); r != nil {
+		if _, ok := r.(runtime.Error); ok {
+			panic(r)
+		} else if _, ok := r.(externalPanic); ok {
+			panic(r)
+		} else if s, ok := r.(string); ok {
+			*err = errors.New(s)
+		} else if e, ok := r.(error); ok {
+			*err = e
+		} else {
+			panic(r)
+		}
+	}
+}
+
+// Marshal serializes the in value, which may be a map or a struct value.
+// In the case of struct values, only exported fields will be serialized,
+// and the order of serialized fields will match that of the struct itself.
+// The lowercased field name is used as the key for each exported field,
+// but this behavior may be changed using the respective field tag.
+// The tag may also contain flags to tweak the marshalling behavior for
+// the field. The tag formats accepted are:
+//
+//     "[<key>][,<flag1>[,<flag2>]]"
+//
+//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty  Only include the field if it's not set to the zero
+//                value for the type or to empty slices or maps.
+//
+//     minsize    Marshal an int64 value as an int32, if that's feasible
+//                while preserving the numeric value.
+//
+//     inline     Inline the field, which must be a struct or a map,
+//                causing all of its fields or keys to be processed as if
+//                they were part of the outer struct. For maps, keys must
+//                not conflict with the bson keys of other struct fields.
+//
+// Some examples:
+//
+//     type T struct {
+//         A bool
+//         B int    "myb"
+//         C string "myc,omitempty"
+//         D string `bson:",omitempty" json:"jsonkey"`
+//         E int64  ",minsize"
+//         F int64  "myf,omitempty,minsize"
+//     }
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	return MarshalBuffer(in, make([]byte, 0, initialBufferSize))
+}
+
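+// As an illustrative sketch only (not part of the original documentation),
+// marshalling the example type T above and decoding it back might look like:
+//
+//     data, err := bson.Marshal(&T{A: true, B: 7})
+//     if err != nil {
+//         // handle the error
+//     }
+//
+//     var t T
+//     err = bson.Unmarshal(data, &t)
+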
+// MarshalBuffer behaves the same way as Marshal, except that instead of
+// allocating a new byte slice it tries to use the received byte slice and
+// only allocates more memory if necessary to fit the marshaled value.
+func MarshalBuffer(in interface{}, buf []byte) (out []byte, err error) {
+	defer handleErr(&err)
+	e := &encoder{buf}
+	e.addDoc(reflect.ValueOf(in))
+	return e.out, nil
+}
+
+// Unmarshal deserializes data from in into the out value.  The out value
+// must be a map, a pointer to a struct, or a pointer to a bson.D value.
+// In the case of struct values, only exported fields will be deserialized.
+// The lowercased field name is used as the key for each exported field,
+// but this behavior may be changed using the respective field tag.
+// The tag may also contain flags to tweak the marshalling behavior for
+// the field. The tag formats accepted are:
+//
+//     "[<key>][,<flag1>[,<flag2>]]"
+//
+//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported during unmarshal (see the
+// Marshal method for other flags):
+//
+//     inline     Inline the field, which must be a struct or a map.
+//                Inlined structs are handled as if its fields were part
+//                of the outer struct. An inlined map causes keys that do
+//                not match any other struct field to be inserted in the
+//                map rather than being discarded as usual.
+//
+// The target field or element types of out may not necessarily match
+// the BSON values of the provided data.  The following conversions are
+// made automatically:
+//
+// - Numeric types are converted if at least the integer part of the
+//   value would be preserved correctly
+// - Bools are converted to numeric types as 1 or 0
+// - Numeric types are converted to bools as true if not 0 or false otherwise
+// - Binary and string BSON data is converted to a string, array or byte slice
+//
+// If the value would not fit the type and cannot be converted, it's
+// silently skipped.
+//
+// Pointer values are initialized when necessary.
+func Unmarshal(in []byte, out interface{}) (err error) {
+	if raw, ok := out.(*Raw); ok {
+		raw.Kind = 3
+		raw.Data = in
+		return nil
+	}
+	defer handleErr(&err)
+	v := reflect.ValueOf(out)
+	switch v.Kind() {
+	case reflect.Ptr:
+		fallthrough
+	case reflect.Map:
+		d := newDecoder(in)
+		d.readDocTo(v)
+		if d.i < len(d.in) {
+			return errors.New("document is corrupted")
+		}
+	case reflect.Struct:
+		return errors.New("unmarshal can't deal with struct values. Use a pointer")
+	default:
+		return errors.New("unmarshal needs a map or a pointer to a struct")
+	}
+	return nil
+}
+
+// Unmarshal deserializes raw into the out value.  If the out value type
+// is not compatible with raw, a *bson.TypeError is returned.
+//
+// See the Unmarshal function documentation for more details on the
+// unmarshalling process.
+func (raw Raw) Unmarshal(out interface{}) (err error) {
+	defer handleErr(&err)
+	v := reflect.ValueOf(out)
+	switch v.Kind() {
+	case reflect.Ptr:
+		v = v.Elem()
+		fallthrough
+	case reflect.Map:
+		d := newDecoder(raw.Data)
+		good := d.readElemTo(v, raw.Kind)
+		if !good {
+			return &TypeError{v.Type(), raw.Kind}
+		}
+	case reflect.Struct:
+		return errors.New("raw Unmarshal can't deal with struct values. Use a pointer")
+	default:
+		return errors.New("raw Unmarshal needs a map or a valid pointer")
+	}
+	return nil
+}
+
+// TypeError stores the details of a type error encountered
+// during unmarshalling.
+type TypeError struct {
+	Type reflect.Type
+	Kind byte
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+	InlineMap  int
+	Zero       reflect.Value
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	MinSize   bool
+	Inline    []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var structMapMutex sync.RWMutex
+
+type externalPanic string
+
+func (e externalPanic) String() string {
+	return string(e)
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	structMapMutex.RLock()
+	sinfo, found := structMap[st]
+	structMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("bson")
+
+		// Fall-back to JSON struct tag, if feature flag is set.
+		if tag == "" && useJSONTagFallback {
+			tag = field.Tag.Get("json")
+		}
+
+		// If there's no bson/json tag available.
+		if tag == "" {
+			// If there's no tag at all and the raw field tag contains no
+			// "key:value" pairs (i.e. no colon), assume the entire tag
+			// string is the value.
+			if strings.Index(string(field.Tag), ":") < 0 {
+				tag = string(field.Tag)
+			}
+		}
+
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "minsize":
+					info.MinSize = true
+				case "inline":
+					inline = true
+				default:
+					msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+					panic(externalPanic(msg))
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Ptr:
+				// allow only pointer to struct
+				if kind := field.Type.Elem().Kind(); kind != reflect.Struct {
+					return nil, errors.New("Option ,inline allows a pointer only to a struct, was given pointer to " + kind.String())
+				}
+
+				field.Type = field.Type.Elem()
+				fallthrough
+			case reflect.Struct:
+				sinfo, err := getStructInfo(field.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, finfo := range sinfo.FieldsList {
+					if _, found := fieldsMap[finfo.Key]; found {
+						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+						return nil, errors.New(msg)
+					}
+					if finfo.Inline == nil {
+						finfo.Inline = []int{i, finfo.Num}
+					} else {
+						finfo.Inline = append([]int{i}, finfo.Inline...)
+					}
+					fieldsMap[finfo.Key] = finfo
+					fieldsList = append(fieldsList, finfo)
+				}
+			default:
+				panic("Option ,inline needs a struct value or a pointer to a struct or map field")
+			}
+			continue
+		}
+
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+	sinfo = &structInfo{
+		fieldsMap,
+		fieldsList,
+		inlineMap,
+		reflect.New(st).Elem(),
+	}
+	structMapMutex.Lock()
+	structMap[st] = sinfo
+	structMapMutex.Unlock()
+	return sinfo, nil
+}
diff --git a/go/vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go b/go/vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go
new file mode 100644
index 0000000..3525a00
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go
@@ -0,0 +1,294 @@
+// +build ignore
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"go/format"
+	"html/template"
+	"io/ioutil"
+	"log"
+	"path/filepath"
+	"strings"
+
+	"github.com/globalsign/mgo/internal/json"
+)
+
+func main() {
+	log.SetFlags(0)
+	log.SetPrefix(name + ": ")
+
+	var g Generator
+
+	fmt.Fprintf(&g, "// Code generated by \"%s.go\"; DO NOT EDIT\n\n", name)
+
+	src := g.generate()
+
+	err := ioutil.WriteFile(fmt.Sprintf("%s.go", strings.TrimSuffix(name, "_generator")), src, 0644)
+	if err != nil {
+		log.Fatalf("writing output: %s", err)
+	}
+}
+
+// Generator holds the state of the analysis. Primarily used to buffer
+// the output for format.Source.
+type Generator struct {
+	bytes.Buffer // Accumulated output.
+}
+
+// format returns the gofmt-ed contents of the Generator's buffer.
+func (g *Generator) format() []byte {
+	src, err := format.Source(g.Bytes())
+	if err != nil {
+		// Should never happen, but can arise when developing this code.
+		// The user can compile the output to see the error.
+		log.Printf("warning: internal error: invalid Go generated: %s", err)
+		log.Printf("warning: compile the package to analyze the error")
+		return g.Bytes()
+	}
+	return src
+}
+
+// EVERYTHING ABOVE IS CONSTANT BETWEEN THE GENERATORS
+
+const name = "bson_corpus_spec_test_generator"
+
+func (g *Generator) generate() []byte {
+
+	testFiles, err := filepath.Glob("./specdata/specifications/source/bson-corpus/tests/*.json")
+	if err != nil {
+		log.Fatalf("error reading bson-corpus files: %s", err)
+	}
+
+	tests, err := g.loadTests(testFiles)
+	if err != nil {
+		log.Fatalf("error loading tests: %s", err)
+	}
+
+	tmpl, err := g.getTemplate()
+	if err != nil {
+		log.Fatalf("error loading template: %s", err)
+	}
+
+	tmpl.Execute(&g.Buffer, tests)
+
+	return g.format()
+}
+
+func (g *Generator) loadTests(filenames []string) ([]*testDef, error) {
+	var tests []*testDef
+	for _, filename := range filenames {
+		test, err := g.loadTest(filename)
+		if err != nil {
+			return nil, err
+		}
+
+		tests = append(tests, test)
+	}
+
+	return tests, nil
+}
+
+func (g *Generator) loadTest(filename string) (*testDef, error) {
+	content, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	var testDef testDef
+	err = json.Unmarshal(content, &testDef)
+	if err != nil {
+		return nil, err
+	}
+
+	names := make(map[string]struct{})
+
+	for i := len(testDef.Valid) - 1; i >= 0; i-- {
+		if testDef.BsonType == "0x05" && testDef.Valid[i].Description == "subtype 0x02" {
+			testDef.Valid = append(testDef.Valid[:i], testDef.Valid[i+1:]...)
+			continue
+		}
+
+		name := cleanupFuncName(testDef.Description + "_" + testDef.Valid[i].Description)
+		nameIdx := name
+		j := 1
+		for {
+			if _, ok := names[nameIdx]; !ok {
+				break
+			}
+
+			nameIdx = fmt.Sprintf("%s_%d", name, j)
+			j++
+		}
+
+		names[nameIdx] = struct{}{}
+
+		testDef.Valid[i].TestDef = &testDef
+		testDef.Valid[i].Name = nameIdx
+		testDef.Valid[i].StructTest = testDef.TestKey != "" &&
+			(testDef.BsonType != "0x05" || strings.Contains(testDef.Valid[i].Description, "0x00")) &&
+			!testDef.Deprecated
+	}
+
+	for i := len(testDef.DecodeErrors) - 1; i >= 0; i-- {
+		if strings.Contains(testDef.DecodeErrors[i].Description, "UTF-8") {
+			testDef.DecodeErrors = append(testDef.DecodeErrors[:i], testDef.DecodeErrors[i+1:]...)
+			continue
+		}
+
+		name := cleanupFuncName(testDef.Description + "_" + testDef.DecodeErrors[i].Description)
+		nameIdx := name
+		j := 1
+		for {
+			if _, ok := names[nameIdx]; !ok {
+				break
+			}
+
+			nameIdx = fmt.Sprintf("%s_%d", name, j)
+			j++
+		}
+		names[nameIdx] = struct{}{}
+
+		testDef.DecodeErrors[i].Name = nameIdx
+	}
+
+	return &testDef, nil
+}
+
+func (g *Generator) getTemplate() (*template.Template, error) {
+	content := `package bson_test
+
+import (
+    "encoding/hex"
+	"time"
+
+	. "gopkg.in/check.v1"
+    "github.com/globalsign/mgo/bson"
+)
+
+func testValid(c *C, in []byte, expected []byte, result interface{}) {
+	err := bson.Unmarshal(in, result)
+	c.Assert(err, IsNil)
+
+	out, err := bson.Marshal(result)
+	c.Assert(err, IsNil)
+
+	c.Assert(string(expected), Equals, string(out), Commentf("roundtrip failed for %T, expected '%x' but got '%x'", result, expected, out))
+}
+
+func testDecodeSkip(c *C, in []byte) {
+	err := bson.Unmarshal(in, &struct{}{})
+	c.Assert(err, IsNil)
+}
+
+func testDecodeError(c *C, in []byte, result interface{}) {
+	err := bson.Unmarshal(in, result)
+	c.Assert(err, Not(IsNil))
+}
+
+{{range .}}
+{{range .Valid}}
+func (s *S) Test{{.Name}}(c *C) {
+    b, err := hex.DecodeString("{{.Bson}}")
+	c.Assert(err, IsNil)
+
+    {{if .CanonicalBson}}
+    cb, err := hex.DecodeString("{{.CanonicalBson}}")
+	c.Assert(err, IsNil)
+	{{else}}
+    cb := b
+    {{end}}
+
+    var resultD bson.D
+	testValid(c, b, cb, &resultD)
+	{{if .StructTest}}var resultS struct {
+		Element {{.TestDef.GoType}} ` + "`bson:\"{{.TestDef.TestKey}}\"`" + `
+	}
+	testValid(c, b, cb, &resultS){{end}}
+
+	testDecodeSkip(c, b)
+}
+{{end}}
+
+{{range .DecodeErrors}}
+func (s *S) Test{{.Name}}(c *C) {
+	b, err := hex.DecodeString("{{.Bson}}")
+	c.Assert(err, IsNil)
+
+	var resultD bson.D
+	testDecodeError(c, b, &resultD)
+}
+{{end}}
+{{end}}
+`
+	tmpl, err := template.New("").Parse(content)
+	if err != nil {
+		return nil, err
+	}
+	return tmpl, nil
+}
+
+func cleanupFuncName(name string) string {
+	return strings.Map(func(r rune) rune {
+		if (r >= 48 && r <= 57) || (r >= 65 && r <= 90) || (r >= 97 && r <= 122) {
+			return r
+		}
+		return '_'
+	}, name)
+}
+
+type testDef struct {
+	Description  string         `json:"description"`
+	BsonType     string         `json:"bson_type"`
+	TestKey      string         `json:"test_key"`
+	Valid        []*valid       `json:"valid"`
+	DecodeErrors []*decodeError `json:"decodeErrors"`
+	Deprecated   bool           `json:"deprecated"`
+}
+
+func (t *testDef) GoType() string {
+	switch t.BsonType {
+	case "0x01":
+		return "float64"
+	case "0x02":
+		return "string"
+	case "0x03":
+		return "bson.D"
+	case "0x04":
+		return "[]interface{}"
+	case "0x05":
+		return "[]byte"
+	case "0x07":
+		return "bson.ObjectId"
+	case "0x08":
+		return "bool"
+	case "0x09":
+		return "time.Time"
+	case "0x0E":
+		return "string"
+	case "0x10":
+		return "int32"
+	case "0x12":
+		return "int64"
+	case "0x13":
+		return "bson.Decimal"
+	default:
+		return "interface{}"
+	}
+}
+
+type valid struct {
+	Description   string `json:"description"`
+	Bson          string `json:"bson"`
+	CanonicalBson string `json:"canonical_bson"`
+
+	Name       string
+	StructTest bool
+	TestDef    *testDef
+}
+
+type decodeError struct {
+	Description string `json:"description"`
+	Bson        string `json:"bson"`
+
+	Name string
+}
diff --git a/go/vendor/github.com/globalsign/mgo/bson/compatibility.go b/go/vendor/github.com/globalsign/mgo/bson/compatibility.go
new file mode 100644
index 0000000..66efd46
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/bson/compatibility.go
@@ -0,0 +1,29 @@
+package bson
+
+// Current state of the JSON tag fallback option.
+var useJSONTagFallback = false
+var useRespectNilValues = false
+
+// SetJSONTagFallback enables or disables the JSON-tag fallback for structure tagging. When this is enabled, structures
+// without BSON tags on a field will fall back to using the JSON tag (if present).
+func SetJSONTagFallback(state bool) {
+	useJSONTagFallback = state
+}
+
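+// As an illustrative sketch only: with the fallback enabled, a field carrying
+// only a JSON tag is encoded under its JSON key.
+//
+//     bson.SetJSONTagFallback(true)
+//
+//     type T struct {
+//         FullName string `json:"full_name"`
+//     }
+//
+//     // T.FullName is now marshalled under the key "full_name"; without the
+//     // fallback, the lowercased field name "fullname" would be used.
+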
+// JSONTagFallbackState returns the current status of the JSON tag fallback compatibility option. See SetJSONTagFallback
+// for more information.
+func JSONTagFallbackState() bool {
+	return useJSONTagFallback
+}
+
+// SetRespectNilValues enables or disables serializing nil slices or maps to `null` values.
+// In other words, it enables `encoding/json`-compatible behaviour.
+func SetRespectNilValues(state bool) {
+	useRespectNilValues = state
+}
+
+// RespectNilValuesState returns the current status of the JSON nil slices and maps fallback compatibility option.
+// See SetRespectNilValues for more information.
+func RespectNilValuesState() bool {
+	return useRespectNilValues
+}
diff --git a/go/vendor/github.com/globalsign/mgo/bson/decimal.go b/go/vendor/github.com/globalsign/mgo/bson/decimal.go
new file mode 100644
index 0000000..672ba18
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/bson/decimal.go
@@ -0,0 +1,312 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package bson
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Decimal128 holds decimal128 BSON values.
+type Decimal128 struct {
+	h, l uint64
+}
+
+func (d Decimal128) String() string {
+	var pos int     // positive sign
+	var e int       // exponent
+	var h, l uint64 // significand high/low
+
+	if d.h>>63&1 == 0 {
+		pos = 1
+	}
+
+	switch d.h >> 58 & (1<<5 - 1) {
+	case 0x1F:
+		return "NaN"
+	case 0x1E:
+		return "-Inf"[pos:]
+	}
+
+	l = d.l
+	if d.h>>61&3 == 3 {
+		// Bits: 1*sign 2*ignored 14*exponent 111*significand.
+		// Implicit 0b100 prefix in significand.
+		e = int(d.h>>47&(1<<14-1)) - 6176
+		//h = 4<<47 | d.h&(1<<47-1)
+		// Spec says all of these values are out of range.
+		h, l = 0, 0
+	} else {
+		// Bits: 1*sign 14*exponent 113*significand
+		e = int(d.h>>49&(1<<14-1)) - 6176
+		h = d.h & (1<<49 - 1)
+	}
+
+	// Would be handled by the logic below, but that's trivial and common.
+	if h == 0 && l == 0 && e == 0 {
+		return "-0"[pos:]
+	}
+
+	var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
+	var last = len(repr)
+	var i = len(repr)
+	var dot = len(repr) + e
+	var rem uint32
+Loop:
+	for d9 := 0; d9 < 5; d9++ {
+		h, l, rem = divmod(h, l, 1e9)
+		for d1 := 0; d1 < 9; d1++ {
+			// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
+			if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
+				e += len(repr) - i
+				i--
+				repr[i] = '.'
+				last = i - 1
+				dot = len(repr) // Unmark.
+			}
+			c := '0' + byte(rem%10)
+			rem /= 10
+			i--
+			repr[i] = c
+			// Handle "0E+3", "1E+3", etc.
+			if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
+				last = i
+				break Loop
+			}
+			if c != '0' {
+				last = i
+			}
+			// Break early. Works without it, but why.
+			if dot > i && l == 0 && h == 0 && rem == 0 {
+				break Loop
+			}
+		}
+	}
+	repr[last-1] = '-'
+	last--
+
+	if e > 0 {
+		return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
+	}
+	if e < 0 {
+		return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
+	}
+	return string(repr[last+pos:])
+}
+
+func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
+	div64 := uint64(div)
+	a := h >> 32
+	aq := a / div64
+	ar := a % div64
+	b := ar<<32 + h&(1<<32-1)
+	bq := b / div64
+	br := b % div64
+	c := br<<32 + l>>32
+	cq := c / div64
+	cr := c % div64
+	d := cr<<32 + l&(1<<32-1)
+	dq := d / div64
+	dr := d % div64
+	return (aq<<32 | bq), (cq<<32 | dq), uint32(dr)
+}
+
+var dNaN = Decimal128{0x1F << 58, 0}
+var dPosInf = Decimal128{0x1E << 58, 0}
+var dNegInf = Decimal128{0x3E << 58, 0}
+
+func dErr(s string) (Decimal128, error) {
+	return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
+}
+
+// ParseDecimal128 parses a string and returns the corresponding value as
+// a Decimal128.
+func ParseDecimal128(s string) (Decimal128, error) {
+	orig := s
+	if s == "" {
+		return dErr(orig)
+	}
+	neg := s[0] == '-'
+	if neg || s[0] == '+' {
+		s = s[1:]
+	}
+
+	if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
+		if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
+			return dNaN, nil
+		}
+		if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
+			if neg {
+				return dNegInf, nil
+			}
+			return dPosInf, nil
+		}
+		return dErr(orig)
+	}
+
+	var h, l uint64
+	var e int
+
+	var add, ovr uint32
+	var mul uint32 = 1
+	var dot = -1
+	var digits = 0
+	var i = 0
+	for i < len(s) {
+		c := s[i]
+		if mul == 1e9 {
+			h, l, ovr = muladd(h, l, mul, add)
+			mul, add = 1, 0
+			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+				return dErr(orig)
+			}
+		}
+		if c >= '0' && c <= '9' {
+			i++
+			if c > '0' || digits > 0 {
+				digits++
+			}
+			if digits > 34 {
+				if c == '0' {
+					// Exact rounding.
+					e++
+					continue
+				}
+				return dErr(orig)
+			}
+			mul *= 10
+			add *= 10
+			add += uint32(c - '0')
+			continue
+		}
+		if c == '.' {
+			i++
+			if dot >= 0 || i == 1 && len(s) == 1 {
+				return dErr(orig)
+			}
+			if i == len(s) {
+				break
+			}
+			if s[i] < '0' || s[i] > '9' || e > 0 {
+				return dErr(orig)
+			}
+			dot = i
+			continue
+		}
+		break
+	}
+	if i == 0 {
+		return dErr(orig)
+	}
+	if mul > 1 {
+		h, l, ovr = muladd(h, l, mul, add)
+		if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+			return dErr(orig)
+		}
+	}
+	if dot >= 0 {
+		e += dot - i
+	}
+	if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
+		i++
+		eneg := s[i] == '-'
+		if eneg || s[i] == '+' {
+			i++
+			if i == len(s) {
+				return dErr(orig)
+			}
+		}
+		n := 0
+		for i < len(s) && n < 1e4 {
+			c := s[i]
+			i++
+			if c < '0' || c > '9' {
+				return dErr(orig)
+			}
+			n *= 10
+			n += int(c - '0')
+		}
+		if eneg {
+			n = -n
+		}
+		e += n
+		for e < -6176 {
+			// Subnormal.
+			var div uint32 = 1
+			for div < 1e9 && e < -6176 {
+				div *= 10
+				e++
+			}
+			var rem uint32
+			h, l, rem = divmod(h, l, div)
+			if rem > 0 {
+				return dErr(orig)
+			}
+		}
+		for e > 6111 {
+			// Clamped.
+			var mul uint32 = 1
+			for mul < 1e9 && e > 6111 {
+				mul *= 10
+				e--
+			}
+			h, l, ovr = muladd(h, l, mul, 0)
+			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+				return dErr(orig)
+			}
+		}
+		if e < -6176 || e > 6111 {
+			return dErr(orig)
+		}
+	}
+
+	if i < len(s) {
+		return dErr(orig)
+	}
+
+	h |= uint64(e+6176) & uint64(1<<14-1) << 49
+	if neg {
+		h |= 1 << 63
+	}
+	return Decimal128{h, l}, nil
+}
+
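+// As an illustrative sketch only:
+//
+//     d, err := bson.ParseDecimal128("1.5E+3")
+//     if err != nil {
+//         // handle the parse error
+//     }
+//     s := d.String() // "1.5E+3"
+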
+func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
+	mul64 := uint64(mul)
+	a := mul64 * (l & (1<<32 - 1))
+	b := a>>32 + mul64*(l>>32)
+	c := b>>32 + mul64*(h&(1<<32-1))
+	d := c>>32 + mul64*(h>>32)
+
+	a = a&(1<<32-1) + uint64(add)
+	b = b&(1<<32-1) + a>>32
+	c = c&(1<<32-1) + b>>32
+	d = d&(1<<32-1) + c>>32
+
+	return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32)
+}
diff --git a/go/vendor/github.com/globalsign/mgo/bson/decode.go b/go/vendor/github.com/globalsign/mgo/bson/decode.go
new file mode 100644
index 0000000..658856a
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/bson/decode.go
@@ -0,0 +1,1055 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"net/url"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+)
+
+type decoder struct {
+	in      []byte
+	i       int
+	docType reflect.Type
+}
+
+var typeM = reflect.TypeOf(M{})
+
+func newDecoder(in []byte) *decoder {
+	return &decoder{in, 0, typeM}
+}
+
+// --------------------------------------------------------------------------
+// Some helper functions.
+
+func corrupted() {
+	panic("Document is corrupted")
+}
+
+// --------------------------------------------------------------------------
+// Unmarshaling of documents.
+
+const (
+	setterUnknown = iota
+	setterNone
+	setterType
+	setterAddr
+)
+
+var setterStyles map[reflect.Type]int
+var setterIface reflect.Type
+var setterMutex sync.RWMutex
+
+func init() {
+	var iface Setter
+	setterIface = reflect.TypeOf(&iface).Elem()
+	setterStyles = make(map[reflect.Type]int)
+}
+
+func setterStyle(outt reflect.Type) int {
+	setterMutex.RLock()
+	style := setterStyles[outt]
+	setterMutex.RUnlock()
+	if style != setterUnknown {
+		return style
+	}
+
+	setterMutex.Lock()
+	defer setterMutex.Unlock()
+	if outt.Implements(setterIface) {
+		style = setterType
+	} else if reflect.PtrTo(outt).Implements(setterIface) {
+		style = setterAddr
+	} else {
+		style = setterNone
+	}
+	setterStyles[outt] = style
+	return style
+}
+
+func getSetter(outt reflect.Type, out reflect.Value) Setter {
+	style := setterStyle(outt)
+	if style == setterNone {
+		return nil
+	}
+	if style == setterAddr {
+		if !out.CanAddr() {
+			return nil
+		}
+		out = out.Addr()
+	} else if outt.Kind() == reflect.Ptr && out.IsNil() {
+		out.Set(reflect.New(outt.Elem()))
+	}
+	return out.Interface().(Setter)
+}
+
+func clearMap(m reflect.Value) {
+	var none reflect.Value
+	for _, k := range m.MapKeys() {
+		m.SetMapIndex(k, none)
+	}
+}
+
+func (d *decoder) readDocTo(out reflect.Value) {
+	var elemType reflect.Type
+	outt := out.Type()
+	outk := outt.Kind()
+
+	for {
+		if outk == reflect.Ptr && out.IsNil() {
+			out.Set(reflect.New(outt.Elem()))
+		}
+		if setter := getSetter(outt, out); setter != nil {
+			raw := d.readRaw(ElementDocument)
+			err := setter.SetBSON(raw)
+			if _, ok := err.(*TypeError); err != nil && !ok {
+				panic(err)
+			}
+			return
+		}
+		if outk == reflect.Ptr {
+			out = out.Elem()
+			outt = out.Type()
+			outk = out.Kind()
+			continue
+		}
+		break
+	}
+
+	var fieldsMap map[string]fieldInfo
+	var inlineMap reflect.Value
+	if outt == typeRaw {
+		out.Set(reflect.ValueOf(d.readRaw(ElementDocument)))
+		return
+	}
+
+	origout := out
+	if outk == reflect.Interface {
+		if d.docType.Kind() == reflect.Map {
+			mv := reflect.MakeMap(d.docType)
+			out.Set(mv)
+			out = mv
+		} else {
+			dv := reflect.New(d.docType).Elem()
+			out.Set(dv)
+			out = dv
+		}
+		outt = out.Type()
+		outk = outt.Kind()
+	}
+
+	docType := d.docType
+	keyType := typeString
+	convertKey := false
+	switch outk {
+	case reflect.Map:
+		keyType = outt.Key()
+		if keyType != typeString {
+			convertKey = true
+		}
+		elemType = outt.Elem()
+		if elemType == typeIface {
+			d.docType = outt
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(out.Type()))
+		} else if out.Len() > 0 {
+			clearMap(out)
+		}
+	case reflect.Struct:
+		sinfo, err := getStructInfo(out.Type())
+		if err != nil {
+			panic(err)
+		}
+		fieldsMap = sinfo.FieldsMap
+		out.Set(sinfo.Zero)
+		if sinfo.InlineMap != -1 {
+			inlineMap = out.Field(sinfo.InlineMap)
+			if !inlineMap.IsNil() && inlineMap.Len() > 0 {
+				clearMap(inlineMap)
+			}
+			elemType = inlineMap.Type().Elem()
+			if elemType == typeIface {
+				d.docType = inlineMap.Type()
+			}
+		}
+	case reflect.Slice:
+		switch outt.Elem() {
+		case typeDocElem:
+			origout.Set(d.readDocElems(outt))
+			return
+		case typeRawDocElem:
+			origout.Set(d.readRawDocElems(outt))
+			return
+		}
+		fallthrough
+	default:
+		panic("Unsupported document type for unmarshalling: " + out.Type().String())
+	}
+
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		name := d.readCStr()
+		if d.i >= end {
+			corrupted()
+		}
+
+		switch outk {
+		case reflect.Map:
+			e := reflect.New(elemType).Elem()
+			if d.readElemTo(e, kind) {
+				k := reflect.ValueOf(name)
+				if convertKey {
+					mapKeyType := out.Type().Key()
+					mapKeyKind := mapKeyType.Kind()
+
+					switch mapKeyKind {
+					case reflect.Int:
+						fallthrough
+					case reflect.Int8:
+						fallthrough
+					case reflect.Int16:
+						fallthrough
+					case reflect.Int32:
+						fallthrough
+					case reflect.Int64:
+						fallthrough
+					case reflect.Uint:
+						fallthrough
+					case reflect.Uint8:
+						fallthrough
+					case reflect.Uint16:
+						fallthrough
+					case reflect.Uint32:
+						fallthrough
+					case reflect.Uint64:
+						fallthrough
+					case reflect.Float32:
+						fallthrough
+					case reflect.Float64:
+						parsed := d.parseMapKeyAsFloat(k, mapKeyKind)
+						k = reflect.ValueOf(parsed)
+					case reflect.String:
+						mapKeyType = keyType
+					default:
+						panic("BSON map must have string or decimal keys. Got: " + outt.String())
+					}
+
+					k = k.Convert(mapKeyType)
+				}
+				out.SetMapIndex(k, e)
+			}
+		case reflect.Struct:
+			if info, ok := fieldsMap[name]; ok {
+				if info.Inline == nil {
+					d.readElemTo(out.Field(info.Num), kind)
+				} else {
+					d.readElemTo(out.FieldByIndex(info.Inline), kind)
+				}
+			} else if inlineMap.IsValid() {
+				if inlineMap.IsNil() {
+					inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+				}
+				e := reflect.New(elemType).Elem()
+				if d.readElemTo(e, kind) {
+					inlineMap.SetMapIndex(reflect.ValueOf(name), e)
+				}
+			} else {
+				d.dropElem(kind)
+			}
+		case reflect.Slice:
+		}
+
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+	d.docType = docType
+}
+
+func (decoder) parseMapKeyAsFloat(k reflect.Value, mapKeyKind reflect.Kind) float64 {
+	parsed, err := strconv.ParseFloat(k.String(), 64)
+	if err != nil {
+		panic("Map key is defined to be a decimal type (" + mapKeyKind.String() + ") but got error " +
+			err.Error())
+	}
+
+	return parsed
+}
+
+func (d *decoder) readArrayDocTo(out reflect.Value) {
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	i := 0
+	l := out.Len()
+	for d.in[d.i] != '\x00' {
+		if i >= l {
+			panic("Length mismatch on array field")
+		}
+		kind := d.readByte()
+		for d.i < end && d.in[d.i] != '\x00' {
+			d.i++
+		}
+		if d.i >= end {
+			corrupted()
+		}
+		d.i++
+		d.readElemTo(out.Index(i), kind)
+		if d.i >= end {
+			corrupted()
+		}
+		i++
+	}
+	if i != l {
+		panic("Length mismatch on array field")
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+}
+
+func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
+	tmp := make([]reflect.Value, 0, 8)
+	elemType := t.Elem()
+	if elemType == typeRawDocElem {
+		d.dropElem(ElementArray)
+		return reflect.Zero(t).Interface()
+	}
+	if elemType == typeRaw {
+		return d.readSliceOfRaw()
+	}
+
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		for d.i < end && d.in[d.i] != '\x00' {
+			d.i++
+		}
+		if d.i >= end {
+			corrupted()
+		}
+		d.i++
+		e := reflect.New(elemType).Elem()
+		if d.readElemTo(e, kind) {
+			tmp = append(tmp, e)
+		}
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+
+	n := len(tmp)
+	slice := reflect.MakeSlice(t, n, n)
+	for i := 0; i != n; i++ {
+		slice.Index(i).Set(tmp[i])
+	}
+	return slice.Interface()
+}
+
+func BSONElementSize(kind byte, offset int, buffer []byte) (int, error) {
+	switch kind {
+	case ElementFloat64: // Float64
+		return 8, nil
+	case ElementJavaScriptWithoutScope: // JavaScript without scope
+		fallthrough
+	case ElementSymbol: // Symbol
+		fallthrough
+	case ElementString: // UTF-8 string
+		size, err := getSize(offset, buffer)
+		if err != nil {
+			return 0, err
+		}
+		if size < 1 {
+			return 0, errors.New("String size can't be less than one byte")
+		}
+		size += 4
+		if offset+size > len(buffer) {
+			return 0, io.ErrUnexpectedEOF
+		}
+		if buffer[offset+size-1] != 0 {
+			return 0, errors.New("Invalid string: not zero-terminated")
+		}
+		return size, nil
+	case ElementArray: // Array
+		fallthrough
+	case ElementDocument: // Document
+		size, err := getSize(offset, buffer)
+		if err != nil {
+			return 0, err
+		}
+		if size < 5 {
+			return 0, errors.New("Declared document size is too small")
+		}
+		return size, nil
+	case ElementBinary: // Binary
+		size, err := getSize(offset, buffer)
+		if err != nil {
+			return 0, err
+		}
+		if size < 0 {
+			return 0, errors.New("Binary data size can't be negative")
+		}
+		return size + 5, nil
+	case Element06: // Undefined (obsolete, but still seen in the wild)
+		return 0, nil
+	case ElementObjectId: // ObjectId
+		return 12, nil
+	case ElementBool: // Bool
+		return 1, nil
+	case ElementDatetime: // UTC datetime
+		return 8, nil
+	case ElementNil: // Nil
+		return 0, nil
+	case ElementRegEx: // RegEx
+		end := offset
+		for i := 0; i < 2; i++ {
+			for end < len(buffer) && buffer[end] != '\x00' {
+				end++
+			}
+			end++
+		}
+		if end > len(buffer) {
+			return 0, io.ErrUnexpectedEOF
+		}
+		return end - offset, nil
+	case ElementDBPointer: // DBPointer
+		size, err := getSize(offset, buffer)
+		if err != nil {
+			return 0, err
+		}
+		if size < 1 {
+			return 0, errors.New("String size can't be less than one byte")
+		}
+		return size + 12 + 4, nil
+	case ElementJavaScriptWithScope: // JavaScript with scope
+		size, err := getSize(offset, buffer)
+		if err != nil {
+			return 0, err
+		}
+		if size < 4+5+5 {
+			return 0, errors.New("Declared document element is too small")
+		}
+		return size, nil
+	case ElementInt32: // Int32
+		return 4, nil
+	case ElementTimestamp: // Mongo-specific timestamp
+		return 8, nil
+	case ElementInt64: // Int64
+		return 8, nil
+	case ElementDecimal128: // Decimal128
+		return 16, nil
+	case ElementMaxKey: // Max key
+		return 0, nil
+	case ElementMinKey: // Min key
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("Unknown element kind (0x%02X)", kind)
+	}
+}
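+
+// A minimal sketch of how BSONElementSize can be used to walk the elements of a
+// raw document without decoding the values, assuming standard BSON framing
+// (int32 size, kind/name/value triples, trailing 0x00). The helper name
+// walkRawElements and its callback are illustrative, not part of the package API.
+func walkRawElements(doc []byte, visit func(kind byte, name string, value []byte)) error {
+	if len(doc) < MinDocumentSize {
+		return io.ErrUnexpectedEOF
+	}
+	i := 4 // skip the int32 document size header
+	for i < len(doc) && doc[i] != 0x00 {
+		kind := doc[i]
+		i++
+		start := i
+		for i < len(doc) && doc[i] != 0x00 {
+			i++
+		}
+		if i >= len(doc) {
+			return io.ErrUnexpectedEOF
+		}
+		name := string(doc[start:i])
+		i++ // skip the element name's terminating 0x00
+		size, err := BSONElementSize(kind, i, doc)
+		if err != nil {
+			return err
+		}
+		if i+size > len(doc) {
+			return io.ErrUnexpectedEOF
+		}
+		visit(kind, name, doc[i:i+size])
+		i += size
+	}
+	return nil
+}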
+
+func (d *decoder) readRaw(kind byte) Raw {
+	size, err := BSONElementSize(kind, d.i, d.in)
+	if err != nil {
+		corrupted()
+	}
+	if d.i+size > len(d.in) {
+		corrupted()
+	}
+	d.i += size
+	return Raw{
+		Kind: kind,
+		Data: d.in[d.i-size : d.i],
+	}
+}
+
+func (d *decoder) readSliceOfRaw() interface{} {
+	tmp := make([]Raw, 0, 8)
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		for d.i < end && d.in[d.i] != '\x00' {
+			d.i++
+		}
+		if d.i >= end {
+			corrupted()
+		}
+		d.i++
+		e := d.readRaw(kind)
+		tmp = append(tmp, e)
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+	return tmp
+}
+
+var typeSlice = reflect.TypeOf([]interface{}{})
+var typeIface = typeSlice.Elem()
+
+func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
+	docType := d.docType
+	d.docType = typ
+	slice := make([]DocElem, 0, 8)
+	d.readDocWith(func(kind byte, name string) {
+		e := DocElem{Name: name}
+		v := reflect.ValueOf(&e.Value)
+		if d.readElemTo(v.Elem(), kind) {
+			slice = append(slice, e)
+		}
+	})
+	slicev := reflect.New(typ).Elem()
+	slicev.Set(reflect.ValueOf(slice))
+	d.docType = docType
+	return slicev
+}
+
+func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
+	docType := d.docType
+	d.docType = typ
+	slice := make([]RawDocElem, 0, 8)
+	d.readDocWith(func(kind byte, name string) {
+		e := RawDocElem{Name: name, Value: d.readRaw(kind)}
+		slice = append(slice, e)
+	})
+	slicev := reflect.New(typ).Elem()
+	slicev.Set(reflect.ValueOf(slice))
+	d.docType = docType
+	return slicev
+}
+
+func (d *decoder) readDocWith(f func(kind byte, name string)) {
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		name := d.readCStr()
+		if d.i >= end {
+			corrupted()
+		}
+		f(kind, name)
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+}
+
+// --------------------------------------------------------------------------
+// Unmarshaling of individual elements within a document.
+func (d *decoder) dropElem(kind byte) {
+	size, err := BSONElementSize(kind, d.i, d.in)
+	if err != nil {
+		corrupted()
+	}
+	if d.i+size > len(d.in) {
+		corrupted()
+	}
+	d.i += size
+}
+
+// Attempt to decode an element from the document and put it into out.
+// If the types are not compatible, the returned good value will be
+// false and out will be unchanged.
+func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
+	outt := out.Type()
+
+	if outt == typeRaw {
+		out.Set(reflect.ValueOf(d.readRaw(kind)))
+		return true
+	}
+
+	if outt == typeRawPtr {
+		raw := d.readRaw(kind)
+		out.Set(reflect.ValueOf(&raw))
+		return true
+	}
+
+	if kind == ElementDocument {
+		// Delegate unmarshaling of documents.
+		outt := out.Type()
+		outk := out.Kind()
+		switch outk {
+		case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
+			d.readDocTo(out)
+			return true
+		}
+		if setterStyle(outt) != setterNone {
+			d.readDocTo(out)
+			return true
+		}
+		if outk == reflect.Slice {
+			switch outt.Elem() {
+			case typeDocElem:
+				out.Set(d.readDocElems(outt))
+			case typeRawDocElem:
+				out.Set(d.readRawDocElems(outt))
+			default:
+				d.dropElem(kind)
+			}
+			return true
+		}
+		d.dropElem(kind)
+		return true
+	}
+
+	if setter := getSetter(outt, out); setter != nil {
+		err := setter.SetBSON(d.readRaw(kind))
+		if err == ErrSetZero {
+			out.Set(reflect.Zero(outt))
+			return true
+		}
+		if err == nil {
+			return true
+		}
+		if _, ok := err.(*TypeError); !ok {
+			panic(err)
+		}
+		return false
+	}
+
+	var in interface{}
+
+	switch kind {
+	case ElementFloat64:
+		in = d.readFloat64()
+	case ElementString:
+		in = d.readStr()
+	case ElementDocument:
+		panic("Can't happen. Handled above.")
+	case ElementArray:
+		outt := out.Type()
+		if setterStyle(outt) != setterNone {
+			// Skip the value so its data is handed to the setter below.
+			d.dropElem(kind)
+			break
+		}
+		for outt.Kind() == reflect.Ptr {
+			outt = outt.Elem()
+		}
+		switch outt.Kind() {
+		case reflect.Array:
+			d.readArrayDocTo(out)
+			return true
+		case reflect.Slice:
+			in = d.readSliceDoc(outt)
+		default:
+			in = d.readSliceDoc(typeSlice)
+		}
+	case ElementBinary:
+		b := d.readBinary()
+		if b.Kind == BinaryGeneric || b.Kind == BinaryBinaryOld {
+			in = b.Data
+		} else {
+			in = b
+		}
+	case Element06: // Undefined (obsolete, but still seen in the wild)
+		in = Undefined
+	case ElementObjectId:
+		in = ObjectId(d.readBytes(12))
+	case ElementBool:
+		in = d.readBool()
+	case ElementDatetime: // UTC datetime
+		// BSON datetimes are stored as milliseconds since the Unix epoch.
+		i := d.readInt64()
+		if i == -62135596800000 {
+			in = time.Time{} // In UTC for convenience.
+		} else {
+			in = time.Unix(i/1e3, i%1e3*1e6).UTC()
+		}
+	case ElementNil:
+		in = nil
+	case ElementRegEx:
+		in = d.readRegEx()
+	case ElementDBPointer:
+		in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))}
+	case ElementJavaScriptWithoutScope:
+		in = JavaScript{Code: d.readStr()}
+	case ElementSymbol:
+		in = Symbol(d.readStr())
+	case ElementJavaScriptWithScope:
+		start := d.i
+		l := int(d.readInt32())
+		js := JavaScript{d.readStr(), make(M)}
+		d.readDocTo(reflect.ValueOf(js.Scope))
+		if d.i != start+l {
+			corrupted()
+		}
+		in = js
+	case ElementInt32:
+		in = int(d.readInt32())
+	case ElementTimestamp: // Mongo-specific timestamp
+		in = MongoTimestamp(d.readInt64())
+	case ElementInt64:
+		switch out.Type() {
+		case typeTimeDuration:
+			in = time.Duration(d.readInt64()) * time.Millisecond
+		default:
+			in = d.readInt64()
+		}
+	case ElementDecimal128:
+		in = Decimal128{
+			l: uint64(d.readInt64()),
+			h: uint64(d.readInt64()),
+		}
+	case ElementMaxKey:
+		in = MaxKey
+	case ElementMinKey:
+		in = MinKey
+	default:
+		panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
+	}
+
+	if in == nil {
+		out.Set(reflect.Zero(outt))
+		return true
+	}
+
+	outk := outt.Kind()
+
+	// Dereference and initialize pointer if necessary.
+	first := true
+	for outk == reflect.Ptr {
+		if !out.IsNil() {
+			out = out.Elem()
+		} else {
+			elem := reflect.New(outt.Elem())
+			if first {
+				// Only set if value is compatible.
+				first = false
+				defer func(out, elem reflect.Value) {
+					if good {
+						out.Set(elem)
+					}
+				}(out, elem)
+			} else {
+				out.Set(elem)
+			}
+			out = elem
+		}
+		outt = out.Type()
+		outk = outt.Kind()
+	}
+
+	inv := reflect.ValueOf(in)
+	if outt == inv.Type() {
+		out.Set(inv)
+		return true
+	}
+
+	switch outk {
+	case reflect.Interface:
+		out.Set(inv)
+		return true
+	case reflect.String:
+		switch inv.Kind() {
+		case reflect.String:
+			out.SetString(inv.String())
+			return true
+		case reflect.Slice:
+			if b, ok := in.([]byte); ok {
+				out.SetString(string(b))
+				return true
+			}
+		case reflect.Int, reflect.Int64:
+			if outt == typeJSONNumber {
+				out.SetString(strconv.FormatInt(inv.Int(), 10))
+				return true
+			}
+		case reflect.Float64:
+			if outt == typeJSONNumber {
+				out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64))
+				return true
+			}
+		}
+	case reflect.Slice, reflect.Array:
+		// Remember, array (0x04) slices are built with the correct
+		// element type. If we are here, this must be a cross-BSON-kind
+		// conversion (e.g. a 0x02 string or 0x05 binary element being
+		// unmarshalled into a []byte).
+		if outt.Elem().Kind() != reflect.Uint8 {
+			break
+		}
+		switch inv.Kind() {
+		case reflect.String:
+			slice := []byte(inv.String())
+			out.Set(reflect.ValueOf(slice))
+			return true
+		case reflect.Slice:
+			switch outt.Kind() {
+			case reflect.Array:
+				reflect.Copy(out, inv)
+			case reflect.Slice:
+				out.SetBytes(inv.Bytes())
+			}
+			return true
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch inv.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetInt(inv.Int())
+			return true
+		case reflect.Float32, reflect.Float64:
+			out.SetInt(int64(inv.Float()))
+			return true
+		case reflect.Bool:
+			if inv.Bool() {
+				out.SetInt(1)
+			} else {
+				out.SetInt(0)
+			}
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON.")
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch inv.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetUint(uint64(inv.Int()))
+			return true
+		case reflect.Float32, reflect.Float64:
+			out.SetUint(uint64(inv.Float()))
+			return true
+		case reflect.Bool:
+			if inv.Bool() {
+				out.SetUint(1)
+			} else {
+				out.SetUint(0)
+			}
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON.")
+		}
+	case reflect.Float32, reflect.Float64:
+		switch inv.Kind() {
+		case reflect.Float32, reflect.Float64:
+			out.SetFloat(inv.Float())
+			return true
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetFloat(float64(inv.Int()))
+			return true
+		case reflect.Bool:
+			if inv.Bool() {
+				out.SetFloat(1)
+			} else {
+				out.SetFloat(0)
+			}
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON.")
+		}
+	case reflect.Bool:
+		switch inv.Kind() {
+		case reflect.Bool:
+			out.SetBool(inv.Bool())
+			return true
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetBool(inv.Int() != 0)
+			return true
+		case reflect.Float32, reflect.Float64:
+			out.SetBool(inv.Float() != 0)
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON.")
+		}
+	case reflect.Struct:
+		if outt == typeURL && inv.Kind() == reflect.String {
+			u, err := url.Parse(inv.String())
+			if err != nil {
+				panic(err)
+			}
+			out.Set(reflect.ValueOf(u).Elem())
+			return true
+		}
+		if outt == typeBinary {
+			if b, ok := in.([]byte); ok {
+				out.Set(reflect.ValueOf(Binary{Data: b}))
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// --------------------------------------------------------------------------
+// Parsers of basic types.
+
+func (d *decoder) readRegEx() RegEx {
+	re := RegEx{}
+	re.Pattern = d.readCStr()
+	re.Options = d.readCStr()
+	return re
+}
+
+func (d *decoder) readBinary() Binary {
+	l := d.readInt32()
+	b := Binary{}
+	b.Kind = d.readByte()
+	if b.Kind == BinaryBinaryOld && l > 4 {
+		// Weird obsolete format with redundant length.
+		rl := d.readInt32()
+		if rl != l-4 {
+			corrupted()
+		}
+		l = rl
+	}
+	b.Data = d.readBytes(l)
+	return b
+}
+
+func (d *decoder) readStr() string {
+	l := d.readInt32()
+	b := d.readBytes(l - 1)
+	if d.readByte() != '\x00' {
+		corrupted()
+	}
+	return string(b)
+}
+
+func (d *decoder) readCStr() string {
+	start := d.i
+	end := start
+	l := len(d.in)
+	for ; end != l; end++ {
+		if d.in[end] == '\x00' {
+			break
+		}
+	}
+	d.i = end + 1
+	if d.i > l {
+		corrupted()
+	}
+	return string(d.in[start:end])
+}
+
+func (d *decoder) readBool() bool {
+	b := d.readByte()
+	if b == 0 {
+		return false
+	}
+	if b == 1 {
+		return true
+	}
+	panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b))
+}
+
+func (d *decoder) readFloat64() float64 {
+	return math.Float64frombits(uint64(d.readInt64()))
+}
+
+func (d *decoder) readInt32() int32 {
+	b := d.readBytes(4)
+	return int32((uint32(b[0]) << 0) |
+		(uint32(b[1]) << 8) |
+		(uint32(b[2]) << 16) |
+		(uint32(b[3]) << 24))
+}
+
+func getSize(offset int, b []byte) (int, error) {
+	if offset+4 > len(b) {
+		return 0, io.ErrUnexpectedEOF
+	}
+	return int((uint32(b[offset]) << 0) |
+		(uint32(b[offset+1]) << 8) |
+		(uint32(b[offset+2]) << 16) |
+		(uint32(b[offset+3]) << 24)), nil
+}
+
+func (d *decoder) readInt64() int64 {
+	b := d.readBytes(8)
+	return int64((uint64(b[0]) << 0) |
+		(uint64(b[1]) << 8) |
+		(uint64(b[2]) << 16) |
+		(uint64(b[3]) << 24) |
+		(uint64(b[4]) << 32) |
+		(uint64(b[5]) << 40) |
+		(uint64(b[6]) << 48) |
+		(uint64(b[7]) << 56))
+}
+
+func (d *decoder) readByte() byte {
+	i := d.i
+	d.i++
+	if d.i > len(d.in) {
+		corrupted()
+	}
+	return d.in[i]
+}
+
+func (d *decoder) readBytes(length int32) []byte {
+	if length < 0 {
+		corrupted()
+	}
+	start := d.i
+	d.i += int(length)
+	if d.i < start || d.i > len(d.in) {
+		corrupted()
+	}
+	return d.in[start : start+int(length)]
+}
diff --git a/go/vendor/github.com/globalsign/mgo/bson/encode.go b/go/vendor/github.com/globalsign/mgo/bson/encode.go
new file mode 100644
index 0000000..d0c6b2a
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/bson/encode.go
@@ -0,0 +1,645 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"sort"
+	"strconv"
+	"sync"
+	"time"
+)
+
+// --------------------------------------------------------------------------
+// Some internal infrastructure.
+
+var (
+	typeBinary         = reflect.TypeOf(Binary{})
+	typeObjectId       = reflect.TypeOf(ObjectId(""))
+	typeDBPointer      = reflect.TypeOf(DBPointer{"", ObjectId("")})
+	typeSymbol         = reflect.TypeOf(Symbol(""))
+	typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
+	typeOrderKey       = reflect.TypeOf(MinKey)
+	typeDocElem        = reflect.TypeOf(DocElem{})
+	typeRawDocElem     = reflect.TypeOf(RawDocElem{})
+	typeRaw            = reflect.TypeOf(Raw{})
+	typeRawPtr         = reflect.PtrTo(reflect.TypeOf(Raw{}))
+	typeURL            = reflect.TypeOf(url.URL{})
+	typeTime           = reflect.TypeOf(time.Time{})
+	typeString         = reflect.TypeOf("")
+	typeJSONNumber     = reflect.TypeOf(json.Number(""))
+	typeTimeDuration   = reflect.TypeOf(time.Duration(0))
+)
+
+var (
+	// spec for []uint8 or []byte encoding
+	arrayOps = map[string]bool{
+		"$in":  true,
+		"$nin": true,
+		"$all": true,
+	}
+)
+
+const itoaCacheSize = 32
+
+const (
+	getterUnknown = iota
+	getterNone
+	getterTypeVal
+	getterTypePtr
+	getterAddr
+)
+
+var itoaCache []string
+
+var getterStyles map[reflect.Type]int
+var getterIface reflect.Type
+var getterMutex sync.RWMutex
+
+func init() {
+	itoaCache = make([]string, itoaCacheSize)
+	for i := 0; i != itoaCacheSize; i++ {
+		itoaCache[i] = strconv.Itoa(i)
+	}
+	var iface Getter
+	getterIface = reflect.TypeOf(&iface).Elem()
+	getterStyles = make(map[reflect.Type]int)
+}
+
+func itoa(i int) string {
+	if i < itoaCacheSize {
+		return itoaCache[i]
+	}
+	return strconv.Itoa(i)
+}
+
+func getterStyle(outt reflect.Type) int {
+	getterMutex.RLock()
+	style := getterStyles[outt]
+	getterMutex.RUnlock()
+	if style != getterUnknown {
+		return style
+	}
+
+	getterMutex.Lock()
+	defer getterMutex.Unlock()
+	if outt.Implements(getterIface) {
+		vt := outt
+		for vt.Kind() == reflect.Ptr {
+			vt = vt.Elem()
+		}
+		if vt.Implements(getterIface) {
+			style = getterTypeVal
+		} else {
+			style = getterTypePtr
+		}
+	} else if reflect.PtrTo(outt).Implements(getterIface) {
+		style = getterAddr
+	} else {
+		style = getterNone
+	}
+	getterStyles[outt] = style
+	return style
+}
+
+func getGetter(outt reflect.Type, out reflect.Value) Getter {
+	style := getterStyle(outt)
+	if style == getterNone {
+		return nil
+	}
+	if style == getterAddr {
+		if !out.CanAddr() {
+			return nil
+		}
+		return out.Addr().Interface().(Getter)
+	}
+	if style == getterTypeVal && out.Kind() == reflect.Ptr && out.IsNil() {
+		return nil
+	}
+	return out.Interface().(Getter)
+}
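+
+// A minimal sketch of a type customising its marshalled form through the Getter
+// interface resolved by getGetter above; the celsius type is illustrative, not
+// part of the package API.
+type celsius float64
+
+// GetBSON marshals the temperature as a small document rather than a bare
+// float64, so the unit travels with the value.
+func (c celsius) GetBSON() (interface{}, error) {
+	return M{"unit": "C", "value": float64(c)}, nil
+}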
+
+// --------------------------------------------------------------------------
+// Marshaling of the document value itself.
+
+type encoder struct {
+	out []byte
+}
+
+func (e *encoder) addDoc(v reflect.Value) {
+	for {
+		if vi, ok := v.Interface().(Getter); ok {
+			getv, err := vi.GetBSON()
+			if err != nil {
+				panic(err)
+			}
+			v = reflect.ValueOf(getv)
+			continue
+		}
+		if v.Kind() == reflect.Ptr {
+			v = v.Elem()
+			continue
+		}
+		break
+	}
+
+	if v.Type() == typeRaw {
+		raw := v.Interface().(Raw)
+		if raw.Kind != 0x03 && raw.Kind != 0x00 {
+			panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
+		}
+		if len(raw.Data) == 0 {
+			panic("Attempted to marshal empty Raw document")
+		}
+		e.addBytes(raw.Data...)
+		return
+	}
+
+	start := e.reserveInt32()
+
+	switch v.Kind() {
+	case reflect.Map:
+		e.addMap(v)
+	case reflect.Struct:
+		e.addStruct(v)
+	case reflect.Array, reflect.Slice:
+		e.addSlice(v)
+	default:
+		panic("Can't marshal " + v.Type().String() + " as a BSON document")
+	}
+
+	e.addBytes(0)
+	e.setInt32(start, int32(len(e.out)-start))
+}
+
+func (e *encoder) addMap(v reflect.Value) {
+	for _, k := range v.MapKeys() {
+		e.addElem(fmt.Sprint(k), v.MapIndex(k), false)
+	}
+}
+
+func (e *encoder) addStruct(v reflect.Value) {
+	sinfo, err := getStructInfo(v.Type())
+	if err != nil {
+		panic(err)
+	}
+	var value reflect.Value
+	if sinfo.InlineMap >= 0 {
+		m := v.Field(sinfo.InlineMap)
+		if m.Len() > 0 {
+			for _, k := range m.MapKeys() {
+				ks := k.String()
+				if _, found := sinfo.FieldsMap[ks]; found {
+					panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
+				}
+				e.addElem(ks, m.MapIndex(k), false)
+			}
+		}
+	}
+	for _, info := range sinfo.FieldsList {
+		if info.Inline == nil {
+			value = v.Field(info.Num)
+		} else {
+			// As pointers to structs are allowed here, there is no
+			// guarantee that the pointer won't be nil.
+			//
+			// That is expected, allowed behaviour, so info.Inline may
+			// contain an index path that goes through a nil pointer;
+			// that is why we call v.FieldByIndex via safeFieldByIndex
+			// and simply skip the field if it panics.
+			field, errField := safeFieldByIndex(v, info.Inline)
+			if errField != nil {
+				continue
+			}
+
+			value = field
+		}
+		if info.OmitEmpty && isZero(value) {
+			continue
+		}
+		if useRespectNilValues &&
+			(value.Kind() == reflect.Slice || value.Kind() == reflect.Map) &&
+			value.IsNil() {
+			e.addElem(info.Key, reflect.ValueOf(nil), info.MinSize)
+			continue
+		}
+		e.addElem(info.Key, value, info.MinSize)
+	}
+}
+
+func safeFieldByIndex(v reflect.Value, index []int) (result reflect.Value, err error) {
+	defer func() {
+		if recovered := recover(); recovered != nil {
+			switch r := recovered.(type) {
+			case string:
+				err = fmt.Errorf("%s", r)
+			case error:
+				err = r
+			}
+		}
+	}()
+
+	result = v.FieldByIndex(index)
+	return
+}
+
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Ptr, reflect.Interface:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		if vt == typeTime {
+			return v.Interface().(time.Time).IsZero()
+		}
+		for i := 0; i < v.NumField(); i++ {
+			if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
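+
+// A minimal sketch of how isZero drives the ",omitempty" struct tag consumed in
+// addStruct above: with the illustrative type below, Marshal omits nickname and
+// age whenever those fields hold their zero values.
+type personDoc struct {
+	Name     string `bson:"name"`
+	Nickname string `bson:"nickname,omitempty"`
+	Age      int    `bson:"age,omitempty"`
+}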
+
+func (e *encoder) addSlice(v reflect.Value) {
+	vi := v.Interface()
+	if d, ok := vi.(D); ok {
+		for _, elem := range d {
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	if d, ok := vi.(RawD); ok {
+		for _, elem := range d {
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	l := v.Len()
+	et := v.Type().Elem()
+	if et == typeDocElem {
+		for i := 0; i < l; i++ {
+			elem := v.Index(i).Interface().(DocElem)
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	if et == typeRawDocElem {
+		for i := 0; i < l; i++ {
+			elem := v.Index(i).Interface().(RawDocElem)
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	for i := 0; i < l; i++ {
+		e.addElem(itoa(i), v.Index(i), false)
+	}
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of elements in a document.
+
+func (e *encoder) addElemName(kind byte, name string) {
+	e.addBytes(kind)
+	e.addBytes([]byte(name)...)
+	e.addBytes(0)
+}
+
+func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
+
+	if !v.IsValid() {
+		e.addElemName(0x0A, name)
+		return
+	}
+
+	if getter := getGetter(v.Type(), v); getter != nil {
+		getv, err := getter.GetBSON()
+		if err != nil {
+			panic(err)
+		}
+		e.addElem(name, reflect.ValueOf(getv), minSize)
+		return
+	}
+
+	switch v.Kind() {
+
+	case reflect.Interface:
+		e.addElem(name, v.Elem(), minSize)
+
+	case reflect.Ptr:
+		e.addElem(name, v.Elem(), minSize)
+
+	case reflect.String:
+		s := v.String()
+		switch v.Type() {
+		case typeObjectId:
+			if len(s) != 12 {
+				panic("ObjectIDs must be exactly 12 bytes long (got " +
+					strconv.Itoa(len(s)) + ")")
+			}
+			e.addElemName(0x07, name)
+			e.addBytes([]byte(s)...)
+		case typeSymbol:
+			e.addElemName(0x0E, name)
+			e.addStr(s)
+		case typeJSONNumber:
+			n := v.Interface().(json.Number)
+			if i, err := n.Int64(); err == nil {
+				e.addElemName(0x12, name)
+				e.addInt64(i)
+			} else if f, err := n.Float64(); err == nil {
+				e.addElemName(0x01, name)
+				e.addFloat64(f)
+			} else {
+				panic("failed to convert json.Number to a number: " + s)
+			}
+		default:
+			e.addElemName(0x02, name)
+			e.addStr(s)
+		}
+
+	case reflect.Float32, reflect.Float64:
+		e.addElemName(0x01, name)
+		e.addFloat64(v.Float())
+
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		u := v.Uint()
+		if int64(u) < 0 {
+			panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
+		} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
+			e.addElemName(0x10, name)
+			e.addInt32(int32(u))
+		} else {
+			e.addElemName(0x12, name)
+			e.addInt64(int64(u))
+		}
+
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch v.Type() {
+		case typeMongoTimestamp:
+			e.addElemName(0x11, name)
+			e.addInt64(v.Int())
+
+		case typeOrderKey:
+			if v.Int() == int64(MaxKey) {
+				e.addElemName(0x7F, name)
+			} else {
+				e.addElemName(0xFF, name)
+			}
+		case typeTimeDuration:
+			// Stored as int64
+			e.addElemName(0x12, name)
+
+			e.addInt64(int64(v.Int() / 1e6))
+		default:
+			i := v.Int()
+			if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
+				// It fits into an int32, encode as such.
+				e.addElemName(0x10, name)
+				e.addInt32(int32(i))
+			} else {
+				e.addElemName(0x12, name)
+				e.addInt64(i)
+			}
+		}
+
+	case reflect.Bool:
+		e.addElemName(0x08, name)
+		if v.Bool() {
+			e.addBytes(1)
+		} else {
+			e.addBytes(0)
+		}
+
+	case reflect.Map:
+		e.addElemName(0x03, name)
+		e.addDoc(v)
+
+	case reflect.Slice:
+		vt := v.Type()
+		et := vt.Elem()
+		if et.Kind() == reflect.Uint8 {
+			if arrayOps[name] {
+				e.addElemName(0x04, name)
+				e.addDoc(v)
+			} else {
+				e.addElemName(0x05, name)
+				e.addBinary(0x00, v.Bytes())
+			}
+		} else if et == typeDocElem || et == typeRawDocElem {
+			e.addElemName(0x03, name)
+			e.addDoc(v)
+		} else {
+			e.addElemName(0x04, name)
+			e.addDoc(v)
+		}
+
+	case reflect.Array:
+		et := v.Type().Elem()
+		if et.Kind() == reflect.Uint8 {
+			if arrayOps[name] {
+				e.addElemName(0x04, name)
+				e.addDoc(v)
+			} else {
+				e.addElemName(0x05, name)
+				if v.CanAddr() {
+					e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte))
+				} else {
+					n := v.Len()
+					e.addInt32(int32(n))
+					e.addBytes(0x00)
+					for i := 0; i < n; i++ {
+						el := v.Index(i)
+						e.addBytes(byte(el.Uint()))
+					}
+				}
+			}
+		} else {
+			e.addElemName(0x04, name)
+			e.addDoc(v)
+		}
+
+	case reflect.Struct:
+		switch s := v.Interface().(type) {
+
+		case Raw:
+			kind := s.Kind
+			if kind == 0x00 {
+				kind = 0x03
+			}
+			if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
+				panic("Attempted to marshal empty Raw document")
+			}
+			e.addElemName(kind, name)
+			e.addBytes(s.Data...)
+
+		case Binary:
+			e.addElemName(0x05, name)
+			e.addBinary(s.Kind, s.Data)
+
+		case Decimal128:
+			e.addElemName(0x13, name)
+			e.addInt64(int64(s.l))
+			e.addInt64(int64(s.h))
+
+		case DBPointer:
+			e.addElemName(0x0C, name)
+			e.addStr(s.Namespace)
+			if len(s.Id) != 12 {
+				panic("ObjectIDs must be exactly 12 bytes long (got " +
+					strconv.Itoa(len(s.Id)) + ")")
+			}
+			e.addBytes([]byte(s.Id)...)
+
+		case RegEx:
+			e.addElemName(0x0B, name)
+			e.addCStr(s.Pattern)
+			options := runes(s.Options)
+			sort.Sort(options)
+			e.addCStr(string(options))
+
+		case JavaScript:
+			if s.Scope == nil {
+				e.addElemName(0x0D, name)
+				e.addStr(s.Code)
+			} else {
+				e.addElemName(0x0F, name)
+				start := e.reserveInt32()
+				e.addStr(s.Code)
+				e.addDoc(reflect.ValueOf(s.Scope))
+				e.setInt32(start, int32(len(e.out)-start))
+			}
+
+		case time.Time:
+			// BSON datetimes are stored as milliseconds since the Unix epoch.
+			e.addElemName(0x09, name)
+			e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))
+
+		case url.URL:
+			e.addElemName(0x02, name)
+			e.addStr(s.String())
+
+		case undefined:
+			e.addElemName(0x06, name)
+
+		default:
+			e.addElemName(0x03, name)
+			e.addDoc(v)
+		}
+
+	default:
+		panic("Can't marshal " + v.Type().String() + " in a BSON document")
+	}
+}
+
+// --------------------------------------------------------------------------
+// Helper type for sorting RegEx options.
+type runes []rune
+
+func (a runes) Len() int           { return len(a) }
+func (a runes) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a runes) Less(i, j int) bool { return a[i] < a[j] }
+
+// --------------------------------------------------------------------------
+// Marshaling of base types.
+
+func (e *encoder) addBinary(subtype byte, v []byte) {
+	if subtype == 0x02 {
+		// Obsolete "old binary" subtype (0x02) with a redundant inner length prefix.
+		e.addInt32(int32(len(v) + 4))
+		e.addBytes(subtype)
+		e.addInt32(int32(len(v)))
+	} else {
+		e.addInt32(int32(len(v)))
+		e.addBytes(subtype)
+	}
+	e.addBytes(v...)
+}
+
+func (e *encoder) addStr(v string) {
+	e.addInt32(int32(len(v) + 1))
+	e.addCStr(v)
+}
+
+func (e *encoder) addCStr(v string) {
+	e.addBytes([]byte(v)...)
+	e.addBytes(0)
+}
+
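+// reserveInt32 appends four placeholder bytes and returns their offset so the
+// caller can back-patch the final little-endian length with setInt32 once the
+// enclosed document (or code-with-scope block) has been written; see addDoc.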
+func (e *encoder) reserveInt32() (pos int) {
+	pos = len(e.out)
+	e.addBytes(0, 0, 0, 0)
+	return pos
+}
+
+func (e *encoder) setInt32(pos int, v int32) {
+	e.out[pos+0] = byte(v)
+	e.out[pos+1] = byte(v >> 8)
+	e.out[pos+2] = byte(v >> 16)
+	e.out[pos+3] = byte(v >> 24)
+}
+
+func (e *encoder) addInt32(v int32) {
+	u := uint32(v)
+	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
+}
+
+func (e *encoder) addInt64(v int64) {
+	u := uint64(v)
+	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
+		byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
+}
+
+func (e *encoder) addFloat64(v float64) {
+	e.addInt64(int64(math.Float64bits(v)))
+}
+
+func (e *encoder) addBytes(v ...byte) {
+	e.out = append(e.out, v...)
+}
diff --git a/go/vendor/github.com/globalsign/mgo/bson/json.go b/go/vendor/github.com/globalsign/mgo/bson/json.go
new file mode 100644
index 0000000..045c713
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/bson/json.go
@@ -0,0 +1,384 @@
+package bson
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/globalsign/mgo/internal/json"
+)
+
+// UnmarshalJSON unmarshals a JSON value that may hold non-standard
+// syntax as defined in BSON's extended JSON specification.
+func UnmarshalJSON(data []byte, value interface{}) error {
+	d := json.NewDecoder(bytes.NewBuffer(data))
+	d.Extend(&jsonExt)
+	return d.Decode(value)
+}
+
+// MarshalJSON marshals a JSON value that may hold non-standard
+// syntax as defined in BSON's extended JSON specification.
+func MarshalJSON(value interface{}) ([]byte, error) {
+	var buf bytes.Buffer
+	e := json.NewEncoder(&buf)
+	e.Extend(&jsonExt)
+	err := e.Encode(value)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
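+
+// A minimal sketch of the extended syntax these helpers accept: function-style
+// constructors such as ObjectId(...) and ISODate(...) decode into their BSON
+// counterparts and re-encode through the encoders registered in init below.
+// The function name exampleExtendedJSON is illustrative, not part of the
+// package API.
+func exampleExtendedJSON() error {
+	in := []byte(`{"_id": ObjectId("5a934e000102030405000000"), "when": ISODate("2018-02-26T00:00:00Z")}`)
+	var doc M
+	if err := UnmarshalJSON(in, &doc); err != nil {
+		return err
+	}
+	out, err := MarshalJSON(doc)
+	if err != nil {
+		return err
+	}
+	fmt.Printf("%s\n", out)
+	return nil
+}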
+
+// jdec is used internally by the JSON decoding functions
+// so they may unmarshal functions without getting into endless
+// recursion due to keyed objects.
+func jdec(data []byte, value interface{}) error {
+	d := json.NewDecoder(bytes.NewBuffer(data))
+	d.Extend(&funcExt)
+	return d.Decode(value)
+}
+
+var jsonExt json.Extension
+var funcExt json.Extension
+
+// TODO
+// - Shell regular expressions ("/regexp/opts")
+
+func init() {
+	jsonExt.DecodeUnquotedKeys(true)
+	jsonExt.DecodeTrailingCommas(true)
+
+	funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary")
+	jsonExt.DecodeKeyed("$binary", jdecBinary)
+	jsonExt.DecodeKeyed("$binaryFunc", jdecBinary)
+	jsonExt.EncodeType([]byte(nil), jencBinarySlice)
+	jsonExt.EncodeType(Binary{}, jencBinaryType)
+
+	funcExt.DecodeFunc("ISODate", "$dateFunc", "S")
+	funcExt.DecodeFunc("new Date", "$dateFunc", "S")
+	jsonExt.DecodeKeyed("$date", jdecDate)
+	jsonExt.DecodeKeyed("$dateFunc", jdecDate)
+	jsonExt.EncodeType(time.Time{}, jencDate)
+
+	funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i")
+	jsonExt.DecodeKeyed("$timestamp", jdecTimestamp)
+	jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp)
+
+	funcExt.DecodeConst("undefined", Undefined)
+
+	jsonExt.DecodeKeyed("$regex", jdecRegEx)
+	jsonExt.EncodeType(RegEx{}, jencRegEx)
+
+	funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id")
+	jsonExt.DecodeKeyed("$oid", jdecObjectId)
+	jsonExt.DecodeKeyed("$oidFunc", jdecObjectId)
+	jsonExt.EncodeType(ObjectId(""), jencObjectId)
+
+	funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id")
+	jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef)
+
+	funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N")
+	jsonExt.DecodeKeyed("$numberLong", jdecNumberLong)
+	jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong)
+	jsonExt.EncodeType(int64(0), jencNumberLong)
+	jsonExt.EncodeType(int(0), jencInt)
+
+	funcExt.DecodeConst("MinKey", MinKey)
+	funcExt.DecodeConst("MaxKey", MaxKey)
+	jsonExt.DecodeKeyed("$minKey", jdecMinKey)
+	jsonExt.DecodeKeyed("$maxKey", jdecMaxKey)
+	jsonExt.EncodeType(orderKey(0), jencMinMaxKey)
+
+	jsonExt.DecodeKeyed("$undefined", jdecUndefined)
+	jsonExt.EncodeType(Undefined, jencUndefined)
+
+	jsonExt.Extend(&funcExt)
+}
+
+func fbytes(format string, args ...interface{}) []byte {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, format, args...)
+	return buf.Bytes()
+}
+
+func jdecBinary(data []byte) (interface{}, error) {
+	var v struct {
+		Binary []byte `json:"$binary"`
+		Type   string `json:"$type"`
+		Func   struct {
+			Binary []byte `json:"$binary"`
+			Type   int64  `json:"$type"`
+		} `json:"$binaryFunc"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+
+	var binData []byte
+	var binKind int64
+	if v.Type == "" && v.Binary == nil {
+		binData = v.Func.Binary
+		binKind = v.Func.Type
+	} else if v.Type == "" {
+		return v.Binary, nil
+	} else {
+		binData = v.Binary
+		binKind, err = strconv.ParseInt(v.Type, 0, 64)
+		if err != nil {
+			binKind = -1
+		}
+	}
+
+	if binKind == 0 {
+		return binData, nil
+	}
+	if binKind < 0 || binKind > 255 {
+		return nil, fmt.Errorf("invalid type in binary object: %s", data)
+	}
+
+	return Binary{Kind: byte(binKind), Data: binData}, nil
+}
+
+func jencBinarySlice(v interface{}) ([]byte, error) {
+	in := v.([]byte)
+	out := make([]byte, base64.StdEncoding.EncodedLen(len(in)))
+	base64.StdEncoding.Encode(out, in)
+	return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil
+}
+
+func jencBinaryType(v interface{}) ([]byte, error) {
+	in := v.(Binary)
+	out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data)))
+	base64.StdEncoding.Encode(out, in.Data)
+	return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil
+}
+
+const jdateFormat = "2006-01-02T15:04:05.999Z07:00"
+
+func jdecDate(data []byte) (interface{}, error) {
+	var v struct {
+		S    string `json:"$date"`
+		Func struct {
+			S string
+		} `json:"$dateFunc"`
+	}
+	_ = jdec(data, &v)
+	if v.S == "" {
+		v.S = v.Func.S
+	}
+	if v.S != "" {
+		var errs []string
+		for _, format := range []string{jdateFormat, "2006-01-02"} {
+			t, err := time.Parse(format, v.S)
+			if err == nil {
+				return t, nil
+			}
+			errs = append(errs, err.Error())
+		}
+		return nil, fmt.Errorf("cannot parse date: %q [%s]", v.S, strings.Join(errs, ", "))
+	}
+
+	var vn struct {
+		Date struct {
+			N int64 `json:"$numberLong,string"`
+		} `json:"$date"`
+		Func struct {
+			S int64
+		} `json:"$dateFunc"`
+	}
+	err := jdec(data, &vn)
+	if err != nil {
+		return nil, fmt.Errorf("cannot parse date: %q", data)
+	}
+	n := vn.Date.N
+	if n == 0 {
+		n = vn.Func.S
+	}
+	return time.Unix(n/1000, n%1000*1e6).UTC(), nil
+}
+
+func jencDate(v interface{}) ([]byte, error) {
+	t := v.(time.Time)
+	return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil
+}
+
+func jdecTimestamp(data []byte) (interface{}, error) {
+	var v struct {
+		Func struct {
+			T int32 `json:"t"`
+			I int32 `json:"i"`
+		} `json:"$timestamp"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil
+}
+
+func jencTimestamp(v interface{}) ([]byte, error) {
+	ts := uint64(v.(MongoTimestamp))
+	return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil
+}
+
+func jdecRegEx(data []byte) (interface{}, error) {
+	var v struct {
+		Regex   string `json:"$regex"`
+		Options string `json:"$options"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	return RegEx{v.Regex, v.Options}, nil
+}
+
+func jencRegEx(v interface{}) ([]byte, error) {
+	re := v.(RegEx)
+	type regex struct {
+		Regex   string `json:"$regex"`
+		Options string `json:"$options"`
+	}
+	return json.Marshal(regex{re.Pattern, re.Options})
+}
+
+func jdecObjectId(data []byte) (interface{}, error) {
+	var v struct {
+		Id   string `json:"$oid"`
+		Func struct {
+			Id string
+		} `json:"$oidFunc"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if v.Id == "" {
+		v.Id = v.Func.Id
+	}
+	return ObjectIdHex(v.Id), nil
+}
+
+func jencObjectId(v interface{}) ([]byte, error) {
+	return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil
+}
+
+func jdecDBRef(data []byte) (interface{}, error) {
+	// TODO Support unmarshaling $ref and $id into the input value.
+	var v struct {
+		Obj map[string]interface{} `json:"$dbrefFunc"`
+	}
+	// TODO Fix this. Must not be required.
+	v.Obj = make(map[string]interface{})
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	return v.Obj, nil
+}
+
+func jdecNumberLong(data []byte) (interface{}, error) {
+	var v struct {
+		N    int64 `json:"$numberLong,string"`
+		Func struct {
+			N int64 `json:",string"`
+		} `json:"$numberLongFunc"`
+	}
+	var vn struct {
+		N    int64 `json:"$numberLong"`
+		Func struct {
+			N int64
+		} `json:"$numberLongFunc"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		err = jdec(data, &vn)
+		v.N = vn.N
+		v.Func.N = vn.Func.N
+	}
+	if err != nil {
+		return nil, err
+	}
+	if v.N != 0 {
+		return v.N, nil
+	}
+	return v.Func.N, nil
+}
+
+func jencNumberLong(v interface{}) ([]byte, error) {
+	n := v.(int64)
+	f := `{"$numberLong":"%d"}`
+	if n <= 1<<53 {
+		f = `{"$numberLong":%d}`
+	}
+	return fbytes(f, n), nil
+}
+
+func jencInt(v interface{}) ([]byte, error) {
+	n := v.(int)
+	f := `{"$numberLong":"%d"}`
+	if int64(n) <= 1<<53 {
+		f = `%d`
+	}
+	return fbytes(f, n), nil
+}
+
+func jdecMinKey(data []byte) (interface{}, error) {
+	var v struct {
+		N int64 `json:"$minKey"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if v.N != 1 {
+		return nil, fmt.Errorf("invalid $minKey object: %s", data)
+	}
+	return MinKey, nil
+}
+
+func jdecMaxKey(data []byte) (interface{}, error) {
+	var v struct {
+		N int64 `json:"$maxKey"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if v.N != 1 {
+		return nil, fmt.Errorf("invalid $maxKey object: %s", data)
+	}
+	return MaxKey, nil
+}
+
+func jencMinMaxKey(v interface{}) ([]byte, error) {
+	switch v.(orderKey) {
+	case MinKey:
+		return []byte(`{"$minKey":1}`), nil
+	case MaxKey:
+		return []byte(`{"$maxKey":1}`), nil
+	}
+	panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v))
+}
+
+func jdecUndefined(data []byte) (interface{}, error) {
+	var v struct {
+		B bool `json:"$undefined"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if !v.B {
+		return nil, fmt.Errorf("invalid $undefined object: %s", data)
+	}
+	return Undefined, nil
+}
+
+func jencUndefined(v interface{}) ([]byte, error) {
+	return []byte(`{"$undefined":true}`), nil
+}
diff --git a/go/vendor/github.com/globalsign/mgo/bson/stream.go b/go/vendor/github.com/globalsign/mgo/bson/stream.go
new file mode 100644
index 0000000..4665284
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/bson/stream.go
@@ -0,0 +1,90 @@
+package bson
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+)
+
+const (
+	// MinDocumentSize is the size of the smallest possible valid BSON document:
+	// an int32 size header + 0x00 (end of document).
+	MinDocumentSize = 5
+
+	// MaxDocumentSize is the largest possible size for a BSON document allowed by MongoDB,
+	// that is, 16 MiB (see https://docs.mongodb.com/manual/reference/limits/).
+	MaxDocumentSize = 16777216
+)
+
+// ErrInvalidDocumentSize is an error returned when a BSON document's header
+// contains a size smaller than MinDocumentSize or greater than MaxDocumentSize.
+type ErrInvalidDocumentSize struct {
+	DocumentSize int32
+}
+
+func (e ErrInvalidDocumentSize) Error() string {
+	return fmt.Sprintf("invalid document size %d", e.DocumentSize)
+}
+
+// A Decoder reads and decodes BSON values from an input stream.
+type Decoder struct {
+	source io.Reader
+}
+
+// NewDecoder returns a new Decoder that reads from source.
+// It does not add any extra buffering, and may not read data from source beyond the BSON values requested.
+func NewDecoder(source io.Reader) *Decoder {
+	return &Decoder{source: source}
+}
+
+// Decode reads the next BSON-encoded value from its input and stores it in the value pointed to by v.
+// See the documentation for Unmarshal for details about the conversion of BSON into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	// BSON documents start with their size as a *signed* int32.
+	var docSize int32
+	if err = binary.Read(dec.source, binary.LittleEndian, &docSize); err != nil {
+		return
+	}
+
+	if docSize < MinDocumentSize || docSize > MaxDocumentSize {
+		return ErrInvalidDocumentSize{DocumentSize: docSize}
+	}
+
+	docBuffer := bytes.NewBuffer(make([]byte, 0, docSize))
+	if err = binary.Write(docBuffer, binary.LittleEndian, docSize); err != nil {
+		return
+	}
+
+	// docSize is the *full* document's size (including the 4-byte size header,
+	// which has already been read).
+	if _, err = io.CopyN(docBuffer, dec.source, int64(docSize-4)); err != nil {
+		return
+	}
+
+	// Let Unmarshal handle the rest.
+	defer handleErr(&err)
+	return Unmarshal(docBuffer.Bytes(), v)
+}
+
+// An Encoder encodes and writes BSON values to an output stream.
+type Encoder struct {
+	target io.Writer
+}
+
+// NewEncoder returns a new Encoder that writes to target.
+func NewEncoder(target io.Writer) *Encoder {
+	return &Encoder{target: target}
+}
+
+// Encode encodes v to BSON, and if successful writes it to the Encoder's output stream.
+// See the documentation for Marshal for details about the conversion of Go values to BSON.
+func (enc *Encoder) Encode(v interface{}) error {
+	data, err := Marshal(v)
+	if err != nil {
+		return err
+	}
+
+	_, err = enc.target.Write(data)
+	return err
+}
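+
+// A minimal usage sketch, assuming an in-memory buffer: a few documents are
+// written through an Encoder and then read back with a Decoder until the
+// underlying reader returns io.EOF. The function name exampleStreamRoundTrip
+// is illustrative, not part of the package API.
+func exampleStreamRoundTrip() error {
+	var buf bytes.Buffer
+	enc := NewEncoder(&buf)
+	for _, doc := range []M{{"n": 1}, {"n": 2}} {
+		if err := enc.Encode(doc); err != nil {
+			return err
+		}
+	}
+	dec := NewDecoder(&buf)
+	for {
+		var doc M
+		if err := dec.Decode(&doc); err == io.EOF {
+			break
+		} else if err != nil {
+			return err
+		}
+		fmt.Println(doc["n"])
+	}
+	return nil
+}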
diff --git a/go/vendor/github.com/globalsign/mgo/internal/json/LICENSE b/go/vendor/github.com/globalsign/mgo/internal/json/LICENSE
new file mode 100644
index 0000000..7448756
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/internal/json/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/go/vendor/github.com/globalsign/mgo/internal/json/decode.go b/go/vendor/github.com/globalsign/mgo/internal/json/decode.go
new file mode 100644
index 0000000..d5ca1f9
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/internal/json/decode.go
@@ -0,0 +1,1685 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"reflect"
+	"runtime"
+	"strconv"
+	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match.
+// Unmarshal will only set exported fields of the struct.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+//	bool, for JSON booleans
+//	float64, for JSON numbers
+//	string, for JSON strings
+//	[]interface{}, for JSON arrays
+//	map[string]interface{}, for JSON objects
+//	nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a map, Unmarshal first establishes a map to
+// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
+// reuses the existing map, keeping existing entries. Unmarshal then stores key-
+// value pairs from the JSON object into the map.  The map's key type must
+// either be a string or implement encoding.TextUnmarshaler.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+	// Check for well-formedness.
+	// Avoids filling out half a data structure
+	// before discovering a JSON syntax error.
+	var d decodeState
+	err := checkValid(data, &d.scan)
+	if err != nil {
+		return err
+	}
+
+	d.init(data)
+	return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by types
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+	UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+	Value  string       // description of JSON value - "bool", "array", "number -5"
+	Type   reflect.Type // type of Go value it could not be assigned to
+	Offset int64        // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+	return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+	Key   string
+	Type  reflect.Type
+	Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+	return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+	Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+	if e.Type == nil {
+		return "json: Unmarshal(nil)"
+	}
+
+	if e.Type.Kind() != reflect.Ptr {
+		return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+	}
+	return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if _, ok := r.(runtime.Error); ok {
+				panic(r)
+			}
+			err = r.(error)
+		}
+	}()
+
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr || rv.IsNil() {
+		return &InvalidUnmarshalError{reflect.TypeOf(v)}
+	}
+
+	d.scan.reset()
+	// We decode rv not rv.Elem because the Unmarshaler interface
+	// test must be applied at the top level of the value.
+	d.value(rv)
+	return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+	return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+	return strconv.ParseInt(string(n), 10, 64)
+}
+
+// isValidNumber reports whether s is a valid JSON number literal.
+func isValidNumber(s string) bool {
+	// This function implements the JSON numbers grammar.
+	// See https://tools.ietf.org/html/rfc7159#section-6
+	// and http://json.org/number.gif
+
+	if s == "" {
+		return false
+	}
+
+	// Optional -
+	if s[0] == '-' {
+		s = s[1:]
+		if s == "" {
+			return false
+		}
+	}
+
+	// Digits
+	switch {
+	default:
+		return false
+
+	case s[0] == '0':
+		s = s[1:]
+
+	case '1' <= s[0] && s[0] <= '9':
+		s = s[1:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+		}
+	}
+
+	// . followed by 1 or more digits.
+	if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+		s = s[2:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+		}
+	}
+
+	// e or E followed by an optional - or + and
+	// 1 or more digits.
+	if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+		s = s[1:]
+		if s[0] == '+' || s[0] == '-' {
+			s = s[1:]
+			if s == "" {
+				return false
+			}
+		}
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+		}
+	}
+
+	// Make sure we are at the end.
+	return s == ""
+}
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+	data       []byte
+	off        int // read offset in data
+	scan       scanner
+	nextscan   scanner // for calls to nextValue
+	savedError error
+	useNumber  bool
+	ext        Extension
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+	d.data = data
+	d.off = 0
+	d.savedError = nil
+	return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+	panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+	if d.savedError == nil {
+		d.savedError = err
+	}
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+	c := d.data[d.off]
+	item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+	if err != nil {
+		d.error(err)
+	}
+	d.off = len(d.data) - len(rest)
+
+	// Our scanner has seen the opening brace/bracket
+	// and thinks we're still in the middle of the object.
+	// invent a closing brace/bracket to get it out.
+	// Invent a closing brace/bracket to get it out.
+		d.scan.step(&d.scan, '}')
+	} else if c == '[' {
+		d.scan.step(&d.scan, ']')
+	} else {
+		// Was inside a function name. Get out of it.
+		d.scan.step(&d.scan, '(')
+		d.scan.step(&d.scan, ')')
+	}
+
+	return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+	var newOp int
+	for {
+		if d.off >= len(d.data) {
+			newOp = d.scan.eof()
+			d.off = len(d.data) + 1 // mark processed EOF with len+1
+		} else {
+			c := d.data[d.off]
+			d.off++
+			newOp = d.scan.step(&d.scan, c)
+		}
+		if newOp != op {
+			break
+		}
+	}
+	return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value v.
+// It updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+	if !v.IsValid() {
+		_, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+		if err != nil {
+			d.error(err)
+		}
+		d.off = len(d.data) - len(rest)
+
+		// d.scan thinks we're still at the beginning of the item.
+		// Feed in an empty string - the shortest, simplest value -
+		// so that it knows we got to the end of the value.
+		if d.scan.redo {
+			// rewind.
+			d.scan.redo = false
+			d.scan.step = stateBeginValue
+		}
+		d.scan.step(&d.scan, '"')
+		d.scan.step(&d.scan, '"')
+
+		n := len(d.scan.parseState)
+		if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+			// d.scan thinks we just read an object key; finish the object
+			d.scan.step(&d.scan, ':')
+			d.scan.step(&d.scan, '"')
+			d.scan.step(&d.scan, '"')
+			d.scan.step(&d.scan, '}')
+		}
+
+		return
+	}
+
+	switch op := d.scanWhile(scanSkipSpace); op {
+	default:
+		d.error(errPhase)
+
+	case scanBeginArray:
+		d.array(v)
+
+	case scanBeginObject:
+		d.object(v)
+
+	case scanBeginLiteral:
+		d.literal(v)
+
+	case scanBeginName:
+		d.name(v)
+	}
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+	switch op := d.scanWhile(scanSkipSpace); op {
+	default:
+		d.error(errPhase)
+
+	case scanBeginArray:
+		d.array(reflect.Value{})
+
+	case scanBeginObject:
+		d.object(reflect.Value{})
+
+	case scanBeginName:
+		switch v := d.nameInterface().(type) {
+		case nil, string:
+			return v
+		}
+
+	case scanBeginLiteral:
+		switch v := d.literalInterface().(type) {
+		case nil, string:
+			return v
+		}
+	}
+	return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// If it encounters an Unmarshaler, indirect stops and returns that.
+// If decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+	// If v is a named type and is addressable,
+	// start with its address, so that if the type has pointer methods,
+	// we find them.
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+		v = v.Addr()
+	}
+	for {
+		// Load value from interface, but only if the result will be
+		// usefully addressable.
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			e := v.Elem()
+			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+				v = e
+				continue
+			}
+		}
+
+		if v.Kind() != reflect.Ptr {
+			break
+		}
+
+		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+			break
+		}
+		if v.IsNil() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		if v.Type().NumMethod() > 0 {
+			if u, ok := v.Interface().(Unmarshaler); ok {
+				return u, nil, v
+			}
+			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+				return nil, u, v
+			}
+		}
+		v = v.Elem()
+	}
+	return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// The first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+	// Check for unmarshaler.
+	u, ut, pv := d.indirect(v, false)
+	if u != nil {
+		d.off--
+		err := u.UnmarshalJSON(d.next())
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+		d.off--
+		d.next()
+		return
+	}
+
+	v = pv
+
+	// Check type of target.
+	switch v.Kind() {
+	case reflect.Interface:
+		if v.NumMethod() == 0 {
+			// Decoding into nil interface?  Switch to non-reflect code.
+			v.Set(reflect.ValueOf(d.arrayInterface()))
+			return
+		}
+		// Otherwise it's invalid.
+		fallthrough
+	default:
+		d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+		d.off--
+		d.next()
+		return
+	case reflect.Array:
+	case reflect.Slice:
+		break
+	}
+
+	i := 0
+	for {
+		// Look ahead for ] - can only happen on first iteration.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndArray {
+			break
+		}
+
+		// Back up so d.value can have the byte we just read.
+		d.off--
+		d.scan.undo(op)
+
+		// Get element of array, growing if necessary.
+		if v.Kind() == reflect.Slice {
+			// Grow slice if necessary
+			if i >= v.Cap() {
+				newcap := v.Cap() + v.Cap()/2
+				if newcap < 4 {
+					newcap = 4
+				}
+				newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+				reflect.Copy(newv, v)
+				v.Set(newv)
+			}
+			if i >= v.Len() {
+				v.SetLen(i + 1)
+			}
+		}
+
+		if i < v.Len() {
+			// Decode into element.
+			d.value(v.Index(i))
+		} else {
+			// Ran out of fixed array: skip.
+			d.value(reflect.Value{})
+		}
+		i++
+
+		// Next token must be , or ].
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndArray {
+			break
+		}
+		if op != scanArrayValue {
+			d.error(errPhase)
+		}
+	}
+
+	if i < v.Len() {
+		if v.Kind() == reflect.Array {
+			// Array. Zero the rest.
+			z := reflect.Zero(v.Type().Elem())
+			for ; i < v.Len(); i++ {
+				v.Index(i).Set(z)
+			}
+		} else {
+			v.SetLen(i)
+		}
+	}
+	if i == 0 && v.Kind() == reflect.Slice {
+		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+	}
+}
+
+var nullLiteral = []byte("null")
+var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// The first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) {
+	// Check for unmarshaler.
+	u, ut, pv := d.indirect(v, false)
+	if d.storeKeyed(pv) {
+		return
+	}
+	if u != nil {
+		d.off--
+		err := u.UnmarshalJSON(d.next())
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+		d.off--
+		d.next() // skip over { } in input
+		return
+	}
+	v = pv
+
+	// Decoding into nil interface?  Switch to non-reflect code.
+	if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+		v.Set(reflect.ValueOf(d.objectInterface()))
+		return
+	}
+
+	// Check type of target:
+	//   struct or
+	//   map[string]T or map[encoding.TextUnmarshaler]T
+	switch v.Kind() {
+	case reflect.Map:
+		// Map key must either have string kind or be an encoding.TextUnmarshaler.
+		t := v.Type()
+		if t.Key().Kind() != reflect.String &&
+			!reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
+			d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+			d.off--
+			d.next() // skip over { } in input
+			return
+		}
+		if v.IsNil() {
+			v.Set(reflect.MakeMap(t))
+		}
+	case reflect.Struct:
+
+	default:
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+		d.off--
+		d.next() // skip over { } in input
+		return
+	}
+
+	var mapElem reflect.Value
+
+	empty := true
+	for {
+		// Read opening " of string key or closing }.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndObject {
+			if !empty && !d.ext.trailingCommas {
+				d.syntaxError("beginning of object key string")
+			}
+			break
+		}
+		empty = false
+		if op == scanBeginName {
+			if !d.ext.unquotedKeys {
+				d.syntaxError("beginning of object key string")
+			}
+		} else if op != scanBeginLiteral {
+			d.error(errPhase)
+		}
+		unquotedKey := op == scanBeginName
+
+		// Read key.
+		start := d.off - 1
+		op = d.scanWhile(scanContinue)
+		item := d.data[start : d.off-1]
+		var key []byte
+		if unquotedKey {
+			key = item
+			// TODO Fix code below to quote item when necessary.
+		} else {
+			var ok bool
+			key, ok = unquoteBytes(item)
+			if !ok {
+				d.error(errPhase)
+			}
+		}
+
+		// Figure out field corresponding to key.
+		var subv reflect.Value
+		destring := false // whether the value is wrapped in a string to be decoded first
+
+		if v.Kind() == reflect.Map {
+			elemType := v.Type().Elem()
+			if !mapElem.IsValid() {
+				mapElem = reflect.New(elemType).Elem()
+			} else {
+				mapElem.Set(reflect.Zero(elemType))
+			}
+			subv = mapElem
+		} else {
+			var f *field
+			fields := cachedTypeFields(v.Type())
+			for i := range fields {
+				ff := &fields[i]
+				if bytes.Equal(ff.nameBytes, key) {
+					f = ff
+					break
+				}
+				if f == nil && ff.equalFold(ff.nameBytes, key) {
+					f = ff
+				}
+			}
+			if f != nil {
+				subv = v
+				destring = f.quoted
+				for _, i := range f.index {
+					if subv.Kind() == reflect.Ptr {
+						if subv.IsNil() {
+							subv.Set(reflect.New(subv.Type().Elem()))
+						}
+						subv = subv.Elem()
+					}
+					subv = subv.Field(i)
+				}
+			}
+		}
+
+		// Read : before value.
+		if op == scanSkipSpace {
+			op = d.scanWhile(scanSkipSpace)
+		}
+		if op != scanObjectKey {
+			d.error(errPhase)
+		}
+
+		// Read value.
+		if destring {
+			switch qv := d.valueQuoted().(type) {
+			case nil:
+				d.literalStore(nullLiteral, subv, false)
+			case string:
+				d.literalStore([]byte(qv), subv, true)
+			default:
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+			}
+		} else {
+			d.value(subv)
+		}
+
+		// Write value back to map;
+		// if using struct, subv points into struct already.
+		if v.Kind() == reflect.Map {
+			kt := v.Type().Key()
+			var kv reflect.Value
+			switch {
+			case kt.Kind() == reflect.String:
+				kv = reflect.ValueOf(key).Convert(v.Type().Key())
+			case reflect.PtrTo(kt).Implements(textUnmarshalerType):
+				kv = reflect.New(v.Type().Key())
+				d.literalStore(item, kv, true)
+				kv = kv.Elem()
+			default:
+				panic("json: Unexpected key type") // should never occur
+			}
+			v.SetMapIndex(kv, subv)
+		}
+
+		// Next token must be , or }.
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndObject {
+			break
+		}
+		if op != scanObjectValue {
+			d.error(errPhase)
+		}
+	}
+}
+
+// isNull returns whether there's a null literal at the provided offset.
+func (d *decodeState) isNull(off int) bool {
+	if off+4 >= len(d.data) || d.data[off] != 'n' || d.data[off+1] != 'u' || d.data[off+2] != 'l' || d.data[off+3] != 'l' {
+		return false
+	}
+	d.nextscan.reset()
+	for i, c := range d.data[off:] {
+		if i > 4 {
+			return false
+		}
+		switch d.nextscan.step(&d.nextscan, c) {
+		case scanContinue, scanBeginName:
+			continue
+		}
+		break
+	}
+	return true
+}
+
+// name consumes a const or function from d.data[d.off-1:], decoding into the value v.
+// The first byte of the function name has been read already.
+func (d *decodeState) name(v reflect.Value) {
+	if d.isNull(d.off - 1) {
+		d.literal(v)
+		return
+	}
+
+	// Check for unmarshaler.
+	u, ut, pv := d.indirect(v, false)
+	if d.storeKeyed(pv) {
+		return
+	}
+	if u != nil {
+		d.off--
+		err := u.UnmarshalJSON(d.next())
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+		d.off--
+		d.next() // skip over function in input
+		return
+	}
+	v = pv
+
+	// Decoding into nil interface?  Switch to non-reflect code.
+	if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+		out := d.nameInterface()
+		if out == nil {
+			v.Set(reflect.Zero(v.Type()))
+		} else {
+			v.Set(reflect.ValueOf(out))
+		}
+		return
+	}
+
+	nameStart := d.off - 1
+
+	op := d.scanWhile(scanContinue)
+
+	name := d.data[nameStart : d.off-1]
+	if op != scanParam {
+		// Back up so the byte just read is consumed next.
+		d.off--
+		d.scan.undo(op)
+		if l, ok := d.convertLiteral(name); ok {
+			d.storeValue(v, l)
+			return
+		}
+		d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)})
+	}
+
+	funcName := string(name)
+	funcData := d.ext.funcs[funcName]
+	if funcData.key == "" {
+		d.error(fmt.Errorf("json: unknown function %q", funcName))
+	}
+
+	// Check type of target:
+	//   struct or
+	//   map[string]T or map[encoding.TextUnmarshaler]T
+	switch v.Kind() {
+	case reflect.Map:
+		// Map key must either have string kind or be an encoding.TextUnmarshaler.
+		t := v.Type()
+		if t.Key().Kind() != reflect.String &&
+			!reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
+			d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+			d.off--
+			d.next() // skip over { } in input
+			return
+		}
+		if v.IsNil() {
+			v.Set(reflect.MakeMap(t))
+		}
+	case reflect.Struct:
+
+	default:
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+		d.off--
+		d.next() // skip over { } in input
+		return
+	}
+
+	// TODO Fix case of func field as map.
+	//topv := v
+
+	// Figure out field corresponding to function.
+	key := []byte(funcData.key)
+	if v.Kind() == reflect.Map {
+		elemType := v.Type().Elem()
+		v = reflect.New(elemType).Elem()
+	} else {
+		var f *field
+		fields := cachedTypeFields(v.Type())
+		for i := range fields {
+			ff := &fields[i]
+			if bytes.Equal(ff.nameBytes, key) {
+				f = ff
+				break
+			}
+			if f == nil && ff.equalFold(ff.nameBytes, key) {
+				f = ff
+			}
+		}
+		if f != nil {
+			for _, i := range f.index {
+				if v.Kind() == reflect.Ptr {
+					if v.IsNil() {
+						v.Set(reflect.New(v.Type().Elem()))
+					}
+					v = v.Elem()
+				}
+				v = v.Field(i)
+			}
+			if v.Kind() == reflect.Ptr {
+				if v.IsNil() {
+					v.Set(reflect.New(v.Type().Elem()))
+				}
+				v = v.Elem()
+			}
+		}
+	}
+
+	// Check for unmarshaler on func field itself.
+	u, _, _ = d.indirect(v, false)
+	if u != nil {
+		d.off = nameStart
+		err := u.UnmarshalJSON(d.next())
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+
+	var mapElem reflect.Value
+
+	// Parse function arguments.
+	for i := 0; ; i++ {
+		// closing ) - can only happen on first iteration.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndParams {
+			break
+		}
+
+		// Back up so d.value can have the byte we just read.
+		d.off--
+		d.scan.undo(op)
+
+		if i >= len(funcData.args) {
+			d.error(fmt.Errorf("json: too many arguments for function %s", funcName))
+		}
+		key := []byte(funcData.args[i])
+
+		// Figure out field corresponding to key.
+		var subv reflect.Value
+		destring := false // whether the value is wrapped in a string to be decoded first
+
+		if v.Kind() == reflect.Map {
+			elemType := v.Type().Elem()
+			if !mapElem.IsValid() {
+				mapElem = reflect.New(elemType).Elem()
+			} else {
+				mapElem.Set(reflect.Zero(elemType))
+			}
+			subv = mapElem
+		} else {
+			var f *field
+			fields := cachedTypeFields(v.Type())
+			for i := range fields {
+				ff := &fields[i]
+				if bytes.Equal(ff.nameBytes, key) {
+					f = ff
+					break
+				}
+				if f == nil && ff.equalFold(ff.nameBytes, key) {
+					f = ff
+				}
+			}
+			if f != nil {
+				subv = v
+				destring = f.quoted
+				for _, i := range f.index {
+					if subv.Kind() == reflect.Ptr {
+						if subv.IsNil() {
+							subv.Set(reflect.New(subv.Type().Elem()))
+						}
+						subv = subv.Elem()
+					}
+					subv = subv.Field(i)
+				}
+			}
+		}
+
+		// Read value.
+		if destring {
+			switch qv := d.valueQuoted().(type) {
+			case nil:
+				d.literalStore(nullLiteral, subv, false)
+			case string:
+				d.literalStore([]byte(qv), subv, true)
+			default:
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+			}
+		} else {
+			d.value(subv)
+		}
+
+		// Write value back to map;
+		// if using struct, subv points into struct already.
+		if v.Kind() == reflect.Map {
+			kt := v.Type().Key()
+			var kv reflect.Value
+			switch {
+			case kt.Kind() == reflect.String:
+				kv = reflect.ValueOf(key).Convert(v.Type().Key())
+			case reflect.PtrTo(kt).Implements(textUnmarshalerType):
+				kv = reflect.New(v.Type().Key())
+				d.literalStore(key, kv, true)
+				kv = kv.Elem()
+			default:
+				panic("json: Unexpected key type") // should never occur
+			}
+			v.SetMapIndex(kv, subv)
+		}
+
+		// Next token must be , or ).
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndParams {
+			break
+		}
+		if op != scanParam {
+			d.error(errPhase)
+		}
+	}
+}
+
+// keyed attempts to decode an object or function using a keyed doc extension,
+// and returns the value and true on success, or nil and false otherwise.
+func (d *decodeState) keyed() (interface{}, bool) {
+	if len(d.ext.keyed) == 0 {
+		return nil, false
+	}
+
+	unquote := false
+
+	// Look-ahead first key to check for a keyed document extension.
+	d.nextscan.reset()
+	var start, end int
+	for i, c := range d.data[d.off-1:] {
+		switch op := d.nextscan.step(&d.nextscan, c); op {
+		case scanSkipSpace, scanContinue, scanBeginObject:
+			continue
+		case scanBeginLiteral, scanBeginName:
+			unquote = op == scanBeginLiteral
+			start = i
+			continue
+		}
+		end = i
+		break
+	}
+
+	name := bytes.Trim(d.data[d.off-1+start:d.off-1+end], " \n\t")
+
+	var key []byte
+	var ok bool
+	if unquote {
+		key, ok = unquoteBytes(name)
+		if !ok {
+			d.error(errPhase)
+		}
+	} else {
+		funcData, ok := d.ext.funcs[string(name)]
+		if !ok {
+			return nil, false
+		}
+		key = []byte(funcData.key)
+	}
+
+	decode, ok := d.ext.keyed[string(key)]
+	if !ok {
+		return nil, false
+	}
+
+	d.off--
+	out, err := decode(d.next())
+	if err != nil {
+		d.error(err)
+	}
+	return out, true
+}
+
+func (d *decodeState) storeKeyed(v reflect.Value) bool {
+	keyed, ok := d.keyed()
+	if !ok {
+		return false
+	}
+	d.storeValue(v, keyed)
+	return true
+}
+
+var (
+	trueBytes  = []byte("true")
+	falseBytes = []byte("false")
+	nullBytes  = []byte("null")
+)
+
+func (d *decodeState) storeValue(v reflect.Value, from interface{}) {
+	switch from {
+	case nil:
+		d.literalStore(nullBytes, v, false)
+		return
+	case true:
+		d.literalStore(trueBytes, v, false)
+		return
+	case false:
+		d.literalStore(falseBytes, v, false)
+		return
+	}
+	fromv := reflect.ValueOf(from)
+	for fromv.Kind() == reflect.Ptr && !fromv.IsNil() {
+		fromv = fromv.Elem()
+	}
+	fromt := fromv.Type()
+	for v.Kind() == reflect.Ptr && !v.IsNil() {
+		v = v.Elem()
+	}
+	vt := v.Type()
+	if fromt.AssignableTo(vt) {
+		v.Set(fromv)
+	} else if fromt.ConvertibleTo(vt) {
+		v.Set(fromv.Convert(vt))
+	} else {
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+	}
+}
+
+func (d *decodeState) convertLiteral(name []byte) (interface{}, bool) {
+	if len(name) == 0 {
+		return nil, false
+	}
+	switch name[0] {
+	case 't':
+		if bytes.Equal(name, trueBytes) {
+			return true, true
+		}
+	case 'f':
+		if bytes.Equal(name, falseBytes) {
+			return false, true
+		}
+	case 'n':
+		if bytes.Equal(name, nullBytes) {
+			return nil, true
+		}
+	}
+	if l, ok := d.ext.consts[string(name)]; ok {
+		return l, true
+	}
+	return nil, false
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+	// All bytes inside literal return scanContinue op code.
+	start := d.off - 1
+	op := d.scanWhile(scanContinue)
+
+	// Scan read one byte too far; back up.
+	d.off--
+	d.scan.undo(op)
+
+	d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64 or a Number
+// depending on the setting of d.useNumber.
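+//
+// For example (an illustrative sketch; d is a decodeState):
+//
+//   d.useNumber = false
+//   v, _ := d.convertNumber("1e2") // v == float64(100)
+//   d.useNumber = true
+//   v, _ = d.convertNumber("1e2")  // v == Number("1e2")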
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+	if d.useNumber {
+		return Number(s), nil
+	}
+	f, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+	}
+	return f, nil
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. This is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+	// Check for unmarshaler.
+	if len(item) == 0 {
+		// Empty string given.
+		d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+		return
+	}
+	wantptr := item[0] == 'n' // null
+	u, ut, pv := d.indirect(v, wantptr)
+	if u != nil {
+		err := u.UnmarshalJSON(item)
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		if item[0] != '"' {
+			if fromQuoted {
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+			}
+			return
+		}
+		s, ok := unquoteBytes(item)
+		if !ok {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		err := ut.UnmarshalText(s)
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+
+	v = pv
+
+	switch c := item[0]; c {
+	case 'n': // null
+		switch v.Kind() {
+		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+			v.Set(reflect.Zero(v.Type()))
+			// otherwise, ignore null for primitives/string
+		}
+	case 't', 'f': // true, false
+		value := c == 't'
+		switch v.Kind() {
+		default:
+			if fromQuoted {
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+			}
+		case reflect.Bool:
+			v.SetBool(value)
+		case reflect.Interface:
+			if v.NumMethod() == 0 {
+				v.Set(reflect.ValueOf(value))
+			} else {
+				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+			}
+		}
+
+	case '"': // string
+		s, ok := unquoteBytes(item)
+		if !ok {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		switch v.Kind() {
+		default:
+			d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+		case reflect.Slice:
+			if v.Type().Elem().Kind() != reflect.Uint8 {
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+				break
+			}
+			b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+			n, err := base64.StdEncoding.Decode(b, s)
+			if err != nil {
+				d.saveError(err)
+				break
+			}
+			v.SetBytes(b[:n])
+		case reflect.String:
+			v.SetString(string(s))
+		case reflect.Interface:
+			if v.NumMethod() == 0 {
+				v.Set(reflect.ValueOf(string(s)))
+			} else {
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+			}
+		}
+
+	default: // number
+		if c != '-' && (c < '0' || c > '9') {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		s := string(item)
+		switch v.Kind() {
+		default:
+			if v.Kind() == reflect.String && v.Type() == numberType {
+				v.SetString(s)
+				if !isValidNumber(s) {
+					d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
+				}
+				break
+			}
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+			}
+		case reflect.Interface:
+			n, err := d.convertNumber(s)
+			if err != nil {
+				d.saveError(err)
+				break
+			}
+			if v.NumMethod() != 0 {
+				d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+				break
+			}
+			v.Set(reflect.ValueOf(n))
+
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			n, err := strconv.ParseInt(s, 10, 64)
+			if err != nil || v.OverflowInt(n) {
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+				break
+			}
+			v.SetInt(n)
+
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			n, err := strconv.ParseUint(s, 10, 64)
+			if err != nil || v.OverflowUint(n) {
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+				break
+			}
+			v.SetUint(n)
+
+		case reflect.Float32, reflect.Float64:
+			n, err := strconv.ParseFloat(s, v.Type().Bits())
+			if err != nil || v.OverflowFloat(n) {
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+				break
+			}
+			v.SetFloat(n)
+		}
+	}
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}.
+func (d *decodeState) valueInterface() interface{} {
+	switch d.scanWhile(scanSkipSpace) {
+	default:
+		d.error(errPhase)
+		panic("unreachable")
+	case scanBeginArray:
+		return d.arrayInterface()
+	case scanBeginObject:
+		return d.objectInterface()
+	case scanBeginLiteral:
+		return d.literalInterface()
+	case scanBeginName:
+		return d.nameInterface()
+	}
+}
+
+func (d *decodeState) syntaxError(expected string) {
+	msg := fmt.Sprintf("invalid character '%c' looking for %s", d.data[d.off-1], expected)
+	d.error(&SyntaxError{msg, int64(d.off)})
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+	var v = make([]interface{}, 0)
+	for {
+		// Look ahead for ] - can only happen on first iteration.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndArray {
+			if len(v) > 0 && !d.ext.trailingCommas {
+				d.syntaxError("beginning of value")
+			}
+			break
+		}
+
+		// Back up so d.value can have the byte we just read.
+		d.off--
+		d.scan.undo(op)
+
+		v = append(v, d.valueInterface())
+
+		// Next token must be , or ].
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndArray {
+			break
+		}
+		if op != scanArrayValue {
+			d.error(errPhase)
+		}
+	}
+	return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() interface{} {
+	v, ok := d.keyed()
+	if ok {
+		return v
+	}
+
+	m := make(map[string]interface{})
+	for {
+		// Read opening " of string key or closing }.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndObject {
+			if len(m) > 0 && !d.ext.trailingCommas {
+				d.syntaxError("beginning of object key string")
+			}
+			break
+		}
+		if op == scanBeginName {
+			if !d.ext.unquotedKeys {
+				d.syntaxError("beginning of object key string")
+			}
+		} else if op != scanBeginLiteral {
+			d.error(errPhase)
+		}
+		unquotedKey := op == scanBeginName
+
+		// Read string key.
+		start := d.off - 1
+		op = d.scanWhile(scanContinue)
+		item := d.data[start : d.off-1]
+		var key string
+		if unquotedKey {
+			key = string(item)
+		} else {
+			var ok bool
+			key, ok = unquote(item)
+			if !ok {
+				d.error(errPhase)
+			}
+		}
+
+		// Read : before value.
+		if op == scanSkipSpace {
+			op = d.scanWhile(scanSkipSpace)
+		}
+		if op != scanObjectKey {
+			d.error(errPhase)
+		}
+
+		// Read value.
+		m[key] = d.valueInterface()
+
+		// Next token must be , or }.
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndObject {
+			break
+		}
+		if op != scanObjectValue {
+			d.error(errPhase)
+		}
+	}
+	return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+	// All bytes inside literal return scanContinue op code.
+	start := d.off - 1
+	op := d.scanWhile(scanContinue)
+
+	// Scan read one byte too far; back up.
+	d.off--
+	d.scan.undo(op)
+	item := d.data[start:d.off]
+
+	switch c := item[0]; c {
+	case 'n': // null
+		return nil
+
+	case 't', 'f': // true, false
+		return c == 't'
+
+	case '"': // string
+		s, ok := unquote(item)
+		if !ok {
+			d.error(errPhase)
+		}
+		return s
+
+	default: // number
+		if c != '-' && (c < '0' || c > '9') {
+			d.error(errPhase)
+		}
+		n, err := d.convertNumber(string(item))
+		if err != nil {
+			d.saveError(err)
+		}
+		return n
+	}
+}
+
+// nameInterface is like name but returns map[string]interface{}.
+func (d *decodeState) nameInterface() interface{} {
+	v, ok := d.keyed()
+	if ok {
+		return v
+	}
+
+	nameStart := d.off - 1
+
+	op := d.scanWhile(scanContinue)
+
+	name := d.data[nameStart : d.off-1]
+	if op != scanParam {
+		// Back up so the byte just read is consumed next.
+		d.off--
+		d.scan.undo(op)
+		if l, ok := d.convertLiteral(name); ok {
+			return l
+		}
+		d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)})
+	}
+
+	funcName := string(name)
+	funcData := d.ext.funcs[funcName]
+	if funcData.key == "" {
+		d.error(fmt.Errorf("json: unknown function %q", funcName))
+	}
+
+	m := make(map[string]interface{})
+	for i := 0; ; i++ {
+		// Look ahead for ) - can only happen on first iteration.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndParams {
+			break
+		}
+
+		// Back up so d.value can have the byte we just read.
+		d.off--
+		d.scan.undo(op)
+
+		if i >= len(funcData.args) {
+			d.error(fmt.Errorf("json: too many arguments for function %s", funcName))
+		}
+		m[funcData.args[i]] = d.valueInterface()
+
+		// Next token must be , or ).
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndParams {
+			break
+		}
+		if op != scanParam {
+			d.error(errPhase)
+		}
+	}
+	return map[string]interface{}{funcData.key: m}
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
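+//
+// For example (illustrative inputs):
+//
+//   getu4([]byte(`\u0041xyz`)) // 'A' (0x41)
+//   getu4([]byte(`\u00`))      // -1: input too short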
+func getu4(s []byte) rune {
+	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+		return -1
+	}
+	r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+	if err != nil {
+		return -1
+	}
+	return rune(r)
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules differ from Go's string literals, so we cannot use strconv.Unquote.
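+//
+// For example (illustrative inputs; the backslashes are literal bytes in the data):
+//
+//   t, ok := unquote([]byte(`"a\nb"`)) // t == "a\nb" with a real newline, ok == true
+//   t, ok = unquote([]byte(`abc`))     // ok == false: not a quoted string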
+func unquote(s []byte) (t string, ok bool) {
+	s, ok = unquoteBytes(s)
+	t = string(s)
+	return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+		return
+	}
+	s = s[1 : len(s)-1]
+
+	// Check for unusual characters. If there are none,
+	// then no unquoting is needed, so return a slice of the
+	// original bytes.
+	r := 0
+	for r < len(s) {
+		c := s[r]
+		if c == '\\' || c == '"' || c < ' ' {
+			break
+		}
+		if c < utf8.RuneSelf {
+			r++
+			continue
+		}
+		rr, size := utf8.DecodeRune(s[r:])
+		if rr == utf8.RuneError && size == 1 {
+			break
+		}
+		r += size
+	}
+	if r == len(s) {
+		return s, true
+	}
+
+	b := make([]byte, len(s)+2*utf8.UTFMax)
+	w := copy(b, s[0:r])
+	for r < len(s) {
+		// Out of room?  Can only happen if s is full of
+		// malformed UTF-8 and we're replacing each
+		// byte with RuneError.
+		if w >= len(b)-2*utf8.UTFMax {
+			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+			copy(nb, b[0:w])
+			b = nb
+		}
+		switch c := s[r]; {
+		case c == '\\':
+			r++
+			if r >= len(s) {
+				return
+			}
+			switch s[r] {
+			default:
+				return
+			case '"', '\\', '/', '\'':
+				b[w] = s[r]
+				r++
+				w++
+			case 'b':
+				b[w] = '\b'
+				r++
+				w++
+			case 'f':
+				b[w] = '\f'
+				r++
+				w++
+			case 'n':
+				b[w] = '\n'
+				r++
+				w++
+			case 'r':
+				b[w] = '\r'
+				r++
+				w++
+			case 't':
+				b[w] = '\t'
+				r++
+				w++
+			case 'u':
+				r--
+				rr := getu4(s[r:])
+				if rr < 0 {
+					return
+				}
+				r += 6
+				if utf16.IsSurrogate(rr) {
+					rr1 := getu4(s[r:])
+					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+						// A valid pair; consume.
+						r += 6
+						w += utf8.EncodeRune(b[w:], dec)
+						break
+					}
+					// Invalid surrogate; fall back to replacement rune.
+					rr = unicode.ReplacementChar
+				}
+				w += utf8.EncodeRune(b[w:], rr)
+			}
+
+		// Quote, control characters are invalid.
+		case c == '"', c < ' ':
+			return
+
+		// ASCII
+		case c < utf8.RuneSelf:
+			b[w] = c
+			r++
+			w++
+
+		// Coerce to well-formed UTF-8.
+		default:
+			rr, size := utf8.DecodeRune(s[r:])
+			r += size
+			w += utf8.EncodeRune(b[w:], rr)
+		}
+	}
+	return b[0:w], true
+}
diff --git a/go/vendor/github.com/globalsign/mgo/internal/json/encode.go b/go/vendor/github.com/globalsign/mgo/internal/json/encode.go
new file mode 100644
index 0000000..e4b8f86
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/internal/json/encode.go
@@ -0,0 +1,1260 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON as defined in
+// RFC 4627. The mapping between JSON and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"math"
+	"reflect"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+// This escaping can be disabled using an Encoder with DisableHTMLEscaping.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON value.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+//   - the field's tag is "-", or
+//   - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+//   // Field is ignored by this package.
+//   Field int `json:"-"`
+//
+//   // Field appears in JSON as key "myName".
+//   Field int `json:"myName"`
+//
+//   // Field appears in JSON as key "myName" and
+//   // the field is omitted from the object if its value is empty,
+//   // as defined above.
+//   Field int `json:"myName,omitempty"`
+//
+//   // Field appears in JSON as key "Field" (the default), but
+//   // the field is skipped if empty.
+//   // Note the leading comma.
+//   Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+//    Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects. The map's key type must either be a string
+// or implement encoding.TextMarshaler.  The map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON value.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON value.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
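+// A minimal usage sketch with the struct tags described above (the Point
+// type and its values are hypothetical):
+//
+//   type Point struct {
+//       X int `json:"x"`
+//       Y int `json:"y,omitempty"`
+//   }
+//   b, err := Marshal(Point{X: 1})
+//   // b == []byte(`{"x":1}`), err == nil; Y is omitted because it is empty.
+//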
+func Marshal(v interface{}) ([]byte, error) {
+	e := &encodeState{}
+	err := e.marshal(v, encOpts{escapeHTML: true})
+	if err != nil {
+		return nil, err
+	}
+	return e.Bytes(), nil
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
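+//
+// For example (hypothetical value):
+//
+//   b, _ := MarshalIndent(map[string]int{"n": 1}, "", "  ")
+//   // b == []byte("{\n  \"n\": 1\n}")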
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	b, err := Marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	var buf bytes.Buffer
+	err = Indent(&buf, b, prefix, indent)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML <script> tags.
+// For historical reasons, web browsers don't honor standard HTML
+// escaping within <script> tags, so an alternative JSON encoding must
+// be used.
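+//
+// For example (hypothetical input):
+//
+//   var buf bytes.Buffer
+//   HTMLEscape(&buf, []byte(`{"msg":"<b>hi</b> & bye"}`))
+//   // buf.String() == `{"msg":"\u003cb\u003ehi\u003c/b\u003e \u0026 bye"}`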
+func HTMLEscape(dst *bytes.Buffer, src []byte) {
+	// The characters can only appear in string literals,
+	// so just scan the string one byte at a time.
+	start := 0
+	for i, c := range src {
+		if c == '<' || c == '>' || c == '&' {
+			if start < i {
+				dst.Write(src[start:i])
+			}
+			dst.WriteString(`\u00`)
+			dst.WriteByte(hex[c>>4])
+			dst.WriteByte(hex[c&0xF])
+			start = i + 1
+		}
+		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+		if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+			if start < i {
+				dst.Write(src[start:i])
+			}
+			dst.WriteString(`\u202`)
+			dst.WriteByte(hex[src[i+2]&0xF])
+			start = i + 3
+		}
+	}
+	if start < len(src) {
+		dst.Write(src[start:])
+	}
+}
+
+// Marshaler is the interface implemented by types that
+// can marshal themselves into valid JSON.
+type Marshaler interface {
+	MarshalJSON() ([]byte, error)
+}
+
+// An UnsupportedTypeError is returned by Marshal when attempting
+// to encode an unsupported value type.
+type UnsupportedTypeError struct {
+	Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+	return "json: unsupported type: " + e.Type.String()
+}
+
+// An UnsupportedValueError is returned by Marshal when attempting
+// to encode an unsupported value.
+type UnsupportedValueError struct {
+	Value reflect.Value
+	Str   string
+}
+
+func (e *UnsupportedValueError) Error() string {
+	return "json: unsupported value: " + e.Str
+}
+
+// An InvalidUTF8Error was returned by Marshal, before Go 1.2, when
+// attempting to encode a string value with invalid UTF-8 sequences.
+// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
+// replacing invalid bytes with the Unicode replacement rune U+FFFD.
+// This error is no longer generated but is kept for backwards compatibility
+// with programs that might mention it.
+type InvalidUTF8Error struct {
+	S string // the whole string value that caused the error
+}
+
+func (e *InvalidUTF8Error) Error() string {
+	return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
+}
+
+// A MarshalerError is returned by Marshal when a MarshalJSON or
+// MarshalText method fails or produces invalid JSON.
+type MarshalerError struct {
+	Type reflect.Type
+	Err  error
+}
+
+func (e *MarshalerError) Error() string {
+	return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
+}
+
+var hex = "0123456789abcdef"
+
+// An encodeState encodes JSON into a bytes.Buffer.
+type encodeState struct {
+	bytes.Buffer // accumulated output
+	scratch      [64]byte
+	ext          Extension
+}
+
+var encodeStatePool sync.Pool
+
+func newEncodeState() *encodeState {
+	if v := encodeStatePool.Get(); v != nil {
+		e := v.(*encodeState)
+		e.Reset()
+		return e
+	}
+	return new(encodeState)
+}
+
+func (e *encodeState) marshal(v interface{}, opts encOpts) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if _, ok := r.(runtime.Error); ok {
+				panic(r)
+			}
+			if s, ok := r.(string); ok {
+				panic(s)
+			}
+			err = r.(error)
+		}
+	}()
+	e.reflectValue(reflect.ValueOf(v), opts)
+	return nil
+}
+
+func (e *encodeState) error(err error) {
+	panic(err)
+}
+
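+// isEmptyValue reports whether v is an "empty value" in the sense used by
+// the omitempty struct tag option, as documented on Marshal. For example
+// (illustrative values):
+//
+//   isEmptyValue(reflect.ValueOf(0))       // true
+//   isEmptyValue(reflect.ValueOf(""))      // true
+//   isEmptyValue(reflect.ValueOf([]int{})) // true
+//   isEmptyValue(reflect.ValueOf(1))       // false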
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func (e *encodeState) reflectValue(v reflect.Value, opts encOpts) {
+	valueEncoder(v)(e, v, opts)
+}
+
+type encOpts struct {
+	// quoted causes primitive fields to be encoded inside JSON strings.
+	quoted bool
+	// escapeHTML causes '<', '>', and '&' to be escaped in JSON strings.
+	escapeHTML bool
+}
+
+type encoderFunc func(e *encodeState, v reflect.Value, opts encOpts)
+
+var encoderCache struct {
+	sync.RWMutex
+	m map[reflect.Type]encoderFunc
+}
+
+func valueEncoder(v reflect.Value) encoderFunc {
+	if !v.IsValid() {
+		return invalidValueEncoder
+	}
+	return typeEncoder(v.Type())
+}
+
+func typeEncoder(t reflect.Type) encoderFunc {
+	encoderCache.RLock()
+	f := encoderCache.m[t]
+	encoderCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// To deal with recursive types, populate the map with an
+	// indirect func before we build it. This type waits on the
+	// real func (f) to be ready and then calls it. This indirect
+	// func is only used for recursive types.
+	encoderCache.Lock()
+	if encoderCache.m == nil {
+		encoderCache.m = make(map[reflect.Type]encoderFunc)
+	}
+	var wg sync.WaitGroup
+	wg.Add(1)
+	encoderCache.m[t] = func(e *encodeState, v reflect.Value, opts encOpts) {
+		wg.Wait()
+		f(e, v, opts)
+	}
+	encoderCache.Unlock()
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	innerf := newTypeEncoder(t, true)
+	f = func(e *encodeState, v reflect.Value, opts encOpts) {
+		encode, ok := e.ext.encode[v.Type()]
+		if !ok {
+			innerf(e, v, opts)
+			return
+		}
+
+		b, err := encode(v.Interface())
+		if err == nil {
+			// copy JSON into buffer, checking validity.
+			err = compact(&e.Buffer, b, opts.escapeHTML)
+		}
+		if err != nil {
+			e.error(&MarshalerError{v.Type(), err})
+		}
+	}
+	wg.Done()
+	encoderCache.Lock()
+	encoderCache.m[t] = f
+	encoderCache.Unlock()
+	return f
+}
+
+var (
+	marshalerType     = reflect.TypeOf(new(Marshaler)).Elem()
+	textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
+)
+
+// newTypeEncoder constructs an encoderFunc for a type.
+// The returned encoder only checks CanAddr when allowAddr is true.
+func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
+	if t.Implements(marshalerType) {
+		return marshalerEncoder
+	}
+	if t.Kind() != reflect.Ptr && allowAddr {
+		if reflect.PtrTo(t).Implements(marshalerType) {
+			return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
+		}
+	}
+
+	if t.Implements(textMarshalerType) {
+		return textMarshalerEncoder
+	}
+	if t.Kind() != reflect.Ptr && allowAddr {
+		if reflect.PtrTo(t).Implements(textMarshalerType) {
+			return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
+		}
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		return boolEncoder
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return intEncoder
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return uintEncoder
+	case reflect.Float32:
+		return float32Encoder
+	case reflect.Float64:
+		return float64Encoder
+	case reflect.String:
+		return stringEncoder
+	case reflect.Interface:
+		return interfaceEncoder
+	case reflect.Struct:
+		return newStructEncoder(t)
+	case reflect.Map:
+		return newMapEncoder(t)
+	case reflect.Slice:
+		return newSliceEncoder(t)
+	case reflect.Array:
+		return newArrayEncoder(t)
+	case reflect.Ptr:
+		return newPtrEncoder(t)
+	default:
+		return unsupportedTypeEncoder
+	}
+}
+
+func invalidValueEncoder(e *encodeState, v reflect.Value, _ encOpts) {
+	e.WriteString("null")
+}
+
+func marshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.Kind() == reflect.Ptr && v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := v.Interface().(Marshaler)
+	b, err := m.MarshalJSON()
+	if err == nil {
+		// copy JSON into buffer, checking validity.
+		err = compact(&e.Buffer, b, opts.escapeHTML)
+	}
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+}
+
+func addrMarshalerEncoder(e *encodeState, v reflect.Value, _ encOpts) {
+	va := v.Addr()
+	if va.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := va.Interface().(Marshaler)
+	b, err := m.MarshalJSON()
+	if err == nil {
+		// copy JSON into buffer, checking validity.
+		err = compact(&e.Buffer, b, true)
+	}
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+}
+
+func textMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.Kind() == reflect.Ptr && v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := v.Interface().(encoding.TextMarshaler)
+	b, err := m.MarshalText()
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+	e.stringBytes(b, opts.escapeHTML)
+}
+
+func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	va := v.Addr()
+	if va.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := va.Interface().(encoding.TextMarshaler)
+	b, err := m.MarshalText()
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+	e.stringBytes(b, opts.escapeHTML)
+}
+
+func boolEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+	if v.Bool() {
+		e.WriteString("true")
+	} else {
+		e.WriteString("false")
+	}
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+}
+
+func intEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+	e.Write(b)
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+}
+
+func uintEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+	e.Write(b)
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+}
+
+type floatEncoder int // number of bits
+
+func (bits floatEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	f := v.Float()
+	if math.IsInf(f, 0) || math.IsNaN(f) {
+		e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
+	}
+	b := strconv.AppendFloat(e.scratch[:0], f, 'g', -1, int(bits))
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+	e.Write(b)
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+}
+
+var (
+	float32Encoder = (floatEncoder(32)).encode
+	float64Encoder = (floatEncoder(64)).encode
+)
+
+func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.Type() == numberType {
+		numStr := v.String()
+		// In Go 1.5 the empty string encoded to "0". The empty string is not a
+		// valid number literal, but we keep that behavior for compatibility and
+		// check validity after the substitution below.
+		if numStr == "" {
+			numStr = "0" // Number's zero-val
+		}
+		if !isValidNumber(numStr) {
+			e.error(fmt.Errorf("json: invalid number literal %q", numStr))
+		}
+		e.WriteString(numStr)
+		return
+	}
+	if opts.quoted {
+		sb, err := Marshal(v.String())
+		if err != nil {
+			e.error(err)
+		}
+		e.string(string(sb), opts.escapeHTML)
+	} else {
+		e.string(v.String(), opts.escapeHTML)
+	}
+}
+
+func interfaceEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	e.reflectValue(v.Elem(), opts)
+}
+
+func unsupportedTypeEncoder(e *encodeState, v reflect.Value, _ encOpts) {
+	e.error(&UnsupportedTypeError{v.Type()})
+}
+
+type structEncoder struct {
+	fields    []field
+	fieldEncs []encoderFunc
+}
+
+func (se *structEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	e.WriteByte('{')
+	first := true
+	for i, f := range se.fields {
+		fv := fieldByIndex(v, f.index)
+		if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
+			continue
+		}
+		if first {
+			first = false
+		} else {
+			e.WriteByte(',')
+		}
+		e.string(f.name, opts.escapeHTML)
+		e.WriteByte(':')
+		opts.quoted = f.quoted
+		se.fieldEncs[i](e, fv, opts)
+	}
+	e.WriteByte('}')
+}
+
+func newStructEncoder(t reflect.Type) encoderFunc {
+	fields := cachedTypeFields(t)
+	se := &structEncoder{
+		fields:    fields,
+		fieldEncs: make([]encoderFunc, len(fields)),
+	}
+	for i, f := range fields {
+		se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
+	}
+	return se.encode
+}
+
+type mapEncoder struct {
+	elemEnc encoderFunc
+}
+
+func (me *mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	e.WriteByte('{')
+
+	// Extract and sort the keys.
+	keys := v.MapKeys()
+	sv := make([]reflectWithString, len(keys))
+	for i, v := range keys {
+		sv[i].v = v
+		if err := sv[i].resolve(); err != nil {
+			e.error(&MarshalerError{v.Type(), err})
+		}
+	}
+	sort.Sort(byString(sv))
+
+	for i, kv := range sv {
+		if i > 0 {
+			e.WriteByte(',')
+		}
+		e.string(kv.s, opts.escapeHTML)
+		e.WriteByte(':')
+		me.elemEnc(e, v.MapIndex(kv.v), opts)
+	}
+	e.WriteByte('}')
+}
+
+func newMapEncoder(t reflect.Type) encoderFunc {
+	if t.Key().Kind() != reflect.String && !t.Key().Implements(textMarshalerType) {
+		return unsupportedTypeEncoder
+	}
+	me := &mapEncoder{typeEncoder(t.Elem())}
+	return me.encode
+}
+
+func encodeByteSlice(e *encodeState, v reflect.Value, _ encOpts) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	s := v.Bytes()
+	e.WriteByte('"')
+	if len(s) < 1024 {
+		// for small buffers, using Encode directly is much faster.
+		dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
+		base64.StdEncoding.Encode(dst, s)
+		e.Write(dst)
+	} else {
+		// for large buffers, avoid unnecessary extra temporary
+		// buffer space.
+		enc := base64.NewEncoder(base64.StdEncoding, e)
+		enc.Write(s)
+		enc.Close()
+	}
+	e.WriteByte('"')
+}
+
+// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
+type sliceEncoder struct {
+	arrayEnc encoderFunc
+}
+
+func (se *sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	se.arrayEnc(e, v, opts)
+}
+
+func newSliceEncoder(t reflect.Type) encoderFunc {
+	// Byte slices get special treatment; arrays don't.
+	if t.Elem().Kind() == reflect.Uint8 &&
+		!t.Elem().Implements(marshalerType) &&
+		!t.Elem().Implements(textMarshalerType) {
+		return encodeByteSlice
+	}
+	enc := &sliceEncoder{newArrayEncoder(t)}
+	return enc.encode
+}
+
+type arrayEncoder struct {
+	elemEnc encoderFunc
+}
+
+func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	e.WriteByte('[')
+	n := v.Len()
+	for i := 0; i < n; i++ {
+		if i > 0 {
+			e.WriteByte(',')
+		}
+		ae.elemEnc(e, v.Index(i), opts)
+	}
+	e.WriteByte(']')
+}
+
+func newArrayEncoder(t reflect.Type) encoderFunc {
+	enc := &arrayEncoder{typeEncoder(t.Elem())}
+	return enc.encode
+}
+
+type ptrEncoder struct {
+	elemEnc encoderFunc
+}
+
+func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	pe.elemEnc(e, v.Elem(), opts)
+}
+
+func newPtrEncoder(t reflect.Type) encoderFunc {
+	enc := &ptrEncoder{typeEncoder(t.Elem())}
+	return enc.encode
+}
+
+type condAddrEncoder struct {
+	canAddrEnc, elseEnc encoderFunc
+}
+
+func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.CanAddr() {
+		ce.canAddrEnc(e, v, opts)
+	} else {
+		ce.elseEnc(e, v, opts)
+	}
+}
+
+// newCondAddrEncoder returns an encoder that checks whether its value
+// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
+func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
+	enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
+	return enc.encode
+}
+
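+// isValidTag reports whether s is usable as a field name in a "json" struct
+// tag: it must be non-empty and consist only of Unicode letters, digits, and
+// the punctuation characters allowed below. For example (illustrative inputs):
+//
+//   isValidTag("myName")  // true
+//   isValidTag("my-name") // true
+//   isValidTag("")        // false
+//   isValidTag(`a"b`)     // false: quote characters are reserved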
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		default:
+			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func fieldByIndex(v reflect.Value, index []int) reflect.Value {
+	for _, i := range index {
+		if v.Kind() == reflect.Ptr {
+			if v.IsNil() {
+				return reflect.Value{}
+			}
+			v = v.Elem()
+		}
+		v = v.Field(i)
+	}
+	return v
+}
+
+func typeByIndex(t reflect.Type, index []int) reflect.Type {
+	for _, i := range index {
+		if t.Kind() == reflect.Ptr {
+			t = t.Elem()
+		}
+		t = t.Field(i).Type
+	}
+	return t
+}
+
+type reflectWithString struct {
+	v reflect.Value
+	s string
+}
+
+func (w *reflectWithString) resolve() error {
+	if w.v.Kind() == reflect.String {
+		w.s = w.v.String()
+		return nil
+	}
+	buf, err := w.v.Interface().(encoding.TextMarshaler).MarshalText()
+	w.s = string(buf)
+	return err
+}
+
+// byString is a slice of reflectWithString where the reflect.Value is either
+// a string or an encoding.TextMarshaler.
+// It implements the methods to sort by string.
+type byString []reflectWithString
+
+func (sv byString) Len() int           { return len(sv) }
+func (sv byString) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
+func (sv byString) Less(i, j int) bool { return sv[i].s < sv[j].s }
+
+// NOTE: keep in sync with stringBytes below.
+func (e *encodeState) string(s string, escapeHTML bool) int {
+	len0 := e.Len()
+	e.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if 0x20 <= b && b != '\\' && b != '"' &&
+				(!escapeHTML || b != '<' && b != '>' && b != '&') {
+				i++
+				continue
+			}
+			if start < i {
+				e.WriteString(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				e.WriteByte('\\')
+				e.WriteByte(b)
+			case '\n':
+				e.WriteByte('\\')
+				e.WriteByte('n')
+			case '\r':
+				e.WriteByte('\\')
+				e.WriteByte('r')
+			case '\t':
+				e.WriteByte('\\')
+				e.WriteByte('t')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// If escapeHTML is set, it also escapes <, >, and &
+				// because they can lead to security holes when
+				// user-controlled strings are rendered into JSON
+				// and served to some browsers.
+				e.WriteString(`\u00`)
+				e.WriteByte(hex[b>>4])
+				e.WriteByte(hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRuneInString(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				e.WriteString(s[start:i])
+			}
+			e.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				e.WriteString(s[start:i])
+			}
+			e.WriteString(`\u202`)
+			e.WriteByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		e.WriteString(s[start:])
+	}
+	e.WriteByte('"')
+	return e.Len() - len0
+}
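
The escaping rules documented in the comments above (control characters as \u00XX, optional HTML escaping of '<', '>' and '&', and the unconditional escaping of U+2028/U+2029) are the same ones the standard encoding/json package applies, since this file is derived from it. A minimal way to observe them with the standard package:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	b, _ := json.Marshal("<b>\u2028\tok")
	// '<' and '>' become \u003c and \u003e, U+2028 becomes \u2028, '\t' stays \t.
	fmt.Println(string(b)) // "\u003cb\u003e\u2028\tok"
}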
+
+// NOTE: keep in sync with string above.
+func (e *encodeState) stringBytes(s []byte, escapeHTML bool) int {
+	len0 := e.Len()
+	e.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if 0x20 <= b && b != '\\' && b != '"' &&
+				(!escapeHTML || b != '<' && b != '>' && b != '&') {
+				i++
+				continue
+			}
+			if start < i {
+				e.Write(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				e.WriteByte('\\')
+				e.WriteByte(b)
+			case '\n':
+				e.WriteByte('\\')
+				e.WriteByte('n')
+			case '\r':
+				e.WriteByte('\\')
+				e.WriteByte('r')
+			case '\t':
+				e.WriteByte('\\')
+				e.WriteByte('t')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// If escapeHTML is set, it also escapes <, >, and &
+				// because they can lead to security holes when
+				// user-controlled strings are rendered into JSON
+				// and served to some browsers.
+				e.WriteString(`\u00`)
+				e.WriteByte(hex[b>>4])
+				e.WriteByte(hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRune(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				e.Write(s[start:i])
+			}
+			e.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				e.Write(s[start:i])
+			}
+			e.WriteString(`\u202`)
+			e.WriteByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		e.Write(s[start:])
+	}
+	e.WriteByte('"')
+	return e.Len() - len0
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+	name      string
+	nameBytes []byte                 // []byte(name)
+	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+	tag       bool
+	index     []int
+	typ       reflect.Type
+	omitEmpty bool
+	quoted    bool
+}
+
+func fillField(f field) field {
+	f.nameBytes = []byte(f.name)
+	f.equalFold = foldFunc(f.nameBytes)
+	return f
+}
+
+// byName sorts fields by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+	if x[i].name != x[j].name {
+		return x[i].name < x[j].name
+	}
+	if len(x[i].index) != len(x[j].index) {
+		return len(x[i].index) < len(x[j].index)
+	}
+	if x[i].tag != x[j].tag {
+		return x[i].tag
+	}
+	return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts fields by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+	// Anonymous fields to explore at the current level and the next.
+	current := []field{}
+	next := []field{{typ: t}}
+
+	// Count of queued names for current level and the next.
+	count := map[reflect.Type]int{}
+	nextCount := map[reflect.Type]int{}
+
+	// Types already visited at an earlier level.
+	visited := map[reflect.Type]bool{}
+
+	// Fields found.
+	var fields []field
+
+	for len(next) > 0 {
+		current, next = next, current[:0]
+		count, nextCount = nextCount, map[reflect.Type]int{}
+
+		for _, f := range current {
+			if visited[f.typ] {
+				continue
+			}
+			visited[f.typ] = true
+
+			// Scan f.typ for fields to include.
+			for i := 0; i < f.typ.NumField(); i++ {
+				sf := f.typ.Field(i)
+				if sf.PkgPath != "" && !sf.Anonymous { // unexported
+					continue
+				}
+				tag := sf.Tag.Get("json")
+				if tag == "-" {
+					continue
+				}
+				name, opts := parseTag(tag)
+				if !isValidTag(name) {
+					name = ""
+				}
+				index := make([]int, len(f.index)+1)
+				copy(index, f.index)
+				index[len(f.index)] = i
+
+				ft := sf.Type
+				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+					// Follow pointer.
+					ft = ft.Elem()
+				}
+
+				// Only strings, floats, integers, and booleans can be quoted.
+				quoted := false
+				if opts.Contains("string") {
+					switch ft.Kind() {
+					case reflect.Bool,
+						reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+						reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+						reflect.Float32, reflect.Float64,
+						reflect.String:
+						quoted = true
+					}
+				}
+
+				// Record found field and index sequence.
+				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+					tagged := name != ""
+					if name == "" {
+						name = sf.Name
+					}
+					fields = append(fields, fillField(field{
+						name:      name,
+						tag:       tagged,
+						index:     index,
+						typ:       ft,
+						omitEmpty: opts.Contains("omitempty"),
+						quoted:    quoted,
+					}))
+					if count[f.typ] > 1 {
+						// If there were multiple instances, add a second,
+						// so that the annihilation code will see a duplicate.
+						// It only cares about the distinction between 1 or 2,
+						// so don't bother generating any more copies.
+						fields = append(fields, fields[len(fields)-1])
+					}
+					continue
+				}
+
+				// Record new anonymous struct to explore in next round.
+				nextCount[ft]++
+				if nextCount[ft] == 1 {
+					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+				}
+			}
+		}
+	}
+
+	sort.Sort(byName(fields))
+
+	// Delete all fields that are hidden by the Go rules for embedded fields,
+	// except that fields with JSON tags are promoted.
+
+	// The fields are sorted in primary order of name, secondary order
+	// of field index length. Loop over names; for each name, delete
+	// hidden fields by choosing the one dominant field that survives.
+	out := fields[:0]
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			out = append(out, fi)
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if ok {
+			out = append(out, dominant)
+		}
+	}
+
+	fields = out
+	sort.Sort(byIndex(fields))
+
+	return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+	// The fields are sorted in increasing index-length order. The winner
+	// must therefore be one with the shortest index length. Drop all
+	// longer entries, which is easy: just truncate the slice.
+	length := len(fields[0].index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.tag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
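
A minimal sketch of the field-dominance rules implemented by typeFields and dominantField, observed through the standard encoding/json package, which shares this logic; the Base, Other and Item types are made up for illustration.

package main

import (
	"encoding/json"
	"fmt"
)

type Base struct {
	Name string
	ID   int
}

type Other struct {
	ID int
}

type Item struct {
	Base
	Other
	Name string // shallower than Base.Name, so it hides it
}

func main() {
	v := Item{Base: Base{Name: "base", ID: 1}, Other: Other{ID: 2}, Name: "item"}
	b, _ := json.Marshal(v)
	// Base.ID and Other.ID collide at the same depth with no tag to break the
	// tie, so both are dropped; the shallower Name hides Base.Name.
	fmt.Println(string(b)) // {"Name":"item"}
}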
+
+var fieldCache struct {
+	sync.RWMutex
+	m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}
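
The ",string" and "omitempty" tag options recognized above behave the same way as in the standard encoding/json package; a small made-up example:

package main

import (
	"encoding/json"
	"fmt"
)

type Price struct {
	Amount   int64  `json:"amount,string"`     // quoted: only bools, numbers and strings may use ,string
	Currency string `json:"currency,omitempty"` // dropped when empty
}

func main() {
	b, _ := json.Marshal(Price{Amount: 42})
	fmt.Println(string(b)) // {"amount":"42"}

	var p Price
	_ = json.Unmarshal([]byte(`{"amount":"7","currency":"EUR"}`), &p)
	fmt.Println(p.Amount, p.Currency) // 7 EUR
}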
diff --git a/go/vendor/github.com/globalsign/mgo/internal/json/extension.go b/go/vendor/github.com/globalsign/mgo/internal/json/extension.go
new file mode 100644
index 0000000..1c8fd45
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/internal/json/extension.go
@@ -0,0 +1,95 @@
+package json
+
+import (
+	"reflect"
+)
+
+// Extension holds a set of additional rules to be used when unmarshaling
+// strict JSON or JSON-like content.
+type Extension struct {
+	funcs  map[string]funcExt
+	consts map[string]interface{}
+	keyed  map[string]func([]byte) (interface{}, error)
+	encode map[reflect.Type]func(v interface{}) ([]byte, error)
+
+	unquotedKeys   bool
+	trailingCommas bool
+}
+
+type funcExt struct {
+	key  string
+	args []string
+}
+
+// Extend changes the decoder behavior to consider the provided extension.
+func (dec *Decoder) Extend(ext *Extension) { dec.d.ext = *ext }
+
+// Extend changes the encoder behavior to consider the provided extension.
+func (enc *Encoder) Extend(ext *Extension) { enc.ext = *ext }
+
+// Extend includes in e the extensions defined in ext.
+func (e *Extension) Extend(ext *Extension) {
+	for name, fext := range ext.funcs {
+		e.DecodeFunc(name, fext.key, fext.args...)
+	}
+	for name, value := range ext.consts {
+		e.DecodeConst(name, value)
+	}
+	for key, decode := range ext.keyed {
+		e.DecodeKeyed(key, decode)
+	}
+	for typ, encode := range ext.encode {
+		if e.encode == nil {
+			e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
+		}
+		e.encode[typ] = encode
+	}
+}
+
+// DecodeFunc defines a function call that may be observed inside JSON content.
+// A function with the provided name will be unmarshaled as the document
+// {key: {args[0]: ..., args[N]: ...}}.
+func (e *Extension) DecodeFunc(name string, key string, args ...string) {
+	if e.funcs == nil {
+		e.funcs = make(map[string]funcExt)
+	}
+	e.funcs[name] = funcExt{key, args}
+}
+
+// DecodeConst defines a constant name that may be observed inside JSON content
+// and will be decoded with the provided value.
+func (e *Extension) DecodeConst(name string, value interface{}) {
+	if e.consts == nil {
+		e.consts = make(map[string]interface{})
+	}
+	e.consts[name] = value
+}
+
+// DecodeKeyed defines a key that when observed as the first element inside a
+// JSON document triggers the decoding of that document via the provided
+// decode function.
+func (e *Extension) DecodeKeyed(key string, decode func(data []byte) (interface{}, error)) {
+	if e.keyed == nil {
+		e.keyed = make(map[string]func([]byte) (interface{}, error))
+	}
+	e.keyed[key] = decode
+}
+
+// DecodeUnquotedKeys defines whether to accept map keys that are unquoted strings.
+func (e *Extension) DecodeUnquotedKeys(accept bool) {
+	e.unquotedKeys = accept
+}
+
+// DecodeTrailingCommas defines whether to accept trailing commas in maps and arrays.
+func (e *Extension) DecodeTrailingCommas(accept bool) {
+	e.trailingCommas = accept
+}
+
+// EncodeType registers a function to encode values with the same type of the
+// provided sample.
+func (e *Extension) EncodeType(sample interface{}, encode func(v interface{}) ([]byte, error)) {
+	if e.encode == nil {
+		e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
+	}
+	e.encode[reflect.TypeOf(sample)] = encode
+}
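
A hypothetical sketch of how the setters above are combined and attached to a Decoder. It is written as if it lived inside this internal package (which cannot be imported from outside), and the ObjectId function, the "$oidFunc"/"Id" keys and the MinKey constant are illustrative registrations only.

// extension_example.go (hypothetical sketch, inside this internal package)
package json

import "io"

func newLenientDecoder(r io.Reader) *Decoder {
	var ext Extension
	// Per the DecodeFunc doc above, ObjectId("abc") decodes as the
	// document {"$oidFunc": {"Id": "abc"}}.
	ext.DecodeFunc("ObjectId", "$oidFunc", "Id")
	// The bare constant MinKey decodes as the value 1.
	ext.DecodeConst("MinKey", 1)
	// Accept `{key: 1,}` style input: unquoted keys and trailing commas.
	ext.DecodeUnquotedKeys(true)
	ext.DecodeTrailingCommas(true)

	dec := NewDecoder(r)
	dec.Extend(&ext)
	return dec
}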
diff --git a/go/vendor/github.com/globalsign/mgo/internal/json/fold.go b/go/vendor/github.com/globalsign/mgo/internal/json/fold.go
new file mode 100644
index 0000000..9e17012
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/internal/json/fold.go
@@ -0,0 +1,143 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"unicode/utf8"
+)
+
+const (
+	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
+	kelvin       = '\u212a'
+	smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+//  * S maps to s and to U+017F 'Å¿' Latin small letter long s
+//  * k maps to K and to U+212A 'K' Kelvin sign
+// See https://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+	nonLetter := false
+	special := false // special letter
+	for _, b := range s {
+		if b >= utf8.RuneSelf {
+			return bytes.EqualFold
+		}
+		upper := b & caseMask
+		if upper < 'A' || upper > 'Z' {
+			nonLetter = true
+		} else if upper == 'K' || upper == 'S' {
+			// See above for why these letters are special.
+			special = true
+		}
+	}
+	if special {
+		return equalFoldRight
+	}
+	if nonLetter {
+		return asciiEqualFold
+	}
+	return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+	for _, sb := range s {
+		if len(t) == 0 {
+			return false
+		}
+		tb := t[0]
+		if tb < utf8.RuneSelf {
+			if sb != tb {
+				sbUpper := sb & caseMask
+				if 'A' <= sbUpper && sbUpper <= 'Z' {
+					if sbUpper != tb&caseMask {
+						return false
+					}
+				} else {
+					return false
+				}
+			}
+			t = t[1:]
+			continue
+		}
+		// sb is ASCII and t is not. t must be either kelvin
+		// sign or long s; sb must be s, S, k, or K.
+		tr, size := utf8.DecodeRune(t)
+		switch sb {
+		case 's', 'S':
+			if tr != smallLongEss {
+				return false
+			}
+		case 'k', 'K':
+			if tr != kelvin {
+				return false
+			}
+		default:
+			return false
+		}
+		t = t[size:]
+
+	}
+	if len(t) > 0 {
+		return false
+	}
+	return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, sb := range s {
+		tb := t[i]
+		if sb == tb {
+			continue
+		}
+		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+			if sb&caseMask != tb&caseMask {
+				return false
+			}
+		} else {
+			return false
+		}
+	}
+	return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, b := range s {
+		if b&caseMask != t[i]&caseMask {
+			return false
+		}
+	}
+	return true
+}
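
A hypothetical in-package test exercising the special folding cases called out in the foldFunc comment above (the Kelvin sign and the long s).

// fold_example_test.go (hypothetical, inside this package)
package json

import "testing"

func TestFoldFuncSpecials(t *testing.T) {
	eq := foldFunc([]byte("ks")) // contains 'k' and 's', so equalFoldRight is chosen
	for _, match := range []string{"KS", "\u212as", "k\u017f"} {
		if !eq([]byte("ks"), []byte(match)) {
			t.Errorf("expected %q to fold-match \"ks\"", match)
		}
	}
	if eq([]byte("ks"), []byte("kss")) {
		t.Error("length mismatch must not match")
	}
}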
diff --git a/go/vendor/github.com/globalsign/mgo/internal/json/indent.go b/go/vendor/github.com/globalsign/mgo/internal/json/indent.go
new file mode 100644
index 0000000..fba1954
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/internal/json/indent.go
@@ -0,0 +1,141 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import "bytes"
+
+// Compact appends to dst the JSON-encoded src with
+// insignificant space characters elided.
+func Compact(dst *bytes.Buffer, src []byte) error {
+	return compact(dst, src, false)
+}
+
+func compact(dst *bytes.Buffer, src []byte, escape bool) error {
+	origLen := dst.Len()
+	var scan scanner
+	scan.reset()
+	start := 0
+	for i, c := range src {
+		if escape && (c == '<' || c == '>' || c == '&') {
+			if start < i {
+				dst.Write(src[start:i])
+			}
+			dst.WriteString(`\u00`)
+			dst.WriteByte(hex[c>>4])
+			dst.WriteByte(hex[c&0xF])
+			start = i + 1
+		}
+		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+		if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+			if start < i {
+				dst.Write(src[start:i])
+			}
+			dst.WriteString(`\u202`)
+			dst.WriteByte(hex[src[i+2]&0xF])
+			start = i + 3
+		}
+		v := scan.step(&scan, c)
+		if v >= scanSkipSpace {
+			if v == scanError {
+				break
+			}
+			if start < i {
+				dst.Write(src[start:i])
+			}
+			start = i + 1
+		}
+	}
+	if scan.eof() == scanError {
+		dst.Truncate(origLen)
+		return scan.err
+	}
+	if start < len(src) {
+		dst.Write(src[start:])
+	}
+	return nil
+}
+
+func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
+	dst.WriteByte('\n')
+	dst.WriteString(prefix)
+	for i := 0; i < depth; i++ {
+		dst.WriteString(indent)
+	}
+}
+
+// Indent appends to dst an indented form of the JSON-encoded src.
+// Each element in a JSON object or array begins on a new,
+// indented line beginning with prefix followed by one or more
+// copies of indent according to the indentation nesting.
+// The data appended to dst does not begin with the prefix nor
+// any indentation, to make it easier to embed inside other formatted JSON data.
+// Although leading space characters (space, tab, carriage return, newline)
+// at the beginning of src are dropped, trailing space characters
+// at the end of src are preserved and copied to dst.
+// For example, if src has no trailing spaces, neither will dst;
+// if src ends in a trailing newline, so will dst.
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
+	origLen := dst.Len()
+	var scan scanner
+	scan.reset()
+	needIndent := false
+	depth := 0
+	for _, c := range src {
+		scan.bytes++
+		v := scan.step(&scan, c)
+		if v == scanSkipSpace {
+			continue
+		}
+		if v == scanError {
+			break
+		}
+		if needIndent && v != scanEndObject && v != scanEndArray {
+			needIndent = false
+			depth++
+			newline(dst, prefix, indent, depth)
+		}
+
+		// Emit semantically uninteresting bytes
+		// (in particular, punctuation in strings) unmodified.
+		if v == scanContinue {
+			dst.WriteByte(c)
+			continue
+		}
+
+		// Add spacing around real punctuation.
+		switch c {
+		case '{', '[':
+			// delay indent so that empty object and array are formatted as {} and [].
+			needIndent = true
+			dst.WriteByte(c)
+
+		case ',':
+			dst.WriteByte(c)
+			newline(dst, prefix, indent, depth)
+
+		case ':':
+			dst.WriteByte(c)
+			dst.WriteByte(' ')
+
+		case '}', ']':
+			if needIndent {
+				// suppress indent in empty object/array
+				needIndent = false
+			} else {
+				depth--
+				newline(dst, prefix, indent, depth)
+			}
+			dst.WriteByte(c)
+
+		default:
+			dst.WriteByte(c)
+		}
+	}
+	if scan.eof() == scanError {
+		dst.Truncate(origLen)
+		return scan.err
+	}
+	return nil
+}
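
Compact and Indent here mirror the standard encoding/json functions of the same names, so their behaviour can be seen with the standard package directly:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	src := []byte(`{ "a": [1, 2], "b": {} }`)

	var compacted bytes.Buffer
	if err := json.Compact(&compacted, src); err != nil {
		panic(err)
	}
	fmt.Println(compacted.String()) // {"a":[1,2],"b":{}}

	var indented bytes.Buffer
	if err := json.Indent(&indented, src, "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(indented.String())
	// {
	//   "a": [
	//     1,
	//     2
	//   ],
	//   "b": {}    <- empty objects and arrays stay on one line
	// }
}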
diff --git a/go/vendor/github.com/globalsign/mgo/internal/json/scanner.go b/go/vendor/github.com/globalsign/mgo/internal/json/scanner.go
new file mode 100644
index 0000000..9708043
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/internal/json/scanner.go
@@ -0,0 +1,697 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+// JSON value parser state machine.
+// Just about at the limit of what is reasonable to write by hand.
+// Some parts are a bit tedious, but overall it nicely factors out the
+// otherwise common code from the multiple scanning functions
+// in this package (Compact, Indent, checkValid, nextValue, etc).
+//
+// This file starts with two simple examples using the scanner
+// before diving into the scanner itself.
+
+import "strconv"
+
+// checkValid verifies that data is valid JSON-encoded data.
+// scan is passed in for use by checkValid to avoid an allocation.
+func checkValid(data []byte, scan *scanner) error {
+	scan.reset()
+	for _, c := range data {
+		scan.bytes++
+		if scan.step(scan, c) == scanError {
+			return scan.err
+		}
+	}
+	if scan.eof() == scanError {
+		return scan.err
+	}
+	return nil
+}
+
+// nextValue splits data after the next whole JSON value,
+// returning that value and the bytes that follow it as separate slices.
+// scan is passed in for use by nextValue to avoid an allocation.
+func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
+	scan.reset()
+	for i, c := range data {
+		v := scan.step(scan, c)
+		if v >= scanEndObject {
+			switch v {
+			// probe the scanner with a space to determine whether we will
+			// get scanEnd on the next character. Otherwise, if the next character
+			// is not a space, scanEndTop allocates a needless error.
+			case scanEndObject, scanEndArray, scanEndParams:
+				if scan.step(scan, ' ') == scanEnd {
+					return data[:i+1], data[i+1:], nil
+				}
+			case scanError:
+				return nil, nil, scan.err
+			case scanEnd:
+				return data[:i], data[i:], nil
+			}
+		}
+	}
+	if scan.eof() == scanError {
+		return nil, nil, scan.err
+	}
+	return data, nil, nil
+}
+
+// A SyntaxError is a description of a JSON syntax error.
+type SyntaxError struct {
+	msg    string // description of error
+	Offset int64  // error occurred after reading Offset bytes
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// A scanner is a JSON scanning state machine.
+// Callers call scan.reset() and then pass bytes in one at a time
+// by calling scan.step(&scan, c) for each byte.
+// The return value, referred to as an opcode, tells the
+// caller about significant parsing events like beginning
+// and ending literals, objects, and arrays, so that the
+// caller can follow along if it wishes.
+// The return value scanEnd indicates that a single top-level
+// JSON value has been completed, *before* the byte that
+// just got passed in.  (The indication must be delayed in order
+// to recognize the end of numbers: is 123 a whole value or
+// the beginning of 12345e+6?).
+type scanner struct {
+	// The step is a func to be called to execute the next transition.
+	// Also tried using an integer constant and a single func
+	// with a switch, but using the func directly was 10% faster
+	// on a 64-bit Mac Mini, and it's nicer to read.
+	step func(*scanner, byte) int
+
+	// Reached end of top-level value.
+	endTop bool
+
+	// Stack of what we're in the middle of - array values, object keys, object values.
+	parseState []int
+
+	// Error that happened, if any.
+	err error
+
+	// 1-byte redo (see undo method)
+	redo      bool
+	redoCode  int
+	redoState func(*scanner, byte) int
+
+	// total bytes consumed, updated by decoder.Decode
+	bytes int64
+}
+
+// These values are returned by the state transition functions
+// assigned to scanner.state and the method scanner.eof.
+// They give details about the current state of the scan that
+// callers might be interested to know about.
+// It is okay to ignore the return value of any particular
+// call to scanner.state: if one call returns scanError,
+// every subsequent call will return scanError too.
+const (
+	// Continue.
+	scanContinue     = iota // uninteresting byte
+	scanBeginLiteral        // end implied by next result != scanContinue
+	scanBeginObject         // begin object
+	scanObjectKey           // just finished object key (string)
+	scanObjectValue         // just finished non-last object value
+	scanEndObject           // end object (implies scanObjectValue if possible)
+	scanBeginArray          // begin array
+	scanArrayValue          // just finished array value
+	scanEndArray            // end array (implies scanArrayValue if possible)
+	scanBeginName           // begin function call
+	scanParam               // begin function argument
+	scanEndParams           // end function call
+	scanSkipSpace           // space byte; can skip; known to be last "continue" result
+
+	// Stop.
+	scanEnd   // top-level value ended *before* this byte; known to be first "stop" result
+	scanError // hit an error, scanner.err.
+)
+
+// These values are stored in the parseState stack.
+// They give the current state of a composite value
+// being scanned. If the parser is inside a nested value
+// the parseState describes the nested state, outermost at entry 0.
+const (
+	parseObjectKey   = iota // parsing object key (before colon)
+	parseObjectValue        // parsing object value (after colon)
+	parseArrayValue         // parsing array value
+	parseName               // parsing unquoted name
+	parseParam              // parsing function argument value
+)
+
+// reset prepares the scanner for use.
+// It must be called before calling s.step.
+func (s *scanner) reset() {
+	s.step = stateBeginValue
+	s.parseState = s.parseState[0:0]
+	s.err = nil
+	s.redo = false
+	s.endTop = false
+}
+
+// eof tells the scanner that the end of input has been reached.
+// It returns a scan status just as s.step does.
+func (s *scanner) eof() int {
+	if s.err != nil {
+		return scanError
+	}
+	if s.endTop {
+		return scanEnd
+	}
+	s.step(s, ' ')
+	if s.endTop {
+		return scanEnd
+	}
+	if s.err == nil {
+		s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
+	}
+	return scanError
+}
+
+// pushParseState pushes a new parse state p onto the parse stack.
+func (s *scanner) pushParseState(p int) {
+	s.parseState = append(s.parseState, p)
+}
+
+// popParseState pops a parse state (already obtained) off the stack
+// and updates s.step accordingly.
+func (s *scanner) popParseState() {
+	n := len(s.parseState) - 1
+	s.parseState = s.parseState[0:n]
+	s.redo = false
+	if n == 0 {
+		s.step = stateEndTop
+		s.endTop = true
+	} else {
+		s.step = stateEndValue
+	}
+}
+
+func isSpace(c byte) bool {
+	return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+// stateBeginValueOrEmpty is the state after reading `[`.
+func stateBeginValueOrEmpty(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	if c == ']' {
+		return stateEndValue(s, c)
+	}
+	return stateBeginValue(s, c)
+}
+
+// stateBeginValue is the state at the beginning of the input.
+func stateBeginValue(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	switch c {
+	case '{':
+		s.step = stateBeginStringOrEmpty
+		s.pushParseState(parseObjectKey)
+		return scanBeginObject
+	case '[':
+		s.step = stateBeginValueOrEmpty
+		s.pushParseState(parseArrayValue)
+		return scanBeginArray
+	case '"':
+		s.step = stateInString
+		return scanBeginLiteral
+	case '-':
+		s.step = stateNeg
+		return scanBeginLiteral
+	case '0': // beginning of 0.123
+		s.step = state0
+		return scanBeginLiteral
+	case 'n':
+		s.step = stateNew0
+		return scanBeginName
+	}
+	if '1' <= c && c <= '9' { // beginning of 1234.5
+		s.step = state1
+		return scanBeginLiteral
+	}
+	if isName(c) {
+		s.step = stateName
+		return scanBeginName
+	}
+	return s.error(c, "looking for beginning of value")
+}
+
+func isName(c byte) bool {
+	return c == '$' || c == '_' || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9'
+}
+
+// stateBeginStringOrEmpty is the state after reading `{`.
+func stateBeginStringOrEmpty(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	if c == '}' {
+		n := len(s.parseState)
+		s.parseState[n-1] = parseObjectValue
+		return stateEndValue(s, c)
+	}
+	return stateBeginString(s, c)
+}
+
+// stateBeginString is the state after reading `{"key": value,`.
+func stateBeginString(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	if c == '"' {
+		s.step = stateInString
+		return scanBeginLiteral
+	}
+	if isName(c) {
+		s.step = stateName
+		return scanBeginName
+	}
+	return s.error(c, "looking for beginning of object key string")
+}
+
+// stateEndValue is the state after completing a value,
+// such as after reading `{}` or `true` or `["x"`.
+func stateEndValue(s *scanner, c byte) int {
+	n := len(s.parseState)
+	if n == 0 {
+		// Completed top-level before the current byte.
+		s.step = stateEndTop
+		s.endTop = true
+		return stateEndTop(s, c)
+	}
+	if c <= ' ' && isSpace(c) {
+		s.step = stateEndValue
+		return scanSkipSpace
+	}
+	ps := s.parseState[n-1]
+	switch ps {
+	case parseObjectKey:
+		if c == ':' {
+			s.parseState[n-1] = parseObjectValue
+			s.step = stateBeginValue
+			return scanObjectKey
+		}
+		return s.error(c, "after object key")
+	case parseObjectValue:
+		if c == ',' {
+			s.parseState[n-1] = parseObjectKey
+			s.step = stateBeginStringOrEmpty
+			return scanObjectValue
+		}
+		if c == '}' {
+			s.popParseState()
+			return scanEndObject
+		}
+		return s.error(c, "after object key:value pair")
+	case parseArrayValue:
+		if c == ',' {
+			s.step = stateBeginValueOrEmpty
+			return scanArrayValue
+		}
+		if c == ']' {
+			s.popParseState()
+			return scanEndArray
+		}
+		return s.error(c, "after array element")
+	case parseParam:
+		if c == ',' {
+			s.step = stateBeginValue
+			return scanParam
+		}
+		if c == ')' {
+			s.popParseState()
+			return scanEndParams
+		}
+		return s.error(c, "after function argument")
+	}
+	return s.error(c, "")
+}
+
+// stateEndTop is the state after finishing the top-level value,
+// such as after reading `{}` or `[1,2,3]`.
+// Only space characters should be seen now.
+func stateEndTop(s *scanner, c byte) int {
+	if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+		// Complain about non-space byte on next call.
+		s.error(c, "after top-level value")
+	}
+	return scanEnd
+}
+
+// stateInString is the state after reading `"`.
+func stateInString(s *scanner, c byte) int {
+	if c == '"' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	if c == '\\' {
+		s.step = stateInStringEsc
+		return scanContinue
+	}
+	if c < 0x20 {
+		return s.error(c, "in string literal")
+	}
+	return scanContinue
+}
+
+// stateInStringEsc is the state after reading `"\` during a quoted string.
+func stateInStringEsc(s *scanner, c byte) int {
+	switch c {
+	case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
+		s.step = stateInString
+		return scanContinue
+	case 'u':
+		s.step = stateInStringEscU
+		return scanContinue
+	}
+	return s.error(c, "in string escape code")
+}
+
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
+func stateInStringEscU(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInStringEscU1
+		return scanContinue
+	}
+	// numbers
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
+func stateInStringEscU1(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInStringEscU12
+		return scanContinue
+	}
+	// numbers
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
+func stateInStringEscU12(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInStringEscU123
+		return scanContinue
+	}
+	// numbers
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
+func stateInStringEscU123(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInString
+		return scanContinue
+	}
+	// numbers
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateNeg is the state after reading `-` during a number.
+func stateNeg(s *scanner, c byte) int {
+	if c == '0' {
+		s.step = state0
+		return scanContinue
+	}
+	if '1' <= c && c <= '9' {
+		s.step = state1
+		return scanContinue
+	}
+	return s.error(c, "in numeric literal")
+}
+
+// state1 is the state after reading a non-zero integer during a number,
+// such as after reading `1` or `100` but not `0`.
+func state1(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		s.step = state1
+		return scanContinue
+	}
+	return state0(s, c)
+}
+
+// state0 is the state after reading `0` during a number.
+func state0(s *scanner, c byte) int {
+	if c == '.' {
+		s.step = stateDot
+		return scanContinue
+	}
+	if c == 'e' || c == 'E' {
+		s.step = stateE
+		return scanContinue
+	}
+	return stateEndValue(s, c)
+}
+
+// stateDot is the state after reading the integer and decimal point in a number,
+// such as after reading `1.`.
+func stateDot(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		s.step = stateDot0
+		return scanContinue
+	}
+	return s.error(c, "after decimal point in numeric literal")
+}
+
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
+// digits of a number, such as after reading `3.14`.
+func stateDot0(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		return scanContinue
+	}
+	if c == 'e' || c == 'E' {
+		s.step = stateE
+		return scanContinue
+	}
+	return stateEndValue(s, c)
+}
+
+// stateE is the state after reading the mantissa and e in a number,
+// such as after reading `314e` or `0.314e`.
+func stateE(s *scanner, c byte) int {
+	if c == '+' || c == '-' {
+		s.step = stateESign
+		return scanContinue
+	}
+	return stateESign(s, c)
+}
+
+// stateESign is the state after reading the mantissa, e, and sign in a number,
+// such as after reading `314e-` or `0.314e+`.
+func stateESign(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		s.step = stateE0
+		return scanContinue
+	}
+	return s.error(c, "in exponent of numeric literal")
+}
+
+// stateE0 is the state after reading the mantissa, e, optional sign,
+// and at least one digit of the exponent in a number,
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
+func stateE0(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		return scanContinue
+	}
+	return stateEndValue(s, c)
+}
+
+// stateNew0 is the state after reading `n`.
+func stateNew0(s *scanner, c byte) int {
+	if c == 'e' {
+		s.step = stateNew1
+		return scanContinue
+	}
+	s.step = stateName
+	return stateName(s, c)
+}
+
+// stateNew1 is the state after reading `ne`.
+func stateNew1(s *scanner, c byte) int {
+	if c == 'w' {
+		s.step = stateNew2
+		return scanContinue
+	}
+	s.step = stateName
+	return stateName(s, c)
+}
+
+// stateNew2 is the state after reading `new`.
+func stateNew2(s *scanner, c byte) int {
+	s.step = stateName
+	if c == ' ' {
+		return scanContinue
+	}
+	return stateName(s, c)
+}
+
+// stateName is the state while reading an unquoted function name.
+func stateName(s *scanner, c byte) int {
+	if isName(c) {
+		return scanContinue
+	}
+	if c == '(' {
+		s.step = stateParamOrEmpty
+		s.pushParseState(parseParam)
+		return scanParam
+	}
+	return stateEndValue(s, c)
+}
+
+// stateParamOrEmpty is the state after reading `(`.
+func stateParamOrEmpty(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	if c == ')' {
+		return stateEndValue(s, c)
+	}
+	return stateBeginValue(s, c)
+}
+
+// stateT is the state after reading `t`.
+func stateT(s *scanner, c byte) int {
+	if c == 'r' {
+		s.step = stateTr
+		return scanContinue
+	}
+	return s.error(c, "in literal true (expecting 'r')")
+}
+
+// stateTr is the state after reading `tr`.
+func stateTr(s *scanner, c byte) int {
+	if c == 'u' {
+		s.step = stateTru
+		return scanContinue
+	}
+	return s.error(c, "in literal true (expecting 'u')")
+}
+
+// stateTru is the state after reading `tru`.
+func stateTru(s *scanner, c byte) int {
+	if c == 'e' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	return s.error(c, "in literal true (expecting 'e')")
+}
+
+// stateF is the state after reading `f`.
+func stateF(s *scanner, c byte) int {
+	if c == 'a' {
+		s.step = stateFa
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 'a')")
+}
+
+// stateFa is the state after reading `fa`.
+func stateFa(s *scanner, c byte) int {
+	if c == 'l' {
+		s.step = stateFal
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 'l')")
+}
+
+// stateFal is the state after reading `fal`.
+func stateFal(s *scanner, c byte) int {
+	if c == 's' {
+		s.step = stateFals
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 's')")
+}
+
+// stateFals is the state after reading `fals`.
+func stateFals(s *scanner, c byte) int {
+	if c == 'e' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 'e')")
+}
+
+// stateN is the state after reading `n`.
+func stateN(s *scanner, c byte) int {
+	if c == 'u' {
+		s.step = stateNu
+		return scanContinue
+	}
+	return s.error(c, "in literal null (expecting 'u')")
+}
+
+// stateNu is the state after reading `nu`.
+func stateNu(s *scanner, c byte) int {
+	if c == 'l' {
+		s.step = stateNul
+		return scanContinue
+	}
+	return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateNul is the state after reading `nul`.
+func stateNul(s *scanner, c byte) int {
+	if c == 'l' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateError is the state after reaching a syntax error,
+// such as after reading `[1}` or `5.1.2`.
+func stateError(s *scanner, c byte) int {
+	return scanError
+}
+
+// error records an error and switches to the error state.
+func (s *scanner) error(c byte, context string) int {
+	s.step = stateError
+	s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
+	return scanError
+}
+
+// quoteChar formats c as a quoted character literal
+func quoteChar(c byte) string {
+	// special cases - different from quoted strings
+	if c == '\'' {
+		return `'\''`
+	}
+	if c == '"' {
+		return `'"'`
+	}
+
+	// use quoted string with different quotation marks
+	s := strconv.Quote(string(c))
+	return "'" + s[1:len(s)-1] + "'"
+}
+
+// undo causes the scanner to return scanCode from the next state transition.
+// This gives callers a simple 1-byte undo mechanism.
+func (s *scanner) undo(scanCode int) {
+	if s.redo {
+		panic("json: invalid use of scanner")
+	}
+	s.redoCode = scanCode
+	s.redoState = s.step
+	s.step = stateRedo
+	s.redo = true
+}
+
+// stateRedo helps implement the scanner's 1-byte undo.
+func stateRedo(s *scanner, c byte) int {
+	s.redo = false
+	s.step = s.redoState
+	return s.redoCode
+}
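
A hypothetical in-package test showing the scanner driven one byte at a time, the way checkValid and Compact do, including the function-call and "new"-prefixed syntax this copy adds over the standard library scanner.

// scanner_example_test.go (hypothetical, inside this package)
package json

import "testing"

func TestScannerAcceptsFunctionSyntax(t *testing.T) {
	var s scanner
	// The scanner itself accepts the extended syntax; the decoder decides
	// what it means via an Extension.
	if err := checkValid([]byte(`{"id": ObjectId("abc"), "n": new Date(1)}`), &s); err != nil {
		t.Fatalf("extended syntax rejected: %v", err)
	}
	if err := checkValid([]byte(`{"x": 1}garbage`), &s); err == nil {
		t.Fatal("trailing garbage should be a syntax error")
	}
}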
diff --git a/go/vendor/github.com/globalsign/mgo/internal/json/stream.go b/go/vendor/github.com/globalsign/mgo/internal/json/stream.go
new file mode 100644
index 0000000..e023702
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/internal/json/stream.go
@@ -0,0 +1,510 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"errors"
+	"io"
+)
+
+// A Decoder reads and decodes JSON values from an input stream.
+type Decoder struct {
+	r     io.Reader
+	buf   []byte
+	d     decodeState
+	scanp int // start of unread data in buf
+	scan  scanner
+	err   error
+
+	tokenState int
+	tokenStack []int
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may
+// read data from r beyond the JSON values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{r: r}
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
+
+// Decode reads the next JSON-encoded value from its
+// input and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about
+// the conversion of JSON into a Go value.
+func (dec *Decoder) Decode(v interface{}) error {
+	if dec.err != nil {
+		return dec.err
+	}
+
+	if err := dec.tokenPrepareForDecode(); err != nil {
+		return err
+	}
+
+	if !dec.tokenValueAllowed() {
+		return &SyntaxError{msg: "not at beginning of value"}
+	}
+
+	// Read whole value into buffer.
+	n, err := dec.readValue()
+	if err != nil {
+		return err
+	}
+	dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
+	dec.scanp += n
+
+	// Don't save err from unmarshal into dec.err:
+	// the connection is still usable since we read a complete JSON
+	// object from it before the error happened.
+	err = dec.d.unmarshal(v)
+
+	// fixup token streaming state
+	dec.tokenValueEnd()
+
+	return err
+}
+
+// Buffered returns a reader of the data remaining in the Decoder's
+// buffer. The reader is valid until the next call to Decode.
+func (dec *Decoder) Buffered() io.Reader {
+	return bytes.NewReader(dec.buf[dec.scanp:])
+}
+
+// readValue reads a JSON value into dec.buf.
+// It returns the length of the encoding.
+func (dec *Decoder) readValue() (int, error) {
+	dec.scan.reset()
+
+	scanp := dec.scanp
+	var err error
+Input:
+	for {
+		// Look in the buffer for a new value.
+		for i, c := range dec.buf[scanp:] {
+			dec.scan.bytes++
+			v := dec.scan.step(&dec.scan, c)
+			if v == scanEnd {
+				scanp += i
+				break Input
+			}
+			// scanEnd is delayed one byte.
+			// We might block trying to get that byte from src,
+			// so instead invent a space byte.
+			if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
+				scanp += i + 1
+				break Input
+			}
+			if v == scanError {
+				dec.err = dec.scan.err
+				return 0, dec.scan.err
+			}
+		}
+		scanp = len(dec.buf)
+
+		// Did the last read have an error?
+		// Delayed until now to allow buffer scan.
+		if err != nil {
+			if err == io.EOF {
+				if dec.scan.step(&dec.scan, ' ') == scanEnd {
+					break Input
+				}
+				if nonSpace(dec.buf) {
+					err = io.ErrUnexpectedEOF
+				}
+			}
+			dec.err = err
+			return 0, err
+		}
+
+		n := scanp - dec.scanp
+		err = dec.refill()
+		scanp = dec.scanp + n
+	}
+	return scanp - dec.scanp, nil
+}
+
+func (dec *Decoder) refill() error {
+	// Make room to read more into the buffer.
+	// First slide down data already consumed.
+	if dec.scanp > 0 {
+		n := copy(dec.buf, dec.buf[dec.scanp:])
+		dec.buf = dec.buf[:n]
+		dec.scanp = 0
+	}
+
+	// Grow buffer if not large enough.
+	const minRead = 512
+	if cap(dec.buf)-len(dec.buf) < minRead {
+		newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
+		copy(newBuf, dec.buf)
+		dec.buf = newBuf
+	}
+
+	// Read. Delay error for next iteration (after scan).
+	n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
+	dec.buf = dec.buf[0 : len(dec.buf)+n]
+
+	return err
+}
+
+func nonSpace(b []byte) bool {
+	for _, c := range b {
+		if !isSpace(c) {
+			return true
+		}
+	}
+	return false
+}
+
+// An Encoder writes JSON values to an output stream.
+type Encoder struct {
+	w          io.Writer
+	err        error
+	escapeHTML bool
+
+	indentBuf    *bytes.Buffer
+	indentPrefix string
+	indentValue  string
+
+	ext Extension
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{w: w, escapeHTML: true}
+}
+
+// Encode writes the JSON encoding of v to the stream,
+// followed by a newline character.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values to JSON.
+func (enc *Encoder) Encode(v interface{}) error {
+	if enc.err != nil {
+		return enc.err
+	}
+	e := newEncodeState()
+	e.ext = enc.ext
+	err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML})
+	if err != nil {
+		return err
+	}
+
+	// Terminate each value with a newline.
+	// This makes the output look a little nicer
+	// when debugging, and some kind of space
+	// is required if the encoded value was a number,
+	// so that the reader knows there aren't more
+	// digits coming.
+	e.WriteByte('\n')
+
+	b := e.Bytes()
+	if enc.indentBuf != nil {
+		enc.indentBuf.Reset()
+		err = Indent(enc.indentBuf, b, enc.indentPrefix, enc.indentValue)
+		if err != nil {
+			return err
+		}
+		b = enc.indentBuf.Bytes()
+	}
+	if _, err = enc.w.Write(b); err != nil {
+		enc.err = err
+	}
+	encodeStatePool.Put(e)
+	return err
+}
+
+// Indent sets the encoder to format each encoded value with Indent.
+func (enc *Encoder) Indent(prefix, indent string) {
+	enc.indentBuf = new(bytes.Buffer)
+	enc.indentPrefix = prefix
+	enc.indentValue = indent
+}
+
+// DisableHTMLEscaping causes the encoder not to escape angle brackets
+// ("<" and ">") or ampersands ("&") in JSON strings.
+func (enc *Encoder) DisableHTMLEscaping() {
+	enc.escapeHTML = false
+}
+
+// RawMessage is a raw encoded JSON value.
+// It implements Marshaler and Unmarshaler and can
+// be used to delay JSON decoding or precompute a JSON encoding.
+type RawMessage []byte
+
+// MarshalJSON returns *m as the JSON encoding of m.
+func (m *RawMessage) MarshalJSON() ([]byte, error) {
+	return *m, nil
+}
+
+// UnmarshalJSON sets *m to a copy of data.
+func (m *RawMessage) UnmarshalJSON(data []byte) error {
+	if m == nil {
+		return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
+	}
+	*m = append((*m)[0:0], data...)
+	return nil
+}
+
+var _ Marshaler = (*RawMessage)(nil)
+var _ Unmarshaler = (*RawMessage)(nil)
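
RawMessage here matches the standard library's json.RawMessage; the usual pattern is to delay decoding part of a message until a discriminator field has been read. Using the standard package for the example:

package main

import (
	"encoding/json"
	"fmt"
)

// Envelope delays decoding of Payload until Kind has been inspected.
type Envelope struct {
	Kind    string          `json:"kind"`
	Payload json.RawMessage `json:"payload"`
}

func main() {
	data := []byte(`{"kind":"point","payload":{"x":1,"y":2}}`)

	var env Envelope
	if err := json.Unmarshal(data, &env); err != nil {
		panic(err)
	}

	switch env.Kind {
	case "point":
		var p struct{ X, Y int }
		if err := json.Unmarshal(env.Payload, &p); err != nil {
			panic(err)
		}
		fmt.Println(p.X, p.Y) // 1 2
	}
}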
+
+// A Token holds a value of one of these types:
+//
+//	Delim, for the four JSON delimiters [ ] { }
+//	bool, for JSON booleans
+//	float64, for JSON numbers
+//	Number, for JSON numbers
+//	string, for JSON string literals
+//	nil, for JSON null
+//
+type Token interface{}
+
+const (
+	tokenTopValue = iota
+	tokenArrayStart
+	tokenArrayValue
+	tokenArrayComma
+	tokenObjectStart
+	tokenObjectKey
+	tokenObjectColon
+	tokenObjectValue
+	tokenObjectComma
+)
+
+// advance tokenstate from a separator state to a value state
+func (dec *Decoder) tokenPrepareForDecode() error {
+	// Note: Not calling peek before switch, to avoid
+	// putting peek into the standard Decode path.
+	// peek is only called when using the Token API.
+	switch dec.tokenState {
+	case tokenArrayComma:
+		c, err := dec.peek()
+		if err != nil {
+			return err
+		}
+		if c != ',' {
+			return &SyntaxError{"expected comma after array element", 0}
+		}
+		dec.scanp++
+		dec.tokenState = tokenArrayValue
+	case tokenObjectColon:
+		c, err := dec.peek()
+		if err != nil {
+			return err
+		}
+		if c != ':' {
+			return &SyntaxError{"expected colon after object key", 0}
+		}
+		dec.scanp++
+		dec.tokenState = tokenObjectValue
+	}
+	return nil
+}
+
+func (dec *Decoder) tokenValueAllowed() bool {
+	switch dec.tokenState {
+	case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
+		return true
+	}
+	return false
+}
+
+func (dec *Decoder) tokenValueEnd() {
+	switch dec.tokenState {
+	case tokenArrayStart, tokenArrayValue:
+		dec.tokenState = tokenArrayComma
+	case tokenObjectValue:
+		dec.tokenState = tokenObjectComma
+	}
+}
+
+// A Delim is a JSON array or object delimiter, one of [ ] { or }.
+type Delim rune
+
+func (d Delim) String() string {
+	return string(d)
+}
+
+// Token returns the next JSON token in the input stream.
+// At the end of the input stream, Token returns nil, io.EOF.
+//
+// Token guarantees that the delimiters [ ] { } it returns are
+// properly nested and matched: if Token encounters an unexpected
+// delimiter in the input, it will return an error.
+//
+// The input stream consists of basic JSON values—bool, string,
+// number, and null—along with delimiters [ ] { } of type Delim
+// to mark the start and end of arrays and objects.
+// Commas and colons are elided.
+func (dec *Decoder) Token() (Token, error) {
+	for {
+		c, err := dec.peek()
+		if err != nil {
+			return nil, err
+		}
+		switch c {
+		case '[':
+			if !dec.tokenValueAllowed() {
+				return dec.tokenError(c)
+			}
+			dec.scanp++
+			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+			dec.tokenState = tokenArrayStart
+			return Delim('['), nil
+
+		case ']':
+			if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
+				return dec.tokenError(c)
+			}
+			dec.scanp++
+			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+			dec.tokenValueEnd()
+			return Delim(']'), nil
+
+		case '{':
+			if !dec.tokenValueAllowed() {
+				return dec.tokenError(c)
+			}
+			dec.scanp++
+			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+			dec.tokenState = tokenObjectStart
+			return Delim('{'), nil
+
+		case '}':
+			if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
+				return dec.tokenError(c)
+			}
+			dec.scanp++
+			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+			dec.tokenValueEnd()
+			return Delim('}'), nil
+
+		case ':':
+			if dec.tokenState != tokenObjectColon {
+				return dec.tokenError(c)
+			}
+			dec.scanp++
+			dec.tokenState = tokenObjectValue
+			continue
+
+		case ',':
+			if dec.tokenState == tokenArrayComma {
+				dec.scanp++
+				dec.tokenState = tokenArrayValue
+				continue
+			}
+			if dec.tokenState == tokenObjectComma {
+				dec.scanp++
+				dec.tokenState = tokenObjectKey
+				continue
+			}
+			return dec.tokenError(c)
+
+		case '"':
+			if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
+				var x string
+				old := dec.tokenState
+				dec.tokenState = tokenTopValue
+				err := dec.Decode(&x)
+				dec.tokenState = old
+				if err != nil {
+					clearOffset(err)
+					return nil, err
+				}
+				dec.tokenState = tokenObjectColon
+				return x, nil
+			}
+			fallthrough
+
+		default:
+			if !dec.tokenValueAllowed() {
+				return dec.tokenError(c)
+			}
+			var x interface{}
+			if err := dec.Decode(&x); err != nil {
+				clearOffset(err)
+				return nil, err
+			}
+			return x, nil
+		}
+	}
+}
+
+func clearOffset(err error) {
+	if s, ok := err.(*SyntaxError); ok {
+		s.Offset = 0
+	}
+}
+
+func (dec *Decoder) tokenError(c byte) (Token, error) {
+	var context string
+	switch dec.tokenState {
+	case tokenTopValue:
+		context = " looking for beginning of value"
+	case tokenArrayStart, tokenArrayValue, tokenObjectValue:
+		context = " looking for beginning of value"
+	case tokenArrayComma:
+		context = " after array element"
+	case tokenObjectKey:
+		context = " looking for beginning of object key string"
+	case tokenObjectColon:
+		context = " after object key"
+	case tokenObjectComma:
+		context = " after object key:value pair"
+	}
+	return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
+}
+
+// More reports whether there is another element in the
+// current array or object being parsed.
+func (dec *Decoder) More() bool {
+	c, err := dec.peek()
+	return err == nil && c != ']' && c != '}'
+}
+
+func (dec *Decoder) peek() (byte, error) {
+	var err error
+	for {
+		for i := dec.scanp; i < len(dec.buf); i++ {
+			c := dec.buf[i]
+			if isSpace(c) {
+				continue
+			}
+			dec.scanp = i
+			return c, nil
+		}
+		// buffer has been scanned, now report any error
+		if err != nil {
+			return 0, err
+		}
+		err = dec.refill()
+	}
+}
+
+/*
+TODO
+
+// EncodeToken writes the given JSON token to the stream.
+// It returns an error if the delimiters [ ] { } are not properly used.
+//
+// EncodeToken does not call Flush, because usually it is part of
+// a larger operation such as Encode, and those will call Flush when finished.
+// Callers that create an Encoder and then invoke EncodeToken directly,
+// without using Encode, need to call Flush when finished to ensure that
+// the JSON is written to the underlying writer.
+func (e *Encoder) EncodeToken(t Token) error  {
+	...
+}
+
+*/
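
The Decoder, Token and More methods above mirror encoding/json, so the usual streaming pattern applies; shown with the standard package since this one is internal:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	r := strings.NewReader(`[{"name":"a"},{"name":"b"}]`)
	dec := json.NewDecoder(r)

	// Consume the opening '[' delimiter, then decode one element at a time.
	if _, err := dec.Token(); err != nil {
		panic(err)
	}
	for dec.More() {
		var v struct{ Name string }
		if err := dec.Decode(&v); err != nil {
			panic(err)
		}
		fmt.Println(v.Name)
	}
	// Consume the closing ']' delimiter.
	if _, err := dec.Token(); err != nil {
		panic(err)
	}
}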
diff --git a/go/vendor/github.com/globalsign/mgo/internal/json/tags.go b/go/vendor/github.com/globalsign/mgo/internal/json/tags.go
new file mode 100644
index 0000000..c38fd51
--- /dev/null
+++ b/go/vendor/github.com/globalsign/mgo/internal/json/tags.go
@@ -0,0 +1,44 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"strings"
+)
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+	if idx := strings.Index(tag, ","); idx != -1 {
+		return tag[:idx], tagOptions(tag[idx+1:])
+	}
+	return tag, tagOptions("")
+}
+
+// Contains reports whether the comma-separated list of options
+// contains the given option name. The name must be delimited by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+	if len(o) == 0 {
+		return false
+	}
+	s := string(o)
+	for s != "" {
+		var next string
+		i := strings.Index(s, ",")
+		if i >= 0 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == optionName {
+			return true
+		}
+		s = next
+	}
+	return false
+}
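
A hypothetical in-package test illustrating parseTag and tagOptions.Contains as defined above.

// tags_example_test.go (hypothetical, inside this package)
package json

import "testing"

func TestParseTagExample(t *testing.T) {
	name, opts := parseTag("price,omitempty,string")
	if name != "price" {
		t.Fatalf("name = %q, want %q", name, "price")
	}
	if !opts.Contains("omitempty") || !opts.Contains("string") {
		t.Fatal("expected both options to be present")
	}
	if opts.Contains("omit") {
		t.Fatal("Contains must match whole options, not substrings")
	}
}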