vendorify
diff --git a/go/vendor/github.com/mailru/easyjson/LICENSE b/go/vendor/github.com/mailru/easyjson/LICENSE
new file mode 100644
index 0000000..fbff658
--- /dev/null
+++ b/go/vendor/github.com/mailru/easyjson/LICENSE
@@ -0,0 +1,7 @@
+Copyright (c) 2016 Mail.Ru Group
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/go/vendor/github.com/mailru/easyjson/buffer/pool.go b/go/vendor/github.com/mailru/easyjson/buffer/pool.go
new file mode 100644
index 0000000..07fb4bc
--- /dev/null
+++ b/go/vendor/github.com/mailru/easyjson/buffer/pool.go
@@ -0,0 +1,270 @@
+// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to
+// reduce copying and to allow reuse of individual chunks.
+package buffer
+
+import (
+	"io"
+	"sync"
+)
+
+// PoolConfig contains configuration for the allocation and reuse strategy.
+type PoolConfig struct {
+	StartSize  int // Minimum chunk size that is allocated.
+	PooledSize int // Minimum chunk size that is reused; reusing chunks that are too small results in overhead.
+	MaxSize    int // Maximum chunk size that will be allocated.
+}
+
+var config = PoolConfig{
+	StartSize:  128,
+	PooledSize: 512,
+	MaxSize:    32768,
+}
+
+// Reuse pool: chunk size -> pool.
+var buffers = map[int]*sync.Pool{}
+
+func initBuffers() {
+	for l := config.PooledSize; l <= config.MaxSize; l *= 2 {
+		buffers[l] = new(sync.Pool)
+	}
+}
+
+func init() {
+	initBuffers()
+}
+
+// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done.
+func Init(cfg PoolConfig) {
+	config = cfg
+	initBuffers()
+}
+
+// putBuf puts a chunk into the reuse pool if it can be reused.
+func putBuf(buf []byte) {
+	size := cap(buf)
+	if size < config.PooledSize {
+		return
+	}
+	if c := buffers[size]; c != nil {
+		c.Put(buf[:0])
+	}
+}
+
+// getBuf gets a chunk from the reuse pool or creates a new one if reuse failed.
+func getBuf(size int) []byte {
+	if size < config.PooledSize {
+		return make([]byte, 0, size)
+	}
+
+	if c := buffers[size]; c != nil {
+		v := c.Get()
+		if v != nil {
+			return v.([]byte)
+		}
+	}
+	return make([]byte, 0, size)
+}
+
+// Buffer is a buffer optimized for serialization without extra copying.
+type Buffer struct {
+
+	// Buf is the current chunk that can be used for serialization.
+	Buf []byte
+
+	toPool []byte
+	bufs   [][]byte
+}
+
+// EnsureSpace makes sure that the current chunk contains at least s free bytes,
+// possibly creating a new chunk.
+func (b *Buffer) EnsureSpace(s int) {
+	if cap(b.Buf)-len(b.Buf) >= s {
+		return
+	}
+	l := len(b.Buf)
+	if l > 0 {
+		if cap(b.toPool) != cap(b.Buf) {
+			// Chunk was reallocated, toPool can be pooled.
+			putBuf(b.toPool)
+		}
+		if cap(b.bufs) == 0 {
+			b.bufs = make([][]byte, 0, 8)
+		}
+		b.bufs = append(b.bufs, b.Buf)
+		l = cap(b.toPool) * 2
+	} else {
+		l = config.StartSize
+	}
+
+	if l > config.MaxSize {
+		l = config.MaxSize
+	}
+	b.Buf = getBuf(l)
+	b.toPool = b.Buf
+}
+
+// AppendByte appends a single byte to the buffer.
+func (b *Buffer) AppendByte(data byte) {
+	if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
+		b.EnsureSpace(1)
+	}
+	b.Buf = append(b.Buf, data)
+}
+
+// AppendBytes appends a byte slice to the buffer.
+func (b *Buffer) AppendBytes(data []byte) {
+	for len(data) > 0 {
+		if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
+			b.EnsureSpace(1)
+		}
+
+		sz := cap(b.Buf) - len(b.Buf)
+		if sz > len(data) {
+			sz = len(data)
+		}
+
+		b.Buf = append(b.Buf, data[:sz]...)
+		data = data[sz:]
+	}
+}
+
+// AppendString appends a string to the buffer.
+func (b *Buffer) AppendString(data string) {
+	for len(data) > 0 {
+		if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
+			b.EnsureSpace(1)
+		}
+
+		sz := cap(b.Buf) - len(b.Buf)
+		if sz > len(data) {
+			sz = len(data)
+		}
+
+		b.Buf = append(b.Buf, data[:sz]...)
+		data = data[sz:]
+	}
+}
+
+// Size computes the size of a buffer by adding sizes of every chunk.
+func (b *Buffer) Size() int {
+	size := len(b.Buf)
+	for _, buf := range b.bufs {
+		size += len(buf)
+	}
+	return size
+}
+
+// DumpTo outputs the contents of a buffer to a writer and resets the buffer.
+func (b *Buffer) DumpTo(w io.Writer) (written int, err error) {
+	var n int
+	for _, buf := range b.bufs {
+		if err == nil {
+			n, err = w.Write(buf)
+			written += n
+		}
+		putBuf(buf)
+	}
+
+	if err == nil {
+		n, err = w.Write(b.Buf)
+		written += n
+	}
+	putBuf(b.toPool)
+
+	b.bufs = nil
+	b.Buf = nil
+	b.toPool = nil
+
+	return
+}
+
+// BuildBytes creates a single byte slice with all the contents of the buffer. Data is
+// copied if it does not fit in a single chunk. You can optionally provide one byte
+// slice as argument that it will try to reuse.
+func (b *Buffer) BuildBytes(reuse ...[]byte) []byte {
+	if len(b.bufs) == 0 {
+		ret := b.Buf
+		b.toPool = nil
+		b.Buf = nil
+		return ret
+	}
+
+	var ret []byte
+	size := b.Size()
+
+	// If we got a buffer as argument and it is big enough, reuse it.
+	if len(reuse) == 1 && cap(reuse[0]) >= size {
+		ret = reuse[0][:0]
+	} else {
+		ret = make([]byte, 0, size)
+	}
+	for _, buf := range b.bufs {
+		ret = append(ret, buf...)
+		putBuf(buf)
+	}
+
+	ret = append(ret, b.Buf...)
+	putBuf(b.toPool)
+
+	b.bufs = nil
+	b.toPool = nil
+	b.Buf = nil
+
+	return ret
+}
+
+type readCloser struct {
+	offset int
+	bufs   [][]byte
+}
+
+func (r *readCloser) Read(p []byte) (n int, err error) {
+	for _, buf := range r.bufs {
+		// Copy as much as we can.
+		x := copy(p[n:], buf[r.offset:])
+		n += x // Increment how much we filled.
+
+		// Did we empty the whole buffer?
+		if r.offset+x == len(buf) {
+			// On to the next buffer.
+			r.offset = 0
+			r.bufs = r.bufs[1:]
+
+			// We can release this buffer.
+			putBuf(buf)
+		} else {
+			r.offset += x
+		}
+
+		if n == len(p) {
+			break
+		}
+	}
+	// No buffers left or nothing read?
+	if len(r.bufs) == 0 {
+		err = io.EOF
+	}
+	return
+}
+
+func (r *readCloser) Close() error {
+	// Release all remaining buffers.
+	for _, buf := range r.bufs {
+		putBuf(buf)
+	}
+	// In case Close gets called multiple times.
+	r.bufs = nil
+
+	return nil
+}
+
+// ReadCloser creates an io.ReadCloser with all the contents of the buffer.
+func (b *Buffer) ReadCloser() io.ReadCloser {
+	ret := &readCloser{0, append(b.bufs, b.Buf)}
+
+	b.bufs = nil
+	b.toPool = nil
+	b.Buf = nil
+
+	return ret
+}
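For reviewers unfamiliar with this vendored package: a minimal sketch of how the Buffer above is typically driven. The values are illustrative only and not part of this change:

    package main

    import (
        "fmt"

        "github.com/mailru/easyjson/buffer"
    )

    func main() {
        var b buffer.Buffer // the zero value is ready to use
        b.AppendString(`{"name":`)
        b.AppendBytes([]byte(`"gopher"}`))
        // BuildBytes returns all written data as one slice and resets
        // the Buffer; intermediate chunks go back to the reuse pool.
        out := b.BuildBytes()
        fmt.Println(string(out)) // {"name":"gopher"}
    }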
diff --git a/go/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/go/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
new file mode 100644
index 0000000..ff7b27c
--- /dev/null
+++ b/go/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
@@ -0,0 +1,24 @@
+// This file is only included in the build if neither the
+// easyjson_nounsafe nor the appengine build tag is set. See the README
+// notes for more details.
+
+//+build !easyjson_nounsafe
+//+build !appengine
+
+package jlexer
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// bytesToStr creates a string pointing at the slice to avoid copying.
+//
+// Warning: the string returned by the function should be used with care, as the whole input data
+// chunk may be blocked from being freed by the GC because of a single string, or the buffer.Data
+// may be garbage-collected even while the string exists.
+func bytesToStr(data []byte) string {
+	h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
+	shdr := reflect.StringHeader{Data: h.Data, Len: h.Len}
+	return *(*string)(unsafe.Pointer(&shdr))
+}
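The function is unexported, but the aliasing hazard the warning above describes is easy to demonstrate by reproducing the same header trick outside the package; a sketch for illustration only:

    package main

    import (
        "fmt"
        "reflect"
        "unsafe"
    )

    // Same technique as jlexer's bytesToStr: the string shares the
    // slice's backing array instead of copying it.
    func bytesToStr(data []byte) string {
        h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
        shdr := reflect.StringHeader{Data: h.Data, Len: h.Len}
        return *(*string)(unsafe.Pointer(&shdr))
    }

    func main() {
        buf := []byte("hello")
        s := bytesToStr(buf)
        buf[0] = 'H'   // mutating the slice is visible through the "immutable" string
        fmt.Println(s) // Hello
    }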
diff --git a/go/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/go/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
new file mode 100644
index 0000000..864d1be
--- /dev/null
+++ b/go/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
@@ -0,0 +1,13 @@
+// This file is included in the build if any of the build tags below
+// is set. Refer to the README notes for more details.
+
+//+build easyjson_nounsafe appengine
+
+package jlexer
+
+// bytesToStr creates a string from []byte in the normal way, by copying.
+//
+// Note that this method is roughly 1.5x slower than using the 'unsafe' method.
+func bytesToStr(data []byte) string {
+	return string(data)
+}
diff --git a/go/vendor/github.com/mailru/easyjson/jlexer/error.go b/go/vendor/github.com/mailru/easyjson/jlexer/error.go
new file mode 100644
index 0000000..e90ec40
--- /dev/null
+++ b/go/vendor/github.com/mailru/easyjson/jlexer/error.go
@@ -0,0 +1,15 @@
+package jlexer
+
+import "fmt"
+
+// LexerError implements the error interface and represents all possible errors that can be
+// generated while parsing JSON data.
+type LexerError struct {
+	Reason string
+	Offset int
+	Data   string
+}
+
+func (l *LexerError) Error() string {
+	return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data)
+}
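For reference, the message format Error produces, shown with made-up values:

    package main

    import (
        "fmt"

        "github.com/mailru/easyjson/jlexer"
    )

    func main() {
        err := &jlexer.LexerError{Reason: "expected string", Offset: 4, Data: `{"a":1}`}
        fmt.Println(err.Error())
        // parse error: expected string near offset 4 of '{"a":1}'
    }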
diff --git a/go/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/go/vendor/github.com/mailru/easyjson/jlexer/lexer.go
new file mode 100644
index 0000000..51f0566
--- /dev/null
+++ b/go/vendor/github.com/mailru/easyjson/jlexer/lexer.go
@@ -0,0 +1,1181 @@
+// Package jlexer contains a JSON lexer implementation.
+//
+// It is expected that it is mostly used with generated parser code, so the interface is tuned
+// for a parser that knows what kind of data is expected.
+package jlexer
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
+)
+
+// tokenKind determines the type of a token.
+type tokenKind byte
+
+const (
+	tokenUndef  tokenKind = iota // No token.
+	tokenDelim                   // Delimiter: one of '{', '}', '[' or ']'.
+	tokenString                  // A string literal, e.g. "abc\u1234"
+	tokenNumber                  // Number literal, e.g. 1.5e5
+	tokenBool                    // Boolean literal: true or false.
+	tokenNull                    // null keyword.
+)
+
+// token describes a single token: type, position in the input and value.
+type token struct {
+	kind tokenKind // Type of a token.
+
+	boolValue  bool   // Value if a boolean literal token.
+	byteValue  []byte // Raw value of a token.
+	delimValue byte
+}
+
+// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice.
+type Lexer struct {
+	Data []byte // Input data given to the lexer.
+
+	start int   // Start of the current token.
+	pos   int   // Current unscanned position in the input stream.
+	token token // Last scanned token, if token.kind != tokenUndef.
+
+	firstElement bool // Whether the current element is the first in an array or object.
+	wantSep      byte // A comma or colon character that needs to occur before the next token.
+
+	UseMultipleErrors bool          // Whether to accumulate multiple errors rather than fail on the first one.
+	fatalError        error         // Fatal error that occurred during lexing. It is usually a syntax error.
+	multipleErrors    []*LexerError // Semantic errors that occurred during lexing. Parsing continues after these errors are found.
+}
+
+// FetchToken scans the input for the next token.
+func (r *Lexer) FetchToken() {
+	r.token.kind = tokenUndef
+	r.start = r.pos
+
+	// Check that r.Data has an r.pos element.
+	// If it doesn't, the input data is corrupted.
+	if len(r.Data) < r.pos {
+		r.errParse("Unexpected end of data")
+		return
+	}
+	// Determine the type of a token by skipping whitespace and reading the
+	// first character.
+	for _, c := range r.Data[r.pos:] {
+		switch c {
+		case ':', ',':
+			if r.wantSep == c {
+				r.pos++
+				r.start++
+				r.wantSep = 0
+			} else {
+				r.errSyntax()
+			}
+
+		case ' ', '\t', '\r', '\n':
+			r.pos++
+			r.start++
+
+		case '"':
+			if r.wantSep != 0 {
+				r.errSyntax()
+			}
+
+			r.token.kind = tokenString
+			r.fetchString()
+			return
+
+		case '{', '[':
+			if r.wantSep != 0 {
+				r.errSyntax()
+			}
+			r.firstElement = true
+			r.token.kind = tokenDelim
+			r.token.delimValue = r.Data[r.pos]
+			r.pos++
+			return
+
+		case '}', ']':
+			if !r.firstElement && (r.wantSep != ',') {
+				r.errSyntax()
+			}
+			r.wantSep = 0
+			r.token.kind = tokenDelim
+			r.token.delimValue = r.Data[r.pos]
+			r.pos++
+			return
+
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-':
+			if r.wantSep != 0 {
+				r.errSyntax()
+			}
+			r.token.kind = tokenNumber
+			r.fetchNumber()
+			return
+
+		case 'n':
+			if r.wantSep != 0 {
+				r.errSyntax()
+			}
+
+			r.token.kind = tokenNull
+			r.fetchNull()
+			return
+
+		case 't':
+			if r.wantSep != 0 {
+				r.errSyntax()
+			}
+
+			r.token.kind = tokenBool
+			r.token.boolValue = true
+			r.fetchTrue()
+			return
+
+		case 'f':
+			if r.wantSep != 0 {
+				r.errSyntax()
+			}
+
+			r.token.kind = tokenBool
+			r.token.boolValue = false
+			r.fetchFalse()
+			return
+
+		default:
+			r.errSyntax()
+			return
+		}
+	}
+	r.fatalError = io.EOF
+	return
+}
+
+// isTokenEnd returns true if the char can follow a non-delimiter token
+func isTokenEnd(c byte) bool {
+	return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':'
+}
+
+// fetchNull fetches and checks remaining bytes of null keyword.
+func (r *Lexer) fetchNull() {
+	r.pos += 4
+	if r.pos > len(r.Data) ||
+		r.Data[r.pos-3] != 'u' ||
+		r.Data[r.pos-2] != 'l' ||
+		r.Data[r.pos-1] != 'l' ||
+		(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
+
+		r.pos -= 4
+		r.errSyntax()
+	}
+}
+
+// fetchTrue fetches and checks remaining bytes of true keyword.
+func (r *Lexer) fetchTrue() {
+	r.pos += 4
+	if r.pos > len(r.Data) ||
+		r.Data[r.pos-3] != 'r' ||
+		r.Data[r.pos-2] != 'u' ||
+		r.Data[r.pos-1] != 'e' ||
+		(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
+
+		r.pos -= 4
+		r.errSyntax()
+	}
+}
+
+// fetchFalse fetches and checks remaining bytes of false keyword.
+func (r *Lexer) fetchFalse() {
+	r.pos += 5
+	if r.pos > len(r.Data) ||
+		r.Data[r.pos-4] != 'a' ||
+		r.Data[r.pos-3] != 'l' ||
+		r.Data[r.pos-2] != 's' ||
+		r.Data[r.pos-1] != 'e' ||
+		(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
+
+		r.pos -= 5
+		r.errSyntax()
+	}
+}
+
+// fetchNumber scans a number literal token.
+func (r *Lexer) fetchNumber() {
+	hasE := false
+	afterE := false
+	hasDot := false
+
+	r.pos++
+	for i, c := range r.Data[r.pos:] {
+		switch {
+		case c >= '0' && c <= '9':
+			afterE = false
+		case c == '.' && !hasDot:
+			hasDot = true
+		case (c == 'e' || c == 'E') && !hasE:
+			hasE = true
+			hasDot = true
+			afterE = true
+		case (c == '+' || c == '-') && afterE:
+			afterE = false
+		default:
+			r.pos += i
+			if !isTokenEnd(c) {
+				r.errSyntax()
+			} else {
+				r.token.byteValue = r.Data[r.start:r.pos]
+			}
+			return
+		}
+	}
+
+	r.pos = len(r.Data)
+	r.token.byteValue = r.Data[r.start:]
+}
+
+// findStringLen tries to scan into the string literal for ending quote char to determine required size.
+// The size will be exact if no escapes are present and may be inexact if there are escaped chars.
+func findStringLen(data []byte) (isValid, hasEscapes bool, length int) {
+	delta := 0
+
+	for i := 0; i < len(data); i++ {
+		switch data[i] {
+		case '\\':
+			i++
+			delta++
+			if i < len(data) && data[i] == 'u' {
+				delta++
+			}
+		case '"':
+			return true, (delta > 0), (i - delta)
+		}
+	}
+
+	return false, false, len(data)
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or -1 on failure.
+func getu4(s []byte) rune {
+	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+		return -1
+	}
+	var val rune
+	for i := 2; i < len(s) && i < 6; i++ {
+		var v byte
+		c := s[i]
+		switch c {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			v = c - '0'
+		case 'a', 'b', 'c', 'd', 'e', 'f':
+			v = c - 'a' + 10
+		case 'A', 'B', 'C', 'D', 'E', 'F':
+			v = c - 'A' + 10
+		default:
+			return -1
+		}
+
+		val <<= 4
+		val |= rune(v)
+	}
+	return val
+}
+
+// processEscape processes a single escape sequence and returns number of bytes processed.
+func (r *Lexer) processEscape(data []byte) (int, error) {
+	if len(data) < 2 {
+		return 0, fmt.Errorf("syntax error at %v", string(data))
+	}
+
+	c := data[1]
+	switch c {
+	case '"', '/', '\\':
+		r.token.byteValue = append(r.token.byteValue, c)
+		return 2, nil
+	case 'b':
+		r.token.byteValue = append(r.token.byteValue, '\b')
+		return 2, nil
+	case 'f':
+		r.token.byteValue = append(r.token.byteValue, '\f')
+		return 2, nil
+	case 'n':
+		r.token.byteValue = append(r.token.byteValue, '\n')
+		return 2, nil
+	case 'r':
+		r.token.byteValue = append(r.token.byteValue, '\r')
+		return 2, nil
+	case 't':
+		r.token.byteValue = append(r.token.byteValue, '\t')
+		return 2, nil
+	case 'u':
+		rr := getu4(data)
+		if rr < 0 {
+			return 0, errors.New("syntax error")
+		}
+
+		read := 6
+		if utf16.IsSurrogate(rr) {
+			rr1 := getu4(data[read:])
+			if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+				read += 6
+				rr = dec
+			} else {
+				rr = unicode.ReplacementChar
+			}
+		}
+		var d [4]byte
+		s := utf8.EncodeRune(d[:], rr)
+		r.token.byteValue = append(r.token.byteValue, d[:s]...)
+		return read, nil
+	}
+
+	return 0, errors.New("syntax error")
+}
+
+// fetchString scans a string literal token.
+func (r *Lexer) fetchString() {
+	r.pos++
+	data := r.Data[r.pos:]
+
+	isValid, hasEscapes, length := findStringLen(data)
+	if !isValid {
+		r.pos += length
+		r.errParse("unterminated string literal")
+		return
+	}
+	if !hasEscapes {
+		r.token.byteValue = data[:length]
+		r.pos += length + 1
+		return
+	}
+
+	r.token.byteValue = make([]byte, 0, length)
+	p := 0
+	for i := 0; i < len(data); {
+		switch data[i] {
+		case '"':
+			r.pos += i + 1
+			r.token.byteValue = append(r.token.byteValue, data[p:i]...)
+			i++
+			return
+
+		case '\\':
+			r.token.byteValue = append(r.token.byteValue, data[p:i]...)
+			off, err := r.processEscape(data[i:])
+			if err != nil {
+				r.errParse(err.Error())
+				return
+			}
+			i += off
+			p = i
+
+		default:
+			i++
+		}
+	}
+	r.errParse("unterminated string literal")
+}
+
+// scanToken scans the next token if no token is currently available in the lexer.
+func (r *Lexer) scanToken() {
+	if r.token.kind != tokenUndef || r.fatalError != nil {
+		return
+	}
+
+	r.FetchToken()
+}
+
+// consume resets the current token to allow scanning the next one.
+func (r *Lexer) consume() {
+	r.token.kind = tokenUndef
+	r.token.delimValue = 0
+}
+
+// Ok returns true if no error (including io.EOF) was encountered during scanning.
+func (r *Lexer) Ok() bool {
+	return r.fatalError == nil
+}
+
+const maxErrorContextLen = 13
+
+func (r *Lexer) errParse(what string) {
+	if r.fatalError == nil {
+		var str string
+		if len(r.Data)-r.pos <= maxErrorContextLen {
+			str = string(r.Data)
+		} else {
+			str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..."
+		}
+		r.fatalError = &LexerError{
+			Reason: what,
+			Offset: r.pos,
+			Data:   str,
+		}
+	}
+}
+
+func (r *Lexer) errSyntax() {
+	r.errParse("syntax error")
+}
+
+func (r *Lexer) errInvalidToken(expected string) {
+	if r.fatalError != nil {
+		return
+	}
+	if r.UseMultipleErrors {
+		r.pos = r.start
+		r.consume()
+		r.SkipRecursive()
+		switch expected {
+		case "[":
+			r.token.delimValue = ']'
+			r.token.kind = tokenDelim
+		case "{":
+			r.token.delimValue = '}'
+			r.token.kind = tokenDelim
+		}
+		r.addNonfatalError(&LexerError{
+			Reason: fmt.Sprintf("expected %s", expected),
+			Offset: r.start,
+			Data:   string(r.Data[r.start:r.pos]),
+		})
+		return
+	}
+
+	var str string
+	if len(r.token.byteValue) <= maxErrorContextLen {
+		str = string(r.token.byteValue)
+	} else {
+		str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..."
+	}
+	r.fatalError = &LexerError{
+		Reason: fmt.Sprintf("expected %s", expected),
+		Offset: r.pos,
+		Data:   str,
+	}
+}
+
+func (r *Lexer) GetPos() int {
+	return r.pos
+}
+
+// Delim consumes a token and verifies that it is the given delimiter.
+func (r *Lexer) Delim(c byte) {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+
+	if !r.Ok() || r.token.delimValue != c {
+		r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled.
+		r.errInvalidToken(string([]byte{c}))
+	} else {
+		r.consume()
+	}
+}
+
+// IsDelim returns true if there was no scanning error and next token is the given delimiter.
+func (r *Lexer) IsDelim(c byte) bool {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	return !r.Ok() || r.token.delimValue == c
+}
+
+// Null verifies that the next token is null and consumes it.
+func (r *Lexer) Null() {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	if !r.Ok() || r.token.kind != tokenNull {
+		r.errInvalidToken("null")
+	}
+	r.consume()
+}
+
+// IsNull returns true if the next token is a null keyword.
+func (r *Lexer) IsNull() bool {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	return r.Ok() && r.token.kind == tokenNull
+}
+
+// Skip skips a single token.
+func (r *Lexer) Skip() {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	r.consume()
+}
+
+// SkipRecursive skips next array or object completely, or just skips a single token if not
+// an array/object.
+//
+// Note: no syntax validation is performed on the skipped data.
+func (r *Lexer) SkipRecursive() {
+	r.scanToken()
+	var start, end byte
+
+	if r.token.delimValue == '{' {
+		start, end = '{', '}'
+	} else if r.token.delimValue == '[' {
+		start, end = '[', ']'
+	} else {
+		r.consume()
+		return
+	}
+
+	r.consume()
+
+	level := 1
+	inQuotes := false
+	wasEscape := false
+
+	for i, c := range r.Data[r.pos:] {
+		switch {
+		case c == start && !inQuotes:
+			level++
+		case c == end && !inQuotes:
+			level--
+			if level == 0 {
+				r.pos += i + 1
+				return
+			}
+		case c == '\\' && inQuotes:
+			wasEscape = !wasEscape
+			continue
+		case c == '"' && inQuotes:
+			inQuotes = wasEscape
+		case c == '"':
+			inQuotes = true
+		}
+		wasEscape = false
+	}
+	r.pos = len(r.Data)
+	r.fatalError = &LexerError{
+		Reason: "EOF reached while skipping array/object or token",
+		Offset: r.pos,
+		Data:   string(r.Data[r.pos:]),
+	}
+}
+
+// Raw fetches the next item recursively as a data slice
+func (r *Lexer) Raw() []byte {
+	r.SkipRecursive()
+	if !r.Ok() {
+		return nil
+	}
+	return r.Data[r.start:r.pos]
+}
+
+// IsStart returns whether the lexer is positioned at the start
+// of an input string.
+func (r *Lexer) IsStart() bool {
+	return r.pos == 0
+}
+
+// Consumed reads all remaining bytes from the input, publishing an error if
+// there is anything but whitespace remaining.
+func (r *Lexer) Consumed() {
+	if r.pos > len(r.Data) || !r.Ok() {
+		return
+	}
+
+	for _, c := range r.Data[r.pos:] {
+		if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+			r.AddError(&LexerError{
+				Reason: "invalid character '" + string(c) + "' after top-level value",
+				Offset: r.pos,
+				Data:   string(r.Data[r.pos:]),
+			})
+			return
+		}
+
+		r.pos++
+		r.start++
+	}
+}
+
+func (r *Lexer) unsafeString() (string, []byte) {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	if !r.Ok() || r.token.kind != tokenString {
+		r.errInvalidToken("string")
+		return "", nil
+	}
+	bytes := r.token.byteValue
+	ret := bytesToStr(r.token.byteValue)
+	r.consume()
+	return ret, bytes
+}
+
+// UnsafeString returns the string value if the token is a string literal.
+//
+// Warning: returned string may point to the input buffer, so the string should not outlive
+// the input buffer. Intended pattern of usage is as an argument to a switch statement.
+func (r *Lexer) UnsafeString() string {
+	ret, _ := r.unsafeString()
+	return ret
+}
+
+// UnsafeBytes returns the byte slice if the token is a string literal.
+func (r *Lexer) UnsafeBytes() []byte {
+	_, ret := r.unsafeString()
+	return ret
+}
+
+// String reads a string literal.
+func (r *Lexer) String() string {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	if !r.Ok() || r.token.kind != tokenString {
+		r.errInvalidToken("string")
+		return ""
+	}
+	ret := string(r.token.byteValue)
+	r.consume()
+	return ret
+}
+
+// Bytes reads a string literal and base64 decodes it into a byte slice.
+func (r *Lexer) Bytes() []byte {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	if !r.Ok() || r.token.kind != tokenString {
+		r.errInvalidToken("string")
+		return nil
+	}
+	ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue)))
+	n, err := base64.StdEncoding.Decode(ret, r.token.byteValue)
+	if err != nil {
+		r.fatalError = &LexerError{
+			Reason: err.Error(),
+		}
+		return nil
+	}
+
+	r.consume()
+	return ret[:n]
+}
+
+// Bool reads a true or false boolean keyword.
+func (r *Lexer) Bool() bool {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	if !r.Ok() || r.token.kind != tokenBool {
+		r.errInvalidToken("bool")
+		return false
+	}
+	ret := r.token.boolValue
+	r.consume()
+	return ret
+}
+
+func (r *Lexer) number() string {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	if !r.Ok() || r.token.kind != tokenNumber {
+		r.errInvalidToken("number")
+		return ""
+	}
+	ret := bytesToStr(r.token.byteValue)
+	r.consume()
+	return ret
+}
+
+func (r *Lexer) Uint8() uint8 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 8)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return uint8(n)
+}
+
+func (r *Lexer) Uint16() uint16 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 16)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return uint16(n)
+}
+
+func (r *Lexer) Uint32() uint32 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 32)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return uint32(n)
+}
+
+func (r *Lexer) Uint64() uint64 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 64)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return n
+}
+
+func (r *Lexer) Uint() uint {
+	return uint(r.Uint64())
+}
+
+func (r *Lexer) Int8() int8 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 8)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return int8(n)
+}
+
+func (r *Lexer) Int16() int16 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 16)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return int16(n)
+}
+
+func (r *Lexer) Int32() int32 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 32)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return int32(n)
+}
+
+func (r *Lexer) Int64() int64 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return n
+}
+
+func (r *Lexer) Int() int {
+	return int(r.Int64())
+}
+
+func (r *Lexer) Uint8Str() uint8 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 8)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return uint8(n)
+}
+
+func (r *Lexer) Uint16Str() uint16 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 16)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return uint16(n)
+}
+
+func (r *Lexer) Uint32Str() uint32 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 32)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return uint32(n)
+}
+
+func (r *Lexer) Uint64Str() uint64 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 64)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return n
+}
+
+func (r *Lexer) UintStr() uint {
+	return uint(r.Uint64Str())
+}
+
+func (r *Lexer) UintptrStr() uintptr {
+	return uintptr(r.Uint64Str())
+}
+
+func (r *Lexer) Int8Str() int8 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 8)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return int8(n)
+}
+
+func (r *Lexer) Int16Str() int16 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 16)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return int16(n)
+}
+
+func (r *Lexer) Int32Str() int32 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 32)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return int32(n)
+}
+
+func (r *Lexer) Int64Str() int64 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return n
+}
+
+func (r *Lexer) IntStr() int {
+	return int(r.Int64Str())
+}
+
+func (r *Lexer) Float32() float32 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseFloat(s, 32)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return float32(n)
+}
+
+func (r *Lexer) Float32Str() float32 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+	n, err := strconv.ParseFloat(s, 32)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return float32(n)
+}
+
+func (r *Lexer) Float64() float64 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return n
+}
+
+func (r *Lexer) Float64Str() float64 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+	n, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return n
+}
+
+func (r *Lexer) Error() error {
+	return r.fatalError
+}
+
+func (r *Lexer) AddError(e error) {
+	if r.fatalError == nil {
+		r.fatalError = e
+	}
+}
+
+func (r *Lexer) AddNonFatalError(e error) {
+	r.addNonfatalError(&LexerError{
+		Offset: r.start,
+		Data:   string(r.Data[r.start:r.pos]),
+		Reason: e.Error(),
+	})
+}
+
+func (r *Lexer) addNonfatalError(err *LexerError) {
+	if r.UseMultipleErrors {
+		// We don't want to add errors with the same offset.
+		if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset {
+			return
+		}
+		r.multipleErrors = append(r.multipleErrors, err)
+		return
+	}
+	r.fatalError = err
+}
+
+func (r *Lexer) GetNonFatalErrors() []*LexerError {
+	return r.multipleErrors
+}
+
+// JsonNumber fetches a json.Number from the 'encoding/json' package.
+// Ints, floats, and strings containing them are all valid values.
+func (r *Lexer) JsonNumber() json.Number {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	if !r.Ok() {
+		r.errInvalidToken("json.Number")
+		return json.Number("")
+	}
+
+	switch r.token.kind {
+	case tokenString:
+		return json.Number(r.String())
+	case tokenNumber:
+		return json.Number(r.Raw())
+	case tokenNull:
+		r.Null()
+		return json.Number("")
+	default:
+		r.errSyntax()
+		return json.Number("")
+	}
+}
+
+// Interface fetches an interface{} value, analogous to what the 'encoding/json' package would produce.
+func (r *Lexer) Interface() interface{} {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+
+	if !r.Ok() {
+		return nil
+	}
+	switch r.token.kind {
+	case tokenString:
+		return r.String()
+	case tokenNumber:
+		return r.Float64()
+	case tokenBool:
+		return r.Bool()
+	case tokenNull:
+		r.Null()
+		return nil
+	}
+
+	if r.token.delimValue == '{' {
+		r.consume()
+
+		ret := map[string]interface{}{}
+		for !r.IsDelim('}') {
+			key := r.String()
+			r.WantColon()
+			ret[key] = r.Interface()
+			r.WantComma()
+		}
+		r.Delim('}')
+
+		if r.Ok() {
+			return ret
+		} else {
+			return nil
+		}
+	} else if r.token.delimValue == '[' {
+		r.consume()
+
+		var ret []interface{}
+		for !r.IsDelim(']') {
+			ret = append(ret, r.Interface())
+			r.WantComma()
+		}
+		r.Delim(']')
+
+		if r.Ok() {
+			return ret
+		} else {
+			return nil
+		}
+	}
+	r.errSyntax()
+	return nil
+}
+
+// WantComma requires a comma to be present before fetching the next token.
+func (r *Lexer) WantComma() {
+	r.wantSep = ','
+	r.firstElement = false
+}
+
+// WantColon requires a colon to be present before fetching the next token.
+func (r *Lexer) WantColon() {
+	r.wantSep = ':'
+	r.firstElement = false
+}
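A minimal sketch of the calling pattern this lexer is tuned for — the shape of code easyjson generates, hand-written here with made-up field names for illustration:

    package main

    import (
        "fmt"

        "github.com/mailru/easyjson/jlexer"
    )

    func main() {
        l := jlexer.Lexer{Data: []byte(`{"id": 7, "name": "gopher"}`)}

        var id int
        var name string

        l.Delim('{')
        for !l.IsDelim('}') {
            key := l.UnsafeString() // safe here: the key is only compared, never stored
            l.WantColon()
            switch key {
            case "id":
                id = l.Int()
            case "name":
                name = l.String()
            default:
                l.SkipRecursive() // ignore unknown fields
            }
            l.WantComma()
        }
        l.Delim('}')
        l.Consumed()

        fmt.Println(id, name, l.Error()) // 7 gopher <nil>
    }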
diff --git a/go/vendor/github.com/mailru/easyjson/jwriter/writer.go b/go/vendor/github.com/mailru/easyjson/jwriter/writer.go
new file mode 100644
index 0000000..b9ed7cc
--- /dev/null
+++ b/go/vendor/github.com/mailru/easyjson/jwriter/writer.go
@@ -0,0 +1,390 @@
+// Package jwriter contains a JSON writer.
+package jwriter
+
+import (
+	"io"
+	"strconv"
+	"unicode/utf8"
+
+	"github.com/mailru/easyjson/buffer"
+)
+
+// Flags describe various encoding options. The behavior may actually be implemented in the encoder, but the
+// Flags field in Writer is used to set and pass them around.
+type Flags int
+
+const (
+	NilMapAsEmpty   Flags = 1 << iota // Encode nil map as '{}' rather than 'null'.
+	NilSliceAsEmpty                   // Encode nil slice as '[]' rather than 'null'.
+)
+
+// Writer is a JSON writer.
+type Writer struct {
+	Flags Flags
+
+	Error        error
+	Buffer       buffer.Buffer
+	NoEscapeHTML bool
+}
+
+// Size returns the size of the data that was written out.
+func (w *Writer) Size() int {
+	return w.Buffer.Size()
+}
+
+// DumpTo outputs the data to given io.Writer, resetting the buffer.
+func (w *Writer) DumpTo(out io.Writer) (written int, err error) {
+	return w.Buffer.DumpTo(out)
+}
+
+// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice
+// as argument that it will try to reuse.
+func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) {
+	if w.Error != nil {
+		return nil, w.Error
+	}
+
+	return w.Buffer.BuildBytes(reuse...), nil
+}
+
+// ReadCloser returns an io.ReadCloser that can be used to read the data.
+// ReadCloser also resets the buffer.
+func (w *Writer) ReadCloser() (io.ReadCloser, error) {
+	if w.Error != nil {
+		return nil, w.Error
+	}
+
+	return w.Buffer.ReadCloser(), nil
+}
+
+// RawByte appends a single raw byte to the buffer.
+func (w *Writer) RawByte(c byte) {
+	w.Buffer.AppendByte(c)
+}
+
+// RawString appends a raw, unescaped string to the buffer.
+func (w *Writer) RawString(s string) {
+	w.Buffer.AppendString(s)
+}
+
+// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for
+// calling with results of MarshalJSON-like functions.
+func (w *Writer) Raw(data []byte, err error) {
+	switch {
+	case w.Error != nil:
+		return
+	case err != nil:
+		w.Error = err
+	case len(data) > 0:
+		w.Buffer.AppendBytes(data)
+	default:
+		w.RawString("null")
+	}
+}
+
+// RawText encloses raw binary data in quotes and appends it to the buffer.
+// Useful for calling with results of MarshalText-like functions.
+func (w *Writer) RawText(data []byte, err error) {
+	switch {
+	case w.Error != nil:
+		return
+	case err != nil:
+		w.Error = err
+	case len(data) > 0:
+		w.String(string(data))
+	default:
+		w.RawString("null")
+	}
+}
+
+// Base64Bytes appends data to the buffer after base64-encoding it.
+func (w *Writer) Base64Bytes(data []byte) {
+	if data == nil {
+		w.Buffer.AppendString("null")
+		return
+	}
+	w.Buffer.AppendByte('"')
+	w.base64(data)
+	w.Buffer.AppendByte('"')
+}
+
+func (w *Writer) Uint8(n uint8) {
+	w.Buffer.EnsureSpace(3)
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint16(n uint16) {
+	w.Buffer.EnsureSpace(5)
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint32(n uint32) {
+	w.Buffer.EnsureSpace(10)
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint(n uint) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint64(n uint64) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
+}
+
+func (w *Writer) Int8(n int8) {
+	w.Buffer.EnsureSpace(4)
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int16(n int16) {
+	w.Buffer.EnsureSpace(6)
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int32(n int32) {
+	w.Buffer.EnsureSpace(11)
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int(n int) {
+	w.Buffer.EnsureSpace(21)
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int64(n int64) {
+	w.Buffer.EnsureSpace(21)
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
+}
+
+func (w *Writer) Uint8Str(n uint8) {
+	w.Buffer.EnsureSpace(3)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Uint16Str(n uint16) {
+	w.Buffer.EnsureSpace(5)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Uint32Str(n uint32) {
+	w.Buffer.EnsureSpace(10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) UintStr(n uint) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Uint64Str(n uint64) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) UintptrStr(n uintptr) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int8Str(n int8) {
+	w.Buffer.EnsureSpace(4)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int16Str(n int16) {
+	w.Buffer.EnsureSpace(6)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int32Str(n int32) {
+	w.Buffer.EnsureSpace(11)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) IntStr(n int) {
+	w.Buffer.EnsureSpace(21)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int64Str(n int64) {
+	w.Buffer.EnsureSpace(21)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Float32(n float32) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
+}
+
+func (w *Writer) Float32Str(n float32) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Float64(n float64) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64)
+}
+
+func (w *Writer) Float64Str(n float64) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Bool(v bool) {
+	w.Buffer.EnsureSpace(5)
+	if v {
+		w.Buffer.Buf = append(w.Buffer.Buf, "true"...)
+	} else {
+		w.Buffer.Buf = append(w.Buffer.Buf, "false"...)
+	}
+}
+
+const chars = "0123456789abcdef"
+
+func isNotEscapedSingleChar(c byte, escapeHTML bool) bool {
+	// Note: might make sense to use a table if there are more chars to escape. With 4 chars
+	// it benchmarks the same.
+	if escapeHTML {
+		return c != '<' && c != '>' && c != '&' && c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
+	} else {
+		return c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
+	}
+}
+
+func (w *Writer) String(s string) {
+	w.Buffer.AppendByte('"')
+
+	// Portions of the string that contain no escapes are appended as
+	// byte slices.
+
+	p := 0 // last non-escape symbol
+
+	for i := 0; i < len(s); {
+		c := s[i]
+
+		if isNotEscapedSingleChar(c, !w.NoEscapeHTML) {
+			// single-width character, no escaping is required
+			i++
+			continue
+		} else if c < utf8.RuneSelf {
+			// single-width character, needs to be escaped
+			w.Buffer.AppendString(s[p:i])
+			switch c {
+			case '\t':
+				w.Buffer.AppendString(`\t`)
+			case '\r':
+				w.Buffer.AppendString(`\r`)
+			case '\n':
+				w.Buffer.AppendString(`\n`)
+			case '\\':
+				w.Buffer.AppendString(`\\`)
+			case '"':
+				w.Buffer.AppendString(`\"`)
+			default:
+				w.Buffer.AppendString(`\u00`)
+				w.Buffer.AppendByte(chars[c>>4])
+				w.Buffer.AppendByte(chars[c&0xf])
+			}
+
+			i++
+			p = i
+			continue
+		}
+
+		// broken UTF-8 sequence
+		runeValue, runeWidth := utf8.DecodeRuneInString(s[i:])
+		if runeValue == utf8.RuneError && runeWidth == 1 {
+			w.Buffer.AppendString(s[p:i])
+			w.Buffer.AppendString(`\ufffd`)
+			i++
+			p = i
+			continue
+		}
+
+		// JSONP hazard: the line separator (U+2028) and paragraph separator (U+2029)
+		if runeValue == '\u2028' || runeValue == '\u2029' {
+			w.Buffer.AppendString(s[p:i])
+			w.Buffer.AppendString(`\u202`)
+			w.Buffer.AppendByte(chars[runeValue&0xf])
+			i += runeWidth
+			p = i
+			continue
+		}
+		i += runeWidth
+	}
+	w.Buffer.AppendString(s[p:])
+	w.Buffer.AppendByte('"')
+}
+
+const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
+const padChar = '='
+
+func (w *Writer) base64(in []byte) {
+
+	if len(in) == 0 {
+		return
+	}
+
+	w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4)
+
+	si := 0
+	n := (len(in) / 3) * 3
+
+	for si < n {
+		// Convert 3x 8bit source bytes into 4 bytes
+		val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2])
+
+		w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F])
+
+		si += 3
+	}
+
+	remain := len(in) - si
+	if remain == 0 {
+		return
+	}
+
+	// Add the remaining small block
+	val := uint(in[si+0]) << 16
+	if remain == 2 {
+		val |= uint(in[si+1]) << 8
+	}
+
+	w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F])
+
+	switch remain {
+	case 2:
+		w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar))
+	case 1:
+		w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar))
+	}
+}
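And the writer side: a minimal sketch of emitting JSON by hand with this package (generated code drives it the same way), with illustrative values:

    package main

    import (
        "fmt"

        "github.com/mailru/easyjson/jwriter"
    )

    func main() {
        w := jwriter.Writer{}
        w.RawByte('{')
        w.String("id")
        w.RawByte(':')
        w.Int(7)
        w.RawByte(',')
        w.String("data")
        w.RawByte(':')
        w.Base64Bytes([]byte("hi")) // written as a quoted base64 string
        w.RawByte('}')

        out, err := w.BuildBytes()
        fmt.Println(string(out), err) // {"id":7,"data":"aGk="} <nil>
    }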