client: regenerate sources for etcd/client with new codec version

Major updates to ugorji/go changed the signatures of some methods,
causing the etcd/client build to fail against a default installation
of the codec.

We regenerate the sources with codecgen from the new version so that
the generated code matches the updated API.
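
For reference, a minimal sketch (not part of this change) of the
decoder-side signature changes between codecgen versions 5 and 6, as
reflected in the diff below. The interface names decHelperV5 and
decHelperV6 are hypothetical and only group the changed methods for
illustration; in the codec package they are split between the decode
driver and the codecgen helper.

    package sketch

    // Illustrative only: signatures the regenerated code relies on.
    type decHelperV5 interface {
            DecodeBytes(bs []byte, isstring, zerocopy bool) []byte
            DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool)
    }

    type decHelperV6 interface {
            DecodeBytes(bs []byte, zerocopy bool) []byte
            // DecodeStringAsBytes replaces DecodeBytes(b, true, true)
            // when reading string keys such as struct field names.
            DecodeStringAsBytes() []byte
            DecInferLen(clen, maxlen, unit int) (rvlen int)
    }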

Fixes #8573

Signed-off-by: Alexandre Beslic <abeslic@abronan.com>
Alexandre Beslic 2017-09-19 15:14:58 +02:00
parent 5bb9f9591f
commit 8dc4833a3e
32 changed files with 8301 additions and 10745 deletions


@ -37,10 +37,10 @@ var (
type codecSelfer1819 struct{}
func init() {
if codec1978.GenVersion != 5 {
if codec1978.GenVersion != 6 {
_, file, _, _ := runtime.Caller(0)
err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
5, codec1978.GenVersion, file)
6, codec1978.GenVersion, file)
panic(err)
}
if false { // reference the types, but skip this branch at build/run time
@ -189,7 +189,7 @@ func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
}
}
z.DecSendContainerState(codecSelfer_containerMapKey1819)
yys3Slc = r.DecodeBytes(yys3Slc, true, true)
yys3Slc = r.DecodeStringAsBytes()
yys3 := string(yys3Slc)
z.DecSendContainerState(codecSelfer_containerMapValue1819)
switch yys3 {
@ -601,7 +601,7 @@ func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
}
}
z.DecSendContainerState(codecSelfer_containerMapKey1819)
yys3Slc = r.DecodeBytes(yys3Slc, true, true)
yys3Slc = r.DecodeStringAsBytes()
yys3 := string(yys3Slc)
z.DecSendContainerState(codecSelfer_containerMapValue1819)
switch yys3 {
@ -982,79 +982,45 @@ func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) {
yyv1 = yyv1[:0]
yyc1 = true
}
} else if yyl1 > 0 {
var yyrr1, yyrl1 int
var yyrt1 bool
_, _ = yyrl1, yyrt1
yyrr1 = yyl1 // len(yyv1)
if yyl1 > cap(yyv1) {
yyrg1 := len(yyv1) > 0
yyv21 := yyv1
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8)
if yyrt1 {
} else {
yyhl1 := yyl1 > 0
var yyrl1 int
if yyhl1 {
if yyl1 > cap(yyv1) {
yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8)
if yyrl1 <= cap(yyv1) {
yyv1 = yyv1[:yyrl1]
} else {
yyv1 = make([]*Node, yyrl1)
}
} else {
yyv1 = make([]*Node, yyrl1)
}
yyc1 = true
yyrr1 = len(yyv1)
if yyrg1 {
copy(yyv1, yyv21)
}
} else if yyl1 != len(yyv1) {
yyv1 = yyv1[:yyl1]
yyc1 = true
}
yyj1 := 0
for ; yyj1 < yyrr1; yyj1++ {
yyh1.ElemContainerState(yyj1)
if r.TryDecodeAsNil() {
if yyv1[yyj1] != nil {
*yyv1[yyj1] = Node{}
}
} else {
if yyv1[yyj1] == nil {
yyv1[yyj1] = new(Node)
}
yyw2 := yyv1[yyj1]
yyw2.CodecDecodeSelf(d)
}
}
if yyrt1 {
for ; yyj1 < yyl1; yyj1++ {
yyv1 = append(yyv1, nil)
yyh1.ElemContainerState(yyj1)
if r.TryDecodeAsNil() {
if yyv1[yyj1] != nil {
*yyv1[yyj1] = Node{}
}
} else {
if yyv1[yyj1] == nil {
yyv1[yyj1] = new(Node)
}
yyw3 := yyv1[yyj1]
yyw3.CodecDecodeSelf(d)
}
}
}
} else {
yyj1 := 0
for ; !r.CheckBreak(); yyj1++ {
if yyj1 >= len(yyv1) {
yyv1 = append(yyv1, nil) // var yyz1 *Node
yyc1 = true
} else if yyl1 != len(yyv1) {
yyv1 = yyv1[:yyl1]
yyc1 = true
}
}
yyj1 := 0
for ; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || r.CheckBreak()); yyj1++ {
if yyj1 == 0 && len(yyv1) == 0 {
if yyhl1 {
yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8)
} else {
yyrl1 = 8
}
yyv1 = make([]*Node, yyrl1)
yyc1 = true
}
// if indefinite, etc, then expand the slice if necessary
var yydb1 bool
if yyj1 >= len(yyv1) {
yyv1 = append(yyv1, nil)
yyc1 = true
}
yyh1.ElemContainerState(yyj1)
if yyj1 < len(yyv1) {
if yydb1 {
z.DecSwallow()
} else {
if r.TryDecodeAsNil() {
if yyv1[yyj1] != nil {
*yyv1[yyj1] = Node{}
@ -1063,20 +1029,17 @@ func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) {
if yyv1[yyj1] == nil {
yyv1[yyj1] = new(Node)
}
yyw4 := yyv1[yyj1]
yyw4.CodecDecodeSelf(d)
yyw2 := yyv1[yyj1]
yyw2.CodecDecodeSelf(d)
}
} else {
z.DecSwallow()
}
}
if yyj1 < len(yyv1) {
yyv1 = yyv1[:yyj1]
yyc1 = true
} else if yyj1 == 0 && yyv1 == nil {
yyv1 = []*Node{}
yyv1 = make([]*Node, 0)
yyc1 = true
}
}
@ -1084,4 +1047,5 @@ func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) {
if yyc1 {
*v = yyv1
}
}


@ -2,8 +2,8 @@
// Use of this source code is governed by a MIT license found in the LICENSE file.
/*
High Performance, Feature-Rich Idiomatic Go codec/encoding library for
binc, msgpack, cbor, json.
High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library for
binc, msgpack, cbor, json
Supported Serialization formats are:
@ -11,21 +11,17 @@ Supported Serialization formats are:
- binc: http://github.com/ugorji/binc
- cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
- json: http://json.org http://tools.ietf.org/html/rfc7159
- simple:
- simple:
To install:
go get github.com/ugorji/go/codec
This package understands the 'unsafe' tag, to allow using unsafe semantics:
- When decoding into a struct, you need to read the field name as a string
so you can find the struct field it is mapped to.
Using `unsafe` will bypass the allocation and copying overhead of []byte->string conversion.
To install using unsafe, pass the 'unsafe' tag:
go get -tags=unsafe github.com/ugorji/go/codec
This package will carefully use 'unsafe' for performance reasons in specific places.
You can build without unsafe use by passing the safe or appengine tag
i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3
go sdk versions e.g. current go release is go 1.9, so we support unsafe use only from
go 1.7+ . This is because supporting unsafe requires knowledge of implementation details.
For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer .
@ -38,9 +34,9 @@ Rich Feature Set includes:
- Very High Performance.
Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
- Multiple conversions:
Package coerces types where appropriate
Package coerces types where appropriate
e.g. decode an int in the stream into a float, etc.
- Corner Cases:
- Corner Cases:
Overflows, nil maps/slices, nil values in streams are handled correctly
- Standard field renaming via tags
- Support for omitting empty fields during an encoding
@ -56,7 +52,7 @@ Rich Feature Set includes:
- Fast (no-reflection) encoding/decoding of common maps and slices
- Code-generation for faster performance.
- Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
- Support indefinite-length formats to enable true streaming
- Support indefinite-length formats to enable true streaming
(for formats which support it e.g. json, cbor)
- Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
This mostly applies to maps, where iteration order is non-deterministic.
@ -68,12 +64,12 @@ Rich Feature Set includes:
- Encode/Decode from/to chan types (for iterative streaming support)
- Drop-in replacement for encoding/json. `json:` key in struct tag supported.
- Provides a RPC Server and Client Codec for net/rpc communication protocol.
- Handle unique idiosyncrasies of codecs e.g.
- For messagepack, configure how ambiguities in handling raw bytes are resolved
- For messagepack, provide rpc server/client codec to support
- Handle unique idiosyncrasies of codecs e.g.
- For messagepack, configure how ambiguities in handling raw bytes are resolved
- For messagepack, provide rpc server/client codec to support
msgpack-rpc protocol defined at:
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
Extension Support
Users can register a function to handle the encoding or decoding of


@ -356,6 +356,9 @@ func (d *bincDecDriver) uncacheRead() {
}
func (d *bincDecDriver) ContainerType() (vt valueType) {
if !d.bdRead {
d.readNextBd()
}
if d.vd == bincVdSpecial && d.vs == bincSpNil {
return valueTypeNil
} else if d.vd == bincVdByteArray {
@ -580,6 +583,9 @@ func (d *bincDecDriver) DecodeBool() (b bool) {
}
func (d *bincDecDriver) ReadMapStart() (length int) {
if !d.bdRead {
d.readNextBd()
}
if d.vd != bincVdMap {
d.d.errorf("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd)
return
@ -590,6 +596,9 @@ func (d *bincDecDriver) ReadMapStart() (length int) {
}
func (d *bincDecDriver) ReadArrayStart() (length int) {
if !d.bdRead {
d.readNextBd()
}
if d.vd != bincVdArray {
d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd)
return
@ -639,12 +648,12 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
if d.br {
bs2 = d.r.readx(slen)
} else if len(bs) == 0 {
bs2 = decByteSlice(d.r, slen, d.b[:])
bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:])
} else {
bs2 = decByteSlice(d.r, slen, bs)
bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
}
} else {
bs2 = decByteSlice(d.r, slen, bs)
bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs)
}
if withString {
s = string(bs2)
@ -696,7 +705,7 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
// since using symbols, do not store any part of
// the parameter bs in the map, as it might be a shared buffer.
// bs2 = decByteSlice(d.r, slen, bs)
bs2 = decByteSlice(d.r, slen, nil)
bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil)
if withString {
s = string(bs2)
}
@ -719,11 +728,12 @@ func (d *bincDecDriver) DecodeString() (s string) {
return
}
func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
if isstring {
bsOut, _ = d.decStringAndBytes(bs, false, zerocopy)
return
}
func (d *bincDecDriver) DecodeStringAsBytes() (s []byte) {
s, _ = d.decStringAndBytes(d.b[:], false, true)
return
}
func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
if !d.bdRead {
d.readNextBd()
}
@ -747,7 +757,7 @@ func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
}
func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
@ -780,7 +790,7 @@ func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []b
}
xbs = d.r.readx(l)
} else if d.vd == bincVdByteArray {
xbs = d.DecodeBytes(nil, false, true)
xbs = d.DecodeBytes(nil, true)
} else {
d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd)
return
@ -849,7 +859,7 @@ func (d *bincDecDriver) DecodeNaked() {
n.s = d.DecodeString()
case bincVdByteArray:
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false)
n.l = d.DecodeBytes(nil, false)
case bincVdTimestamp:
n.v = valueTypeTimestamp
tt, err := decodeTime(d.r.readx(int(d.vs)))
@ -910,7 +920,7 @@ func (h *BincHandle) newEncDriver(e *Encoder) encDriver {
}
func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes}
return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes}
}
func (e *bincEncDriver) reset() {
@ -920,7 +930,7 @@ func (e *bincEncDriver) reset() {
}
func (d *bincDecDriver) reset() {
d.r = d.d.r
d.r, d.br = d.d.r, d.d.bytes
d.s = nil
d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0
}


@ -196,6 +196,9 @@ func (d *cborDecDriver) uncacheRead() {
}
func (d *cborDecDriver) ContainerType() (vt valueType) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == cborBdNil {
return valueTypeNil
} else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) {
@ -351,6 +354,9 @@ func (d *cborDecDriver) DecodeBool() (b bool) {
}
func (d *cborDecDriver) ReadMapStart() (length int) {
if !d.bdRead {
d.readNextBd()
}
d.bdRead = false
if d.bd == cborBdIndefiniteMap {
return -1
@ -359,6 +365,9 @@ func (d *cborDecDriver) ReadMapStart() (length int) {
}
func (d *cborDecDriver) ReadArrayStart() (length int) {
if !d.bdRead {
d.readNextBd()
}
d.bdRead = false
if d.bd == cborBdIndefiniteArray {
return -1
@ -398,7 +407,7 @@ func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
return bs
}
func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
if !d.bdRead {
d.readNextBd()
}
@ -421,11 +430,15 @@ func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
}
func (d *cborDecDriver) DecodeString() (s string) {
return string(d.DecodeBytes(d.b[:], true, true))
return string(d.DecodeBytes(d.b[:], true))
}
func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) {
return d.DecodeBytes(d.b[:], true)
}
func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
@ -476,7 +489,7 @@ func (d *cborDecDriver) DecodeNaked() {
n.f = d.DecodeFloat(false)
case cborBdIndefiniteBytes:
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false)
n.l = d.DecodeBytes(nil, false)
case cborBdIndefiniteString:
n.v = valueTypeString
n.s = d.DecodeString()
@ -501,7 +514,7 @@ func (d *cborDecDriver) DecodeNaked() {
n.i = d.DecodeInt(64)
case d.bd >= cborBaseBytes && d.bd < cborBaseString:
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false)
n.l = d.DecodeBytes(nil, false)
case d.bd >= cborBaseString && d.bd < cborBaseArray:
n.v = valueTypeString
n.s = d.DecodeString()
@ -576,7 +589,7 @@ func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
}
func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes}
return &cborDecDriver{d: d, h: h, r: d.r, br: d.bytes}
}
func (e *cborEncDriver) reset() {
@ -584,7 +597,7 @@ func (e *cborEncDriver) reset() {
}
func (d *cborDecDriver) reset() {
d.r = d.d.r
d.r, d.br = d.d.r, d.d.bytes
d.bd, d.bdRead = 0, false
}

File diff suppressed because it is too large.


@ -313,21 +313,21 @@ type encFnInfo struct {
}
func (f *encFnInfo) builtin(rv reflect.Value) {
f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface())
f.e.e.EncodeBuiltin(f.ti.rtid, rv2i(rv))
}
func (f *encFnInfo) raw(rv reflect.Value) {
f.e.raw(rv.Interface().(Raw))
f.e.raw(rv2i(rv).(Raw))
}
func (f *encFnInfo) rawExt(rv reflect.Value) {
// rev := rv.Interface().(RawExt)
// rev := rv2i(rv).(RawExt)
// f.e.e.EncodeRawExt(&rev, f.e)
var re *RawExt
if rv.CanAddr() {
re = rv.Addr().Interface().(*RawExt)
re = rv2i(rv.Addr()).(*RawExt)
} else {
rev := rv.Interface().(RawExt)
rev := rv2i(rv).(RawExt)
re = &rev
}
f.e.e.EncodeRawExt(re, f.e)
@ -338,21 +338,21 @@ func (f *encFnInfo) ext(rv reflect.Value) {
if k := rv.Kind(); (k == reflect.Struct || k == reflect.Array) && rv.CanAddr() {
rv = rv.Addr()
}
f.e.e.EncodeExt(rv.Interface(), f.xfTag, f.xfFn, f.e)
f.e.e.EncodeExt(rv2i(rv), f.xfTag, f.xfFn, f.e)
}
func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v interface{}, proceed bool) {
if indir == 0 {
v = rv.Interface()
v = rv2i(rv)
} else if indir == -1 {
// If a non-pointer was passed to Encode(), then that value is not addressable.
// Take addr if addressable, else copy value to an addressable value.
if rv.CanAddr() {
v = rv.Addr().Interface()
v = rv2i(rv.Addr())
} else {
rv2 := reflect.New(rv.Type())
rv2.Elem().Set(rv)
v = rv2.Interface()
v = rv2i(rv2)
// fmt.Printf("rv.Type: %v, rv2.Type: %v, v: %v\n", rv.Type(), rv2.Type(), v)
}
} else {
@ -363,7 +363,7 @@ func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v
}
rv = rv.Elem()
}
v = rv.Interface()
v = rv2i(rv)
}
return v, true
}
@ -383,7 +383,7 @@ func (f *encFnInfo) binaryMarshal(rv reflect.Value) {
func (f *encFnInfo) textMarshal(rv reflect.Value) {
if v, proceed := f.getValueForMarshalInterface(rv, f.ti.tmIndir); proceed {
// debugf(">>>> encoding.TextMarshaler: %T", rv.Interface())
// debugf(">>>> encoding.TextMarshaler: %T", rv2i(rv))
bs, fnerr := v.(encoding.TextMarshaler).MarshalText()
f.e.marshal(bs, fnerr, false, c_UTF8)
}
@ -476,10 +476,10 @@ func (f *encFnInfo) kSlice(rv reflect.Value) {
bs := e.b[:0]
// do not use range, so that the number of elements encoded
// does not change, and encoding does not hang waiting on someone to close chan.
// for b := range rv.Interface().(<-chan byte) {
// for b := range rv2i(rv).(<-chan byte) {
// bs = append(bs, b)
// }
ch := rv.Interface().(<-chan byte)
ch := rv2i(rv).(<-chan byte)
for i := 0; i < l; i++ {
bs = append(bs, <-ch)
}
@ -507,7 +507,7 @@ func (f *encFnInfo) kSlice(rv reflect.Value) {
// a concrete type and kInterface will bomb.
var fn *encFn
if rtelem.Kind() != reflect.Interface {
rtelemid := reflect.ValueOf(rtelem).Pointer()
rtelemid := rt2id(rtelem)
fn = e.getEncFn(rtelemid, rtelem, true, true)
}
// TODO: Consider perf implication of encoding odd index values as symbols if type is string
@ -680,7 +680,7 @@ func (f *encFnInfo) kMap(rv reflect.Value) {
ti := f.ti
rtkey := ti.rt.Key()
rtval := ti.rt.Elem()
rtkeyid := reflect.ValueOf(rtkey).Pointer()
rtkeyid := rt2id(rtkey)
// keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String
var keyTypeIsString = rtkeyid == stringTypId
if keyTypeIsString {
@ -690,7 +690,7 @@ func (f *encFnInfo) kMap(rv reflect.Value) {
rtkey = rtkey.Elem()
}
if rtkey.Kind() != reflect.Interface {
rtkeyid = reflect.ValueOf(rtkey).Pointer()
rtkeyid = rt2id(rtkey)
keyFn = e.getEncFn(rtkeyid, rtkey, true, true)
}
}
@ -698,7 +698,7 @@ func (f *encFnInfo) kMap(rv reflect.Value) {
rtval = rtval.Elem()
}
if rtval.Kind() != reflect.Interface {
rtvalid := reflect.ValueOf(rtval).Pointer()
rtvalid := rt2id(rtval)
valFn = e.getEncFn(rtvalid, rtval, true, true)
}
mks := rv.MapKeys()
@ -1027,6 +1027,8 @@ func (e *Encoder) ResetBytes(out *[]byte) {
// However, struct values may encode as arrays. This happens when:
// - StructToArray Encode option is set, OR
// - the tag on the _struct field sets the "toarray" option
// Note that omitempty is ignored when encoding struct values as arrays,
// as an entry must be encoded for each field, to maintain its position.
//
// Values with types that implement MapBySlice are encoded as stream maps.
//
@ -1053,8 +1055,7 @@ func (e *Encoder) ResetBytes(out *[]byte) {
// }
//
// type MyStruct struct {
// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field
// //and encode struct as an array
// _struct bool `codec:",toarray"` //encode struct as an array
// }
//
// The mode of encoding is based on the type of the value. When a value is seen:
@ -1215,7 +1216,7 @@ func (e *Encoder) doEncodeValue(rv reflect.Value, fn *encFn, sptr uintptr,
}
if fn == nil {
rt := rv.Type()
rtid := reflect.ValueOf(rt).Pointer()
rtid := rt2id(rt)
// fn = e.getEncFn(rtid, rt, true, true)
fn = e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer)
}
@ -1239,7 +1240,7 @@ func (e *Encoder) encodeValue(rv reflect.Value, fn *encFn) {
}
func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *encFn) {
// rtid := reflect.ValueOf(rt).Pointer()
// rtid := rt2id(rt)
var ok bool
if useMapForCodecCache {
fn, ok = e.f[rtid]
@ -1309,7 +1310,7 @@ func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCo
} else {
rtu = reflect.SliceOf(rt.Elem())
}
rtuid := reflect.ValueOf(rtu).Pointer()
rtuid := rt2id(rtu)
if idx := fastpathAV.index(rtuid); idx != -1 {
xfnf := fastpathAV[idx].encfn
xrt := fastpathAV[idx].rt

File diff suppressed because it is too large.


@ -15,6 +15,9 @@ import (
"reflect"
)
// GenVersion is the current version of codecgen.
const GenVersion = 6
// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continuously and without notice.
@ -26,12 +29,14 @@ import (
// to perform encoding or decoding of primitives or known slice or map types.
// GenHelperEncoder is exported so that it can be used externally by codecgen.
//
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.
func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) {
return genHelperEncoder{e: e}, e.e
}
// GenHelperDecoder is exported so that it can be used externally by codecgen.
//
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.
func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) {
return genHelperDecoder{d: d}, d.d
@ -112,7 +117,7 @@ func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
if rt.Kind() == reflect.Ptr {
rt = rt.Elem()
}
rtid := reflect.ValueOf(rt).Pointer()
rtid := rt2id(rt)
if xfFn := f.e.h.getExt(rtid); xfFn != nil {
f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
return true
@ -172,7 +177,7 @@ func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true))
fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes())
if fnerr != nil {
panic(fnerr)
}
@ -180,7 +185,7 @@ func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
// bs := f.dd.DecodeBytes(f.d.b[:], true, true)
// bs := f.dd.DecodeStringAsBytes()
// grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
if fnerr != nil {
@ -190,7 +195,7 @@ func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true))
fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true))
if fnerr != nil {
panic(fnerr)
}
@ -222,7 +227,7 @@ func (f genHelperDecoder) HasExtensions() bool {
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
rt := reflect.TypeOf(v).Elem()
rtid := reflect.ValueOf(rt).Pointer()
rtid := rt2id(rt)
if xfFn := f.d.h.getExt(rtid); xfFn != nil {
f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
return true
@ -231,10 +236,15 @@ func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) {
return decInferLen(clen, maxlen, unit)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) StringView(v []byte) string {
return stringView(v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSendContainerState(c containerState) {
if f.d.cr != nil {


@ -10,7 +10,7 @@ const genDecMapTmpl = `
{{var "l"}} := r.ReadMapStart()
{{var "bh"}} := z.DecBasicHandle()
if {{var "v"}} == nil {
{{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
{{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
*{{ .Varname }} = {{var "v"}}
}
@ -22,8 +22,9 @@ if {{var "bh"}}.MapValueReset {
{{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
{{else if not decElemKindImmutable}}{{var "mg"}} = true
{{end}} }
if {{var "l"}} > 0 {
for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ {
if {{var "l"}} != 0 {
{{var "hl"}} := {{var "l"}} > 0
for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
@ -42,26 +43,6 @@ for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
} else if {{var "l"}} < 0 {
for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}})
}{{ end }}{{if decElemKindPtr}}
{{var "ms"}} = true {{ end }}
if {{var "mg"}} {
{{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
if {{var "mok"}} {
{{var "ms"}} = false
} {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
} // else len==0: TODO: Should we clear map entries?
z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }})
`
@ -82,94 +63,60 @@ if {{var "l"}} == 0 {
{{var "v"}} = make({{ .CTyp }}, 0)
{{var "c"}} = true
} {{end}}
} else if {{var "l"}} > 0 {
{{if isChan }}if {{var "v"}} == nil {
{{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
{{var "v"}} = make({{ .CTyp }}, {{var "rl"}})
{{var "c"}} = true
}
for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ {
{{var "h"}}.ElemContainerState({{var "r"}})
var {{var "t"}} {{ .Typ }}
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
{{var "v"}} <- {{var "t"}}
}
{{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}}
var {{var "rt"}} bool {{/* truncated */}}
_, _ = {{var "rl"}}, {{var "rt"}}
{{var "rr"}} = {{var "l"}} // len({{var "v"}})
} else {
{{var "hl"}} := {{var "l"}} > 0
var {{var "rl"}} int
{{if isSlice }} if {{var "hl"}} {
if {{var "l"}} > cap({{var "v"}}) {
{{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
{{ else }}{{if not .Immutable }}
{{var "rg"}} := len({{var "v"}}) > 0
{{var "v2"}} := {{var "v"}} {{end}}
{{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
if {{var "rt"}} {
if {{var "rl"}} <= cap({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "rl"}}]
} else {
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
}
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
if {{var "rl"}} <= cap({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "rl"}}]
} else {
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
}
{{var "c"}} = true
{{var "rr"}} = len({{var "v"}}) {{if not .Immutable }}
if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}}
} {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) {
} else if {{var "l"}} != len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "l"}}]
{{var "c"}} = true
} {{end}} {{/* end isSlice:47 */}}
}
} {{end}}
{{var "j"}} := 0
for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ {
{{var "h"}}.ElemContainerState({{var "j"}})
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
}
{{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
{{var "h"}}.ElemContainerState({{var "j"}})
z.DecSwallow()
}
{{ else }}if {{var "rt"}} {
for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
{{var "v"}} = append({{var "v"}}, {{ zero}})
{{var "h"}}.ElemContainerState({{var "j"}})
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ {
if {{var "j"}} == 0 && len({{var "v"}}) == 0 {
if {{var "hl"}} {
{{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
} else {
{{var "rl"}} = 8
}
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
{{var "c"}} = true
}
} {{end}} {{/* end isArray:56 */}}
{{end}} {{/* end isChan:16 */}}
} else { {{/* len < 0 */}}
{{var "j"}} := 0
for ; !r.CheckBreak(); {{var "j"}}++ {
{{if isChan }}
{{var "h"}}.ElemContainerState({{var "j"}})
var {{var "t"}} {{ .Typ }}
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
{{var "v"}} <- {{var "t"}}
{{ else }}
// if indefinite, etc, then expand the slice if necessary
var {{var "db"}} bool
if {{var "j"}} >= len({{var "v"}}) {
{{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
{{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
{{var "c"}} = true {{end}}
{{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}); {{var "c"}} = true
{{end}} {{if isArray}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true
{{end}}
}
{{var "h"}}.ElemContainerState({{var "j"}})
if {{var "j"}} < len({{var "v"}}) {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
} else {
if {{var "db"}} {
z.DecSwallow()
} else {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
}
{{end}}
}
{{if isSlice }}if {{var "j"}} < len({{var "v"}}) {
{{if isSlice}} if {{var "j"}} < len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "j"}}]
{{var "c"}} = true
} else if {{var "j"}} == 0 && {{var "v"}} == nil {
{{var "v"}} = []{{ .Typ }}{}
{{var "v"}} = make([]{{ .Typ }}, 0)
{{var "c"}} = true
}{{end}}
} {{end}}
}
{{var "h"}}.End()
{{if not isArray }}if {{var "c"}} {
*{{ .Varname }} = {{var "v"}}
}{{end}}
`


@ -1,3 +1,5 @@
// +build codecgen.exec
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
@ -80,6 +82,10 @@ import (
// Note:
// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
// This way, there isn't a function call overhead just to see that we should not enter a block of code.
//
// Note:
// codecgen-generated code depends on the variables defined by fast-path.generated.go.
// consequently, you cannot run with tags "codecgen notfastpath".
// GenVersion is the current version of codecgen.
//
@ -94,7 +100,8 @@ import (
// changes in signature of some unpublished helper methods and codecgen cmdline arguments.
// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen)
// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections.
const GenVersion = 5
// v6: removed unsafe from gen, and now uses codecgen.exec tag
const genVersion = 6
const (
genCodecPkg = "codec1978"
@ -126,7 +133,6 @@ var (
genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice")
genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__")
genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`)
genCheckVendor bool
)
// genRunner holds some state used during a Gen run.
@ -147,8 +153,7 @@ type genRunner struct {
is map[reflect.Type]struct{} // types seen during import search
bp string // base PkgPath, for which we are generating for
cpfx string // codec package prefix
unsafe bool // is unsafe to be used in generated code?
cpfx string // codec package prefix
tm map[reflect.Type]struct{} // types for which enc/dec must be generated
ts []reflect.Type // types for which enc/dec must be generated
@ -163,8 +168,8 @@ type genRunner struct {
// Gen will write a complete go file containing Selfer implementations for each
// type passed. All the types must be in the same package.
//
// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.*
func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) {
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.
func Gen(w io.Writer, buildTags, pkgName, uid string, ti *TypeInfos, typ ...reflect.Type) {
// All types passed to this method do not have a codec.Selfer method implemented directly.
// codecgen already checks the AST and skips any types that define the codec.Selfer methods.
// Consequently, there's no need to check and trim them if they implement codec.Selfer
@ -173,19 +178,18 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn
return
}
x := genRunner{
unsafe: useUnsafe,
w: w,
t: typ,
te: make(map[uintptr]bool),
td: make(map[uintptr]bool),
im: make(map[string]reflect.Type),
imn: make(map[string]string),
is: make(map[reflect.Type]struct{}),
tm: make(map[reflect.Type]struct{}),
ts: []reflect.Type{},
bp: genImportPath(typ[0]),
xs: uid,
ti: ti,
w: w,
t: typ,
te: make(map[uintptr]bool),
td: make(map[uintptr]bool),
im: make(map[string]reflect.Type),
imn: make(map[string]string),
is: make(map[reflect.Type]struct{}),
tm: make(map[reflect.Type]struct{}),
ts: []reflect.Type{},
bp: genImportPath(typ[0]),
xs: uid,
ti: ti,
}
if x.ti == nil {
x.ti = defTypeInfos
@ -234,11 +238,8 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn
x.linef("%s \"%s\"", x.imn[k], k)
}
// add required packages
for _, k := range [...]string{"reflect", "unsafe", "runtime", "fmt", "errors"} {
for _, k := range [...]string{"reflect", "runtime", "fmt", "errors"} {
if _, ok := x.im[k]; !ok {
if k == "unsafe" && !x.unsafe {
continue
}
x.line("\"" + k + "\"")
}
}
@ -265,20 +266,16 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn
x.line(")")
x.line("")
if x.unsafe {
x.line("type codecSelferUnsafeString" + x.xs + " struct { Data uintptr; Len int}")
x.line("")
}
x.hn = "codecSelfer" + x.xs
x.line("type " + x.hn + " struct{}")
x.line("")
x.varsfxreset()
x.line("func init() {")
x.linef("if %sGenVersion != %v {", x.cpfx, GenVersion)
x.linef("if %sGenVersion != %v {", x.cpfx, genVersion)
x.line("_, file, _, _ := runtime.Caller(0)")
x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `)
x.linef(`%v, %sGenVersion, file)`, GenVersion, x.cpfx)
x.linef(`%v, %sGenVersion, file)`, genVersion, x.cpfx)
x.line("panic(err)")
x.linef("}")
x.line("if false { // reference the types, but skip this branch at build/run time")
@ -289,10 +286,6 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn
x.linef("var v%v %s.%s", n, x.imn[k], t.Name())
n++
}
if x.unsafe {
x.linef("var v%v unsafe.Pointer", n)
n++
}
if n > 0 {
x.out("_")
for i := 1; i < n; i++ {
@ -315,7 +308,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn
}
for _, t := range x.ts {
rtid := reflect.ValueOf(t).Pointer()
rtid := rt2id(t)
// generate enc functions for all these slice/map types.
x.varsfxreset()
x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx)
@ -545,21 +538,21 @@ func (x *genRunner) selfer(encode bool) {
x.out(fnSigPfx)
x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {")
x.genRequiredMethodVars(false)
x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleConsolidated)
x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleConsolidated)
x.line("}")
x.line("")
} else {
x.out(fnSigPfx)
x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {")
x.genRequiredMethodVars(false)
x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleLenPrefix)
x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleLenPrefix)
x.line("}")
x.line("")
x.out(fnSigPfx)
x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {")
x.genRequiredMethodVars(false)
x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleCheckBreak)
x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleCheckBreak)
x.line("}")
x.line("")
}
@ -568,7 +561,7 @@ func (x *genRunner) selfer(encode bool) {
x.out(fnSigPfx)
x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {")
x.genRequiredMethodVars(false)
x.decStructArray(genTopLevelVarName, "l", "return", reflect.ValueOf(t0).Pointer(), t0)
x.decStructArray(genTopLevelVarName, "l", "return", rt2id(t0), t0)
x.line("}")
x.line("")
@ -645,7 +638,7 @@ func (x *genRunner) encVar(varname string, t reflect.Type) {
// enc will encode a variable (varname) of type t,
// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type ptrTo(T) (to prevent copying)
func (x *genRunner) enc(varname string, t reflect.Type) {
rtid := reflect.ValueOf(t).Pointer()
rtid := rt2id(t)
// We call CodecEncodeSelf if one of the following are honored:
// - the type already implements Selfer, call that
// - the type has a Selfer implementation just created, use that
@ -1098,7 +1091,7 @@ func (x *genRunner) dec(varname string, t reflect.Type) {
// assumptions:
// - the varname is to a pointer already. No need to take address of it
// - t is always a baseType T (not a *T, etc).
rtid := reflect.ValueOf(t).Pointer()
rtid := rt2id(t)
tptr := reflect.PtrTo(t)
if x.checkForSelfer(t, varname) {
if t.Implements(selferTyp) || tptr.Implements(selferTyp) {
@ -1231,7 +1224,7 @@ func (x *genRunner) dec(varname string, t reflect.Type) {
// - if elements are primitives or Selfers, call dedicated function on each member.
// - else call Encoder.encode(XXX) on it.
if rtid == uint8SliceTypId {
x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false, false)")
x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false)")
} else if fastpathAV.index(rtid) != -1 {
g := x.newGenV(t)
x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)")
@ -1318,11 +1311,11 @@ func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAs
func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) {
if t.AssignableTo(uint8SliceTyp) {
x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false, false)")
x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false)")
return
}
if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 {
x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], false, true)", t.Len(), varname)
x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], true)", t.Len(), varname)
return
}
type tstruc struct {
@ -1469,17 +1462,6 @@ func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t ref
i := x.varsfx()
kName := tpfx + "s" + i
// We thought to use ReadStringAsBytes, as go compiler might optimize the copy out.
// However, using that was more expensive, as it seems that the switch expression
// is evaluated each time.
//
// We could depend on decodeString using a temporary/shared buffer internally.
// However, this model of creating a byte array, and using explicitly is faster,
// and allows optional use of unsafe []byte->string conversion without alloc.
// Also, ensure that the slice array doesn't escape.
// That will help escape analysis prevent allocation when it gets better.
// x.line("var " + kName + "Arr = [32]byte{} // default string to decode into")
// x.line("var " + kName + "Slc = " + kName + "Arr[:] // default slice to decode into")
// use the scratch buffer to avoid allocation (most field names are < 32).
@ -1499,15 +1481,9 @@ func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t ref
x.line("} else { if r.CheckBreak() { break }; }")
}
x.linef("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
x.line(kName + "Slc = r.DecodeBytes(" + kName + "Slc, true, true)")
x.line(kName + "Slc = r.DecodeStringAsBytes()")
// let string be scoped to this loop alone, so it doesn't escape.
if x.unsafe {
x.line(kName + "SlcHdr := codecSelferUnsafeString" + x.xs + "{uintptr(unsafe.Pointer(&" +
kName + "Slc[0])), len(" + kName + "Slc)}")
x.line(kName + " := *(*string)(unsafe.Pointer(&" + kName + "SlcHdr))")
} else {
x.line(kName + " := string(" + kName + "Slc)")
}
x.line(kName + " := string(" + kName + "Slc)")
x.linef("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
x.decStructMapSwitch(kName, varname, rtid, t)
@ -1653,15 +1629,8 @@ func (x *genV) MethodNamePfx(prefix string, prim bool) string {
func genImportPath(t reflect.Type) (s string) {
s = t.PkgPath()
if genCheckVendor {
// HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later.
// if s contains /vendor/ OR startsWith vendor/, then return everything after it.
const vendorStart = "vendor/"
const vendorInline = "/vendor/"
if i := strings.LastIndex(s, vendorInline); i >= 0 {
s = s[i+len(vendorInline):]
} else if strings.HasPrefix(s, vendorStart) {
s = s[len(vendorStart):]
}
// HACK: always handle vendoring. It should be typically on in go 1.6, 1.7
s = stripVendor(s)
}
return
}
@ -1783,8 +1752,8 @@ func genIsImmutable(t reflect.Type) (v bool) {
}
type genInternal struct {
Values []genV
Unsafe bool
Version int
Values []genV
}
func (x genInternal) FastpathLen() (l int) {
@ -1884,8 +1853,21 @@ func genInternalSortType(s string, elem bool) string {
panic("sorttype: unexpected type: " + s)
}
func stripVendor(s string) string {
// HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later.
// if s contains /vendor/ OR startsWith vendor/, then return everything after it.
const vendorStart = "vendor/"
const vendorInline = "/vendor/"
if i := strings.LastIndex(s, vendorInline); i >= 0 {
s = s[i+len(vendorInline):]
} else if strings.HasPrefix(s, vendorStart) {
s = s[len(vendorStart):]
}
return s
}
// var genInternalMu sync.Mutex
var genInternalV genInternal
var genInternalV = genInternal{Version: genVersion}
var genInternalTmplFuncs template.FuncMap
var genInternalOnce sync.Once
@ -1948,7 +1930,7 @@ func genInternalInit() {
"float64": 8,
"bool": 1,
}
var gt genInternal
var gt = genInternal{Version: genVersion}
// For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function
for _, s := range types {
@ -1980,11 +1962,10 @@ func genInternalInit() {
// It is run by the program author alone.
// Unfortunately, it has to be exported so that it can be called from a command line tool.
// *** DO NOT USE ***
func genInternalGoFile(r io.Reader, w io.Writer, safe bool) (err error) {
func genInternalGoFile(r io.Reader, w io.Writer) (err error) {
genInternalOnce.Do(genInternalInit)
gt := genInternalV
gt.Unsafe = !safe
t := template.New("").Funcs(genInternalTmplFuncs)


@ -9,8 +9,6 @@ import "reflect"
const reflectArrayOfSupported = true
func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) {
rvn2 = reflect.New(reflect.ArrayOf(rvn.Len(), intfTyp)).Elem()
reflect.Copy(rvn2, rvn)
return
func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
return reflect.ArrayOf(count, elem)
}


@ -9,6 +9,6 @@ import "reflect"
const reflectArrayOfSupported = false
func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) {
panic("reflect.ArrayOf unsupported")
func reflectArrayOf(count int, elem reflect.Type) reflect.Type {
panic("codec: reflect.ArrayOf unsupported in this go version")
}


@ -0,0 +1,15 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.9
package codec
import "reflect"
func makeMapReflect(t reflect.Type, size int) reflect.Value {
if size < 0 {
return reflect.MakeMapWithSize(t, 4)
}
return reflect.MakeMapWithSize(t, size)
}


@ -0,0 +1,12 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.9
package codec
import "reflect"
func makeMapReflect(t reflect.Type, size int) reflect.Value {
return reflect.MakeMap(t)
}


@ -0,0 +1,17 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.4
package codec
// This codec package will only work for go1.4 and above.
// This is for the following reasons:
// - go 1.4 was released in 2014
// - go runtime is written fully in go
// - interface only holds pointers
// - reflect.Value is stabilized as 3 words
func init() {
panic("codec: go 1.3 and below are not supported")
}


@ -7,6 +7,4 @@ package codec
import "os"
func init() {
genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"
}
var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"


@ -1,12 +1,10 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.6
// +build go1.6,!go1.7
package codec
import "os"
func init() {
genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0"
}
var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0"


@ -5,6 +5,4 @@
package codec
func init() {
genCheckVendor = true
}
const genCheckVendor = true


@ -0,0 +1,8 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.5
package codec
var genCheckVendor = false


@ -133,11 +133,11 @@ const (
// Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
recoverPanicToErr = true
// if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first.
// Only concern is that, if the slice already contained some garbage, we will decode into that garbage.
// resetSliceElemToZeroValue: on decoding a slice, reset the element to a zero value first.
// concern: if the slice already contained some garbage, we will decode into that garbage.
// The chances of this are slim, so leave this "optimization".
// TODO: should this be true, to ensure that we always decode into a "zero" "empty" value?
resetSliceElemToZeroValue bool = false
resetSliceElemToZeroValue = false
)
var (
@ -250,6 +250,8 @@ type jsonUnmarshaler interface {
UnmarshalJSON([]byte) error
}
// type byteAccepter func(byte) bool
var (
bigen = binary.BigEndian
structInfoFieldName = "_struct"
@ -278,17 +280,17 @@ var (
selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem()
uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer()
rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer()
rawTypId = reflect.ValueOf(rawTyp).Pointer()
intfTypId = reflect.ValueOf(intfTyp).Pointer()
timeTypId = reflect.ValueOf(timeTyp).Pointer()
stringTypId = reflect.ValueOf(stringTyp).Pointer()
uint8SliceTypId = rt2id(uint8SliceTyp)
rawExtTypId = rt2id(rawExtTyp)
rawTypId = rt2id(rawTyp)
intfTypId = rt2id(intfTyp)
timeTypId = rt2id(timeTyp)
stringTypId = rt2id(stringTyp)
mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer()
mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer()
intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer()
// mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer()
mapStrIntfTypId = rt2id(mapStrIntfTyp)
mapIntfIntfTypId = rt2id(mapIntfIntfTyp)
intfSliceTypId = rt2id(intfSliceTyp)
// mapBySliceTypId = rt2id(mapBySliceTyp)
intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits())
uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits())
@ -303,6 +305,24 @@ var (
var defTypeInfos = NewTypeInfos([]string{"codec", "json"})
var immutableKindsSet = [32]bool{
reflect.Int: true,
reflect.Int8: true,
reflect.Int16: true,
reflect.Int32: true,
reflect.Int64: true,
reflect.Uint: true,
reflect.Uint8: true,
reflect.Uint16: true,
reflect.Uint32: true,
reflect.Uint64: true,
reflect.Uintptr: true,
reflect.Float32: true,
reflect.Float64: true,
reflect.Bool: true,
reflect.String: true,
}
// Selfer defines methods by which a value can encode or decode itself.
//
// Any type which implements Selfer will be able to encode or decode itself.
@ -343,10 +363,10 @@ func (x *BasicHandle) getBasicHandle() *BasicHandle {
}
func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
if x.TypeInfos != nil {
return x.TypeInfos.get(rtid, rt)
if x.TypeInfos == nil {
return defTypeInfos.get(rtid, rt)
}
return defTypeInfos.get(rtid, rt)
return x.TypeInfos.get(rtid, rt)
}
// Handle is the interface for a specific encoding format.
@ -476,9 +496,6 @@ func (x *setExtWrapper) UpdateExt(dest interface{}, v interface{}) {
x.i.UpdateExt(dest, v)
}
// type errorString string
// func (x errorString) Error() string { return string(x) }
type binaryEncodingType struct{}
func (_ binaryEncodingType) isBinary() bool { return true }
@ -559,7 +576,7 @@ func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
return
}
rtid := reflect.ValueOf(rt).Pointer()
rtid := rt2id(rt)
for _, v := range *o {
if v.rtid == rtid {
v.tag, v.ext = tag, ext
@ -711,6 +728,7 @@ type typeInfo struct {
rt reflect.Type
rtid uintptr
// rv0 reflect.Value // saved zero value, used if immutableKind
numMeth uint16 // number of methods
@ -743,42 +761,49 @@ type typeInfo struct {
toArray bool // whether this (struct) type should be encoded as an array
}
// linear search. faster than binary search in my testing up to 16-field structs.
const binarySearchThreshold = 8 // similar to what python does for hashtables
func (ti *typeInfo) indexForEncName(name string) int {
// NOTE: name may be a stringView, so don't pass it to another function.
//tisfi := ti.sfi
const binarySearchThreshold = 16
if sfilen := len(ti.sfi); sfilen < binarySearchThreshold {
// linear search. faster than binary search in my testing up to 16-field structs.
sfilen := len(ti.sfi)
if sfilen < binarySearchThreshold {
for i, si := range ti.sfi {
if si.encName == name {
return i
}
}
} else {
// binary search. adapted from sort/search.go.
h, i, j := 0, 0, sfilen
for i < j {
h = i + (j-i)/2
if ti.sfi[h].encName < name {
i = h + 1
} else {
j = h
}
}
if i < sfilen && ti.sfi[i].encName == name {
return i
return -1
}
// binary search. adapted from sort/search.go.
h, i, j := 0, 0, sfilen
for i < j {
h = i + (j-i)/2
if ti.sfi[h].encName < name {
i = h + 1
} else {
j = h
}
}
if i < sfilen && ti.sfi[i].encName == name {
return i
}
return -1
}
type rtid2ti struct {
rtid uintptr
ti *typeInfo
}
// TypeInfos caches typeInfo for each type on first inspection.
//
// It is configured with a set of tag keys, which are used to get
// configuration for the type.
type TypeInfos struct {
infos map[uintptr]*typeInfo
mu sync.RWMutex
infos atomicTypeInfoSlice // formerly map[uintptr]*typeInfo, now *[]rtid2ti
mu sync.Mutex
tags []string
}
@ -787,7 +812,7 @@ type TypeInfos struct {
// This allows users customize the struct tag keys which contain configuration
// of their types.
func NewTypeInfos(tags []string) *TypeInfos {
return &TypeInfos{tags: tags, infos: make(map[uintptr]*typeInfo, 64)}
return &TypeInfos{tags: tags}
}
func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
@ -802,20 +827,46 @@ func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
return
}
func (x *TypeInfos) find(sp *[]rtid2ti, rtid uintptr) (idx int, ti *typeInfo) {
// binary search. adapted from sort/search.go.
// fmt.Printf(">>>> calling typeinfos.find ... \n")
// if sp == nil {
// return -1, nil
// }
s := *sp
h, i, j := 0, 0, len(s)
for i < j {
h = i + (j-i)/2
if s[h].rtid < rtid {
i = h + 1
} else {
j = h
}
}
if i < len(s) && s[i].rtid == rtid {
return i, s[i].ti
}
return i, nil
}
func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
var ok bool
x.mu.RLock()
pti, ok = x.infos[rtid]
x.mu.RUnlock()
if ok {
return
// fmt.Printf(">>>> calling typeinfos.get ... \n")
sp := x.infos.load()
var idx int
if sp != nil {
idx, pti = x.find(sp, rtid)
if pti != nil {
return
}
}
// do not hold lock while computing this.
// it may lead to duplication, but that's ok.
ti := typeInfo{rt: rt, rtid: rtid}
ti.numMeth = uint16(rt.NumMethod())
// ti.rv0 = reflect.Zero(rt)
ti.numMeth = uint16(rt.NumMethod())
var ok bool
var indir int8
if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok {
ti.bm, ti.bmIndir = true, indir
@ -854,7 +905,7 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
ti.baseId = rtid
} else {
ti.base = pt
ti.baseId = reflect.ValueOf(pt).Pointer()
ti.baseId = rt2id(pt)
ti.baseIndir = ptIndir
}
@ -875,12 +926,28 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
}
// sfi = sfip
var vs []rtid2ti
x.mu.Lock()
if pti, ok = x.infos[rtid]; !ok {
sp = x.infos.load()
if sp == nil {
// fmt.Printf(">>>> in typeinfos.get: sp == nil\n")
pti = &ti
x.infos[rtid] = pti
vs = []rtid2ti{{rtid, pti}}
x.infos.store(&vs)
} else {
idx, pti = x.find(sp, rtid)
if pti == nil {
s := *sp
pti = &ti
vs = make([]rtid2ti, len(s)+1)
copy(vs, s[:idx])
vs[idx] = rtid2ti{rtid, pti}
copy(vs[idx+1:], s[idx:])
x.infos.store(&vs)
}
}
x.mu.Unlock()
// fmt.Printf(">>>>>>> TypeInfos: Num Elements: %v\n", len(*(x.infos.load())))
return
}
@ -932,7 +999,7 @@ LOOP:
}
if ft.Kind() == reflect.Struct {
// if etypes contains this, don't call rget again (as fields are already seen here)
ftid := reflect.ValueOf(ft).Pointer()
ftid := rt2id(ft)
// We cannot recurse forever, but we need to track other field depths.
// So - we break if we see a type twice (not the first time).
// This should be sufficient to handle an embedded type that refers to its
@ -1065,22 +1132,23 @@ func panicToErr(err *error) {
// }
func isImmutableKind(k reflect.Kind) (v bool) {
return false ||
k == reflect.Int ||
k == reflect.Int8 ||
k == reflect.Int16 ||
k == reflect.Int32 ||
k == reflect.Int64 ||
k == reflect.Uint ||
k == reflect.Uint8 ||
k == reflect.Uint16 ||
k == reflect.Uint32 ||
k == reflect.Uint64 ||
k == reflect.Uintptr ||
k == reflect.Float32 ||
k == reflect.Float64 ||
k == reflect.Bool ||
k == reflect.String
return immutableKindsSet[k]
// return false ||
// k == reflect.Int ||
// k == reflect.Int8 ||
// k == reflect.Int16 ||
// k == reflect.Int32 ||
// k == reflect.Int64 ||
// k == reflect.Uint ||
// k == reflect.Uint8 ||
// k == reflect.Uint16 ||
// k == reflect.Uint32 ||
// k == reflect.Uint64 ||
// k == reflect.Uintptr ||
// k == reflect.Float32 ||
// k == reflect.Float64 ||
// k == reflect.Bool ||
// k == reflect.String
}
// these functions must be inlinable, and not call anybody


@ -219,24 +219,3 @@ func growCap(oldCap, unit, num int) (newCap int) {
}
return
}
func expandSliceValue(s reflect.Value, num int) reflect.Value {
if num <= 0 {
return s
}
l0 := s.Len()
l1 := l0 + num // new slice length
if l1 < l0 {
panic("ExpandSlice: slice overflow")
}
c0 := s.Cap()
if l1 <= c0 {
return s.Slice(0, l1)
}
st := s.Type()
c1 := growCap(c0, int(st.Elem().Size()), num)
s2 := reflect.MakeSlice(st, l1, c1)
// println("expandslicevalue: cap-old: ", c0, ", cap-new: ", c1, ", len-new: ", l1)
reflect.Copy(s2, s)
return s2
}


@ -1,13 +1,21 @@
// +build !unsafe
// +build !go1.7 safe appengine
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"reflect"
"sync/atomic"
)
// stringView returns a view of the []byte as a string.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
//
// Usage: Always maintain a reference to v while result of this call is in use,
// and call keepAlive4BytesView(v) at point where done with view.
func stringView(v []byte) string {
return string(v)
}
@ -15,6 +23,121 @@ func stringView(v []byte) string {
// bytesView returns a view of the string as a []byte.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
//
// Usage: Always maintain a reference to v while result of this call is in use,
// and call keepAlive4BytesView(v) at point where done with view.
func bytesView(v string) []byte {
return []byte(v)
}
// // keepAlive4BytesView maintains a reference to the input parameter for bytesView.
// //
// // Usage: call this at point where done with the bytes view.
// func keepAlive4BytesView(v string) {}
// // keepAlive4BytesView maintains a reference to the input parameter for stringView.
// //
// // Usage: call this at point where done with the string view.
// func keepAlive4StringView(v []byte) {}
func rv2i(rv reflect.Value) interface{} {
return rv.Interface()
}
func rt2id(rt reflect.Type) uintptr {
return reflect.ValueOf(rt).Pointer()
}
// --------------------------
type ptrToRvMap struct{}
func (_ *ptrToRvMap) init() {}
func (_ *ptrToRvMap) get(i interface{}) reflect.Value {
return reflect.ValueOf(i).Elem()
}
// --------------------------
type atomicTypeInfoSlice struct {
v atomic.Value
}
func (x *atomicTypeInfoSlice) load() *[]rtid2ti {
i := x.v.Load()
if i == nil {
return nil
}
return i.(*[]rtid2ti)
}
func (x *atomicTypeInfoSlice) store(p *[]rtid2ti) {
x.v.Store(p)
}
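// Illustrative only (not part of this diff): a hedged sketch of the
// copy-on-write pattern atomicTypeInfoSlice is meant to support — readers call
// load() lock-free, a writer copies the slice, appends, and store()s the copy.
// The helper name addTypeInfo is an assumption for illustration.
func addTypeInfo(x *atomicTypeInfoSlice, entry rtid2ti) {
	var cur []rtid2ti
	if p := x.load(); p != nil {
		cur = *p
	}
	next := make([]rtid2ti, len(cur), len(cur)+1)
	copy(next, cur)
	next = append(next, entry)
	x.store(&next) // readers observe the new slice atomically
}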
// --------------------------
func (f *decFnInfo) raw(rv reflect.Value) {
rv.SetBytes(f.d.raw())
}
func (f *decFnInfo) kString(rv reflect.Value) {
rv.SetString(f.d.d.DecodeString())
}
func (f *decFnInfo) kBool(rv reflect.Value) {
rv.SetBool(f.d.d.DecodeBool())
}
func (f *decFnInfo) kFloat32(rv reflect.Value) {
rv.SetFloat(f.d.d.DecodeFloat(true))
}
func (f *decFnInfo) kFloat64(rv reflect.Value) {
rv.SetFloat(f.d.d.DecodeFloat(false))
}
func (f *decFnInfo) kInt(rv reflect.Value) {
rv.SetInt(f.d.d.DecodeInt(intBitsize))
}
func (f *decFnInfo) kInt8(rv reflect.Value) {
rv.SetInt(f.d.d.DecodeInt(8))
}
func (f *decFnInfo) kInt16(rv reflect.Value) {
rv.SetInt(f.d.d.DecodeInt(16))
}
func (f *decFnInfo) kInt32(rv reflect.Value) {
rv.SetInt(f.d.d.DecodeInt(32))
}
func (f *decFnInfo) kInt64(rv reflect.Value) {
rv.SetInt(f.d.d.DecodeInt(64))
}
func (f *decFnInfo) kUint(rv reflect.Value) {
rv.SetUint(f.d.d.DecodeUint(uintBitsize))
}
func (f *decFnInfo) kUintptr(rv reflect.Value) {
rv.SetUint(f.d.d.DecodeUint(uintBitsize))
}
func (f *decFnInfo) kUint8(rv reflect.Value) {
rv.SetUint(f.d.d.DecodeUint(8))
}
func (f *decFnInfo) kUint16(rv reflect.Value) {
rv.SetUint(f.d.d.DecodeUint(16))
}
func (f *decFnInfo) kUint32(rv reflect.Value) {
rv.SetUint(f.d.d.DecodeUint(32))
}
func (f *decFnInfo) kUint64(rv reflect.Value) {
rv.SetUint(f.d.d.DecodeUint(64))
}
// func i2rv(i interface{}) reflect.Value {
// return reflect.ValueOf(i)
// }

View File

@ -1,4 +1,6 @@
// +build unsafe
// +build !safe
// +build !appengine
// +build go1.7
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
@ -6,10 +8,15 @@
package codec
import (
"reflect"
"sync/atomic"
"unsafe"
)
// This file has unsafe variants of some helper methods.
// NOTE: See helper_not_unsafe.go for the usage information.
// var zeroRTv [4]uintptr
type unsafeString struct {
Data uintptr
@ -22,9 +29,17 @@ type unsafeSlice struct {
Cap int
}
// stringView returns a view of the []byte as a string.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
type unsafeIntf struct {
typ unsafe.Pointer
word unsafe.Pointer
}
type unsafeReflectValue struct {
typ unsafe.Pointer
ptr unsafe.Pointer
flag uintptr
}
func stringView(v []byte) string {
if len(v) == 0 {
return ""
@ -35,9 +50,6 @@ func stringView(v []byte) string {
return *(*string)(unsafe.Pointer(&sx))
}
// bytesView returns a view of the string as a []byte.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
func bytesView(v string) []byte {
if len(v) == 0 {
return zeroByteSlice
@ -47,3 +59,404 @@ func bytesView(v string) []byte {
bx := unsafeSlice{sx.Data, sx.Len, sx.Len}
return *(*[]byte)(unsafe.Pointer(&bx))
}
// func keepAlive4BytesView(v string) {
// runtime.KeepAlive(v)
// }
// func keepAlive4StringView(v []byte) {
// runtime.KeepAlive(v)
// }
const _unsafe_rv2i_is_safe = false
// TODO: consider a more generally-known optimization for reflect.Value ==> Interface
//
// Currently, we use this fragile method that taps into implementation details from
// the source go stdlib reflect/value.go,
// and trims the implementation.
func rv2i(rv reflect.Value) interface{} {
if _unsafe_rv2i_is_safe {
return rv.Interface()
}
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// references that are single-words (map, ptr) may be double-referenced as flagIndir
kk := urv.flag & (1<<5 - 1)
if (kk == uintptr(reflect.Map) || kk == uintptr(reflect.Ptr)) && urv.flag&(1<<7) != 0 {
return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
}
return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
}
func rt2id(rt reflect.Type) uintptr {
return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
}
// func rv0t(rt reflect.Type) reflect.Value {
// ut := (*unsafeIntf)(unsafe.Pointer(&rt))
// // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr
// uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())}
// return *(*reflect.Value)(unsafe.Pointer(&uv})
// }
type ptrToRVKV struct {
k uintptr
v reflect.Value
}
type ptrToRvMap struct {
// m map[uintptr]reflect.Value
a [4]ptrToRVKV
v []ptrToRVKV
}
func (p *ptrToRvMap) init() {
// fmt.Printf(">>>> new ptr to rv map\n")
// p.m = make(map[uintptr]reflect.Value, 32)
p.v = p.a[:0]
}
func (p *ptrToRvMap) get(intf interface{}) (rv reflect.Value) {
word := uintptr(((*unsafeIntf)(unsafe.Pointer(&intf))).word)
// binary search. adapted from sort/search.go.
h, i, j := 0, 0, len(p.v)
for i < j {
h = i + (j-i)/2
if p.v[h].k < word {
i = h + 1
} else {
j = h
}
}
if i < len(p.v) && p.v[i].k == word {
return p.v[i].v
}
// insert into position i
// fmt.Printf(">>>> resetting rv for word: %x, interface: %v\n", word, intf)
rv = reflect.ValueOf(intf).Elem()
p.v = append(p.v, ptrToRVKV{})
copy(p.v[i+1:len(p.v)], p.v[i:len(p.v)-1])
p.v[i].k, p.v[i].v = word, rv
return
}
// --------------------------
type atomicTypeInfoSlice struct {
v unsafe.Pointer
}
func (x *atomicTypeInfoSlice) load() *[]rtid2ti {
return (*[]rtid2ti)(atomic.LoadPointer(&x.v))
}
func (x *atomicTypeInfoSlice) store(p *[]rtid2ti) {
atomic.StorePointer(&x.v, unsafe.Pointer(p))
}
// --------------------------
func (f *decFnInfo) raw(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*[]byte)(urv.ptr) = f.d.raw()
}
func (f *decFnInfo) kString(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*string)(urv.ptr) = f.d.d.DecodeString()
}
func (f *decFnInfo) kBool(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*bool)(urv.ptr) = f.d.d.DecodeBool()
}
func (f *decFnInfo) kFloat32(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*float32)(urv.ptr) = float32(f.d.d.DecodeFloat(true))
}
func (f *decFnInfo) kFloat64(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*float64)(urv.ptr) = f.d.d.DecodeFloat(false)
}
func (f *decFnInfo) kInt(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int)(urv.ptr) = int(f.d.d.DecodeInt(intBitsize))
}
func (f *decFnInfo) kInt8(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int8)(urv.ptr) = int8(f.d.d.DecodeInt(8))
}
func (f *decFnInfo) kInt16(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int16)(urv.ptr) = int16(f.d.d.DecodeInt(16))
}
func (f *decFnInfo) kInt32(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int32)(urv.ptr) = int32(f.d.d.DecodeInt(32))
}
func (f *decFnInfo) kInt64(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*int64)(urv.ptr) = f.d.d.DecodeInt(64)
}
func (f *decFnInfo) kUint(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint)(urv.ptr) = uint(f.d.d.DecodeUint(uintBitsize))
}
func (f *decFnInfo) kUintptr(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uintptr)(urv.ptr) = uintptr(f.d.d.DecodeUint(uintBitsize))
}
func (f *decFnInfo) kUint8(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint8)(urv.ptr) = uint8(f.d.d.DecodeUint(8))
}
func (f *decFnInfo) kUint16(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint16)(urv.ptr) = uint16(f.d.d.DecodeUint(16))
}
func (f *decFnInfo) kUint32(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint32)(urv.ptr) = uint32(f.d.d.DecodeUint(32))
}
func (f *decFnInfo) kUint64(rv reflect.Value) {
urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
*(*uint64)(urv.ptr) = f.d.d.DecodeUint(64)
}
// func (p *ptrToRvMap) get(i interface{}) (rv reflect.Value) {
// word := uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).word)
// rv, exists := p.m[word]
// if !exists {
// fmt.Printf(">>>> resetting rv for word: %x, interface: %v\n", word, i)
// rv = reflect.ValueOf(i).Elem()
// p.m[word] = rv
// }
// return
// }
// func rt2id(rt reflect.Type) uintptr {
// return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word)
// // var i interface{} = rt
// // // ui := (*unsafeIntf)(unsafe.Pointer(&i))
// // return ((*unsafeIntf)(unsafe.Pointer(&i))).word
// }
// func rv2i(rv reflect.Value) interface{} {
// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// // non-reference type: already indir
// // reference type: depend on flagIndir property ('cos maybe was double-referenced)
// // const (unsafeRvFlagKindMask = 1<<5 - 1 , unsafeRvFlagIndir = 1 << 7 )
// // rvk := reflect.Kind(urv.flag & (1<<5 - 1))
// // if (rvk == reflect.Chan ||
// // rvk == reflect.Func ||
// // rvk == reflect.Interface ||
// // rvk == reflect.Map ||
// // rvk == reflect.Ptr ||
// // rvk == reflect.UnsafePointer) && urv.flag&(1<<8) != 0 {
// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type())
// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
// // }
// if urv.flag&(1<<5-1) == uintptr(reflect.Map) && urv.flag&(1<<7) != 0 {
// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type())
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
// }
// // fmt.Printf(">>>>> ++++ direct reference: %v, %v\n", rvk, rv.Type())
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
// }
// const (
// unsafeRvFlagKindMask = 1<<5 - 1
// unsafeRvKindDirectIface = 1 << 5
// unsafeRvFlagIndir = 1 << 7
// unsafeRvFlagAddr = 1 << 8
// unsafeRvFlagMethod = 1 << 9
// _USE_RV_INTERFACE bool = false
// _UNSAFE_RV_DEBUG = true
// )
// type unsafeRtype struct {
// _ [2]uintptr
// _ uint32
// _ uint8
// _ uint8
// _ uint8
// kind uint8
// _ [2]uintptr
// _ int32
// }
// func _rv2i(rv reflect.Value) interface{} {
// // Note: From use,
// // - it's never an interface
// // - the only calls here are for ifaceIndir types.
// // (though that conditional is wrong)
// // To know for sure, we need the value of t.kind (which is not exposed).
// //
// // Need to validate the path: type is indirect ==> only value is indirect ==> default (value is direct)
// // - Type indirect, Value indirect: ==> numbers, boolean, slice, struct, array, string
// // - Type Direct, Value indirect: ==> map???
// // - Type Direct, Value direct: ==> pointers, unsafe.Pointer, func, chan, map
// //
// // TRANSLATES TO:
// // if typeIndirect { } else if valueIndirect { } else { }
// //
// // Since we don't deal with funcs, then "flagMethod" is unset, and can be ignored.
// if _USE_RV_INTERFACE {
// return rv.Interface()
// }
// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// // if urv.flag&unsafeRvFlagMethod != 0 || urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) {
// // println("***** IS flag method or interface: delegating to rv.Interface()")
// // return rv.Interface()
// // }
// // if urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) {
// // println("***** IS Interface: delegate to rv.Interface")
// // return rv.Interface()
// // }
// // if urv.flag&unsafeRvFlagKindMask&unsafeRvKindDirectIface == 0 {
// // if urv.flag&unsafeRvFlagAddr == 0 {
// // println("***** IS ifaceIndir typ")
// // // ui := unsafeIntf{word: urv.ptr, typ: urv.typ}
// // // return *(*interface{})(unsafe.Pointer(&ui))
// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
// // }
// // } else if urv.flag&unsafeRvFlagIndir != 0 {
// // println("***** IS flagindir")
// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
// // } else {
// // println("***** NOT flagindir")
// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
// // }
// // println("***** default: delegate to rv.Interface")
// urt := (*unsafeRtype)(unsafe.Pointer(urv.typ))
// if _UNSAFE_RV_DEBUG {
// fmt.Printf(">>>> start: %v: ", rv.Type())
// fmt.Printf("%v - %v\n", *urv, *urt)
// }
// if urt.kind&unsafeRvKindDirectIface == 0 {
// if _UNSAFE_RV_DEBUG {
// fmt.Printf("**** +ifaceIndir type: %v\n", rv.Type())
// }
// // println("***** IS ifaceIndir typ")
// // if true || urv.flag&unsafeRvFlagAddr == 0 {
// // // println(" ***** IS NOT addr")
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
// // }
// } else if urv.flag&unsafeRvFlagIndir != 0 {
// if _UNSAFE_RV_DEBUG {
// fmt.Printf("**** +flagIndir type: %v\n", rv.Type())
// }
// // println("***** IS flagindir")
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ}))
// } else {
// if _UNSAFE_RV_DEBUG {
// fmt.Printf("**** -flagIndir type: %v\n", rv.Type())
// }
// // println("***** NOT flagindir")
// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ}))
// }
// // println("***** default: delegating to rv.Interface()")
// // return rv.Interface()
// }
// var staticM0 = make(map[string]uint64)
// var staticI0 = (int32)(-5)
// func staticRv2iTest() {
// i0 := (int32)(-5)
// m0 := make(map[string]uint16)
// m0["1"] = 1
// for _, i := range []interface{}{
// (int)(7),
// (uint)(8),
// (int16)(-9),
// (uint16)(19),
// (uintptr)(77),
// (bool)(true),
// float32(-32.7),
// float64(64.9),
// complex(float32(19), 5),
// complex(float64(-32), 7),
// [4]uint64{1, 2, 3, 4},
// (chan<- int)(nil), // chan,
// rv2i, // func
// io.Writer(ioutil.Discard),
// make(map[string]uint),
// (map[string]uint)(nil),
// staticM0,
// m0,
// &m0,
// i0,
// &i0,
// &staticI0,
// &staticM0,
// []uint32{6, 7, 8},
// "abc",
// Raw{},
// RawExt{},
// &Raw{},
// &RawExt{},
// unsafe.Pointer(&i0),
// } {
// i2 := rv2i(reflect.ValueOf(i))
// eq := reflect.DeepEqual(i, i2)
// fmt.Printf(">>>> %v == %v? %v\n", i, i2, eq)
// }
// // os.Exit(0)
// }
// func init() {
// staticRv2iTest()
// }
// func rv2i(rv reflect.Value) interface{} {
// if _USE_RV_INTERFACE || rv.Kind() == reflect.Interface || rv.CanAddr() {
// return rv.Interface()
// }
// // var i interface{}
// // ui := (*unsafeIntf)(unsafe.Pointer(&i))
// var ui unsafeIntf
// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
// // fmt.Printf("urv: flag: %b, typ: %b, ptr: %b\n", urv.flag, uintptr(urv.typ), uintptr(urv.ptr))
// if (urv.flag&unsafeRvFlagKindMask)&unsafeRvKindDirectIface == 0 {
// if urv.flag&unsafeRvFlagAddr != 0 {
// println("***** indirect and addressable! Needs typed move - delegate to rv.Interface()")
// return rv.Interface()
// }
// println("****** indirect type/kind")
// ui.word = urv.ptr
// } else if urv.flag&unsafeRvFlagIndir != 0 {
// println("****** unsafe rv flag indir")
// ui.word = *(*unsafe.Pointer)(urv.ptr)
// } else {
// println("****** default: assign prt to word directly")
// ui.word = urv.ptr
// }
// // ui.word = urv.ptr
// ui.typ = urv.typ
// // fmt.Printf("(pointers) ui.typ: %p, word: %p\n", ui.typ, ui.word)
// // fmt.Printf("(binary) ui.typ: %b, word: %b\n", uintptr(ui.typ), uintptr(ui.word))
// return *(*interface{})(unsafe.Pointer(&ui))
// // return i
// }
// func i2rv(i interface{}) reflect.Value {
// // u := *(*unsafeIntf)(unsafe.Pointer(&i))
// return reflect.ValueOf(i)
// }

View File

@ -34,7 +34,6 @@ package codec
import (
"bytes"
"encoding/base64"
"fmt"
"reflect"
"strconv"
"unicode/utf16"
@ -59,6 +58,11 @@ var (
// jsonTabs and jsonSpaces are used as caches for indents
jsonTabs, jsonSpaces string
jsonCharHtmlSafeSet [utf8.RuneSelf]bool
jsonCharSafeSet [utf8.RuneSelf]bool
jsonCharWhitespaceSet [256]bool
jsonNumSet [256]bool
)
const (
@ -78,19 +82,6 @@ const (
// P.S. Do not expect a significant decoding boost from this.
jsonValidateSymbols = true
// if jsonTruncateMantissa, truncate mantissa if trailing 0's.
// This is important because it could allow some floats to be decoded without
// deferring to strconv.ParseFloat.
jsonTruncateMantissa = true
// if mantissa >= jsonNumUintCutoff before multiplying by 10, this is an overflow
jsonNumUintCutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base)
// if mantissa >= jsonNumUintMaxVal, this is an overflow
jsonNumUintMaxVal = 1<<uint64(64) - 1
// jsonNumDigitsUint64Largest = 19
jsonSpacesOrTabsLen = 128
)
@ -105,6 +96,31 @@ func init() {
bs[i] = '\t'
}
jsonTabs = string(bs[:])
// populate the safe values as true: note: ASCII control characters are (0-31)
// jsonCharSafeSet: all true except (0-31) " \
// jsonCharHtmlSafeSet: all true except (0-31) " \ < > &
for i := 32; i < utf8.RuneSelf; i++ {
switch i {
case '"', '\\':
jsonCharSafeSet[i] = false
jsonCharHtmlSafeSet[i] = false
case '<', '>', '&':
jsonCharHtmlSafeSet[i] = false
jsonCharSafeSet[i] = true
default:
jsonCharSafeSet[i] = true
jsonCharHtmlSafeSet[i] = true
}
}
for i := 0; i < 256; i++ {
switch i {
case ' ', '\t', '\r', '\n':
jsonCharWhitespaceSet[i] = true
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-':
jsonNumSet[i] = true
}
}
}
type jsonEncDriver struct {
@ -214,6 +230,7 @@ func (e *jsonEncDriver) encodeFloat(f float64, numbits int) {
}
func (e *jsonEncDriver) EncodeInt(v int64) {
// if e.h.IntegerAsString == 'A' || e.h.IntegerAsString == 'L' && (v > 1<<53 || v < -(1<<53)) {
if x := e.h.IntegerAsString; x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) {
e.w.writen1('"')
e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
@ -224,6 +241,7 @@ func (e *jsonEncDriver) EncodeInt(v int64) {
}
func (e *jsonEncDriver) EncodeUint(v uint64) {
// if e.h.IntegerAsString == 'A' || e.h.IntegerAsString == 'L' && v > 1<<53 {
if x := e.h.IntegerAsString; x == 'A' || x == 'L' && v > 1<<53 {
e.w.writen1('"')
e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
@ -310,8 +328,11 @@ func (e *jsonEncDriver) quoteStr(s string) {
w.writen1('"')
start := 0
for i := 0; i < len(s); {
// encode all bytes < 0x20 (except \r, \n).
// also encode < > & to prevent security holes when served to some browsers.
if b := s[i]; b < utf8.RuneSelf {
if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
// if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
if jsonCharHtmlSafeSet[b] || (e.h.HTMLCharsAsIs && jsonCharSafeSet[b]) {
i++
continue
}
@ -332,8 +353,6 @@ func (e *jsonEncDriver) quoteStr(s string) {
case '\t':
w.writen2('\\', 't')
default:
// encode all bytes < 0x20 (except \r, \n).
// also encode < > & to prevent security holes when served to some browsers.
w.writestr(`\u00`)
w.writen2(hex[b>>4], hex[b&0xF])
}
@ -352,7 +371,7 @@ func (e *jsonEncDriver) quoteStr(s string) {
continue
}
// U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
// Both technically valid JSON, but bomb on JSONP, so fix here.
// Both technically valid JSON, but bomb on JSONP, so fix here unconditionally.
if c == '\u2028' || c == '\u2029' {
if start < i {
w.writestr(s[start:i])
@ -371,88 +390,6 @@ func (e *jsonEncDriver) quoteStr(s string) {
w.writen1('"')
}
//--------------------------------
type jsonNum struct {
// bytes []byte // may have [+-.eE0-9]
mantissa uint64 // where mantissa ends, and maybe dot begins.
exponent int16 // exponent value.
manOverflow bool
neg bool // started with -. No initial sign in the bytes above.
dot bool // has dot
explicitExponent bool // explicit exponent
}
func (x *jsonNum) reset() {
x.manOverflow = false
x.neg = false
x.dot = false
x.explicitExponent = false
x.mantissa = 0
x.exponent = 0
}
// uintExp is called only if exponent > 0.
func (x *jsonNum) uintExp() (n uint64, overflow bool) {
n = x.mantissa
e := x.exponent
if e >= int16(len(jsonUint64Pow10)) {
overflow = true
return
}
n *= jsonUint64Pow10[e]
if n < x.mantissa || n > jsonNumUintMaxVal {
overflow = true
return
}
return
// for i := int16(0); i < e; i++ {
// if n >= jsonNumUintCutoff {
// overflow = true
// return
// }
// n *= 10
// }
// return
}
// these constants are only used within floatVal.
// They are brought out, so that floatVal can be inlined.
const (
jsonUint64MantissaBits = 52
jsonMaxExponent = int16(len(jsonFloat64Pow10)) - 1
)
func (x *jsonNum) floatVal() (f float64, parseUsingStrConv bool) {
// We do not want to lose precision.
// Consequently, we will delegate to strconv.ParseFloat if any of the following happen:
// - There are more digits than in math.MaxUint64: 18446744073709551615 (20 digits)
// We expect up to 99.... (19 digits)
// - The mantissa cannot fit into a 52 bits of uint64
// - The exponent is beyond our scope, i.e. beyond 22.
parseUsingStrConv = x.manOverflow ||
x.exponent > jsonMaxExponent ||
(x.exponent < 0 && -(x.exponent) > jsonMaxExponent) ||
x.mantissa>>jsonUint64MantissaBits != 0
if parseUsingStrConv {
return
}
// all good. so handle parse here.
f = float64(x.mantissa)
// fmt.Printf(".Float: uint64 value: %v, float: %v\n", m, f)
if x.neg {
f = -f
}
if x.exponent > 0 {
f *= jsonFloat64Pow10[x.exponent]
} else if x.exponent < 0 {
f /= jsonFloat64Pow10[-x.exponent]
}
return
}
type jsonDecDriver struct {
noBuiltInTypes
d *Decoder
@ -470,26 +407,14 @@ type jsonDecDriver struct {
se setExtWrapper
n jsonNum
// n jsonNum
}
func jsonIsWS(b byte) bool {
return b == ' ' || b == '\t' || b == '\r' || b == '\n'
// return b == ' ' || b == '\t' || b == '\r' || b == '\n'
return jsonCharWhitespaceSet[b]
}
// // This will skip whitespace characters and return the next byte to read.
// // The next byte determines what the value will be one of.
// func (d *jsonDecDriver) skipWhitespace() {
// // fast-path: do not enter loop. Just check first (in case no whitespace).
// b := d.r.readn1()
// if jsonIsWS(b) {
// r := d.r
// for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
// }
// }
// d.tok = b
// }
func (d *jsonDecDriver) uncacheRead() {
if d.tok != 0 {
d.r.unreadn1()
@ -499,11 +424,7 @@ func (d *jsonDecDriver) uncacheRead() {
func (d *jsonDecDriver) sendContainerState(c containerState) {
if d.tok == 0 {
var b byte
r := d.r
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
}
d.tok = b
d.tok = d.r.skip(&jsonCharWhitespaceSet)
}
var xc uint8 // char expected
if c == containerMapKey {
@ -532,37 +453,23 @@ func (d *jsonDecDriver) sendContainerState(c containerState) {
func (d *jsonDecDriver) CheckBreak() bool {
if d.tok == 0 {
var b byte
r := d.r
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
}
d.tok = b
d.tok = d.r.skip(&jsonCharWhitespaceSet)
}
if d.tok == '}' || d.tok == ']' {
// d.tok = 0 // only checking, not consuming
return true
}
return false
return d.tok == '}' || d.tok == ']'
}
func (d *jsonDecDriver) readStrIdx(fromIdx, toIdx uint8) {
bs := d.r.readx(int(toIdx - fromIdx))
d.tok = 0
if jsonValidateSymbols {
if !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) {
d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:toIdx], bs)
return
}
if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:toIdx]) {
d.d.errorf("json: expecting %s: got %s", jsonLiterals[fromIdx:toIdx], bs)
return
}
}
func (d *jsonDecDriver) TryDecodeAsNil() bool {
if d.tok == 0 {
var b byte
r := d.r
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
}
d.tok = b
d.tok = d.r.skip(&jsonCharWhitespaceSet)
}
if d.tok == 'n' {
d.readStrIdx(10, 13) // ull
@ -573,11 +480,7 @@ func (d *jsonDecDriver) TryDecodeAsNil() bool {
func (d *jsonDecDriver) DecodeBool() bool {
if d.tok == 0 {
var b byte
r := d.r
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
}
d.tok = b
d.tok = d.r.skip(&jsonCharWhitespaceSet)
}
if d.tok == 'f' {
d.readStrIdx(5, 9) // alse
@ -593,11 +496,7 @@ func (d *jsonDecDriver) DecodeBool() bool {
func (d *jsonDecDriver) ReadMapStart() int {
if d.tok == 0 {
var b byte
r := d.r
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
}
d.tok = b
d.tok = d.r.skip(&jsonCharWhitespaceSet)
}
if d.tok != '{' {
d.d.errorf("json: expect char '%c' but got char '%c'", '{', d.tok)
@ -609,11 +508,7 @@ func (d *jsonDecDriver) ReadMapStart() int {
func (d *jsonDecDriver) ReadArrayStart() int {
if d.tok == 0 {
var b byte
r := d.r
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
}
d.tok = b
d.tok = d.r.skip(&jsonCharWhitespaceSet)
}
if d.tok != '[' {
d.d.errorf("json: expect char '%c' but got char '%c'", '[', d.tok)
@ -626,11 +521,7 @@ func (d *jsonDecDriver) ReadArrayStart() int {
func (d *jsonDecDriver) ContainerType() (vt valueType) {
// check container type by checking the first char
if d.tok == 0 {
var b byte
r := d.r
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
}
d.tok = b
d.tok = d.r.skip(&jsonCharWhitespaceSet)
}
if b := d.tok; b == '{' {
return valueTypeMap
@ -646,260 +537,57 @@ func (d *jsonDecDriver) ContainerType() (vt valueType) {
// return false // "unreachable"
}
func (d *jsonDecDriver) decNum(storeBytes bool) {
// If it has a . or an e|E, decode as a float; else decode as an int.
func (d *jsonDecDriver) decNumBytes() (bs []byte) {
// stores num bytes in d.bs
if d.tok == 0 {
var b byte
r := d.r
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
}
d.tok = b
d.tok = d.r.skip(&jsonCharWhitespaceSet)
}
b := d.tok
var str bool
if b == '"' {
str = true
b = d.r.readn1()
}
if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) {
d.d.errorf("json: decNum: got first char '%c'", b)
return
if d.tok == '"' {
bs = d.r.readUntil(d.b2[:0], '"')
bs = bs[:len(bs)-1]
} else {
d.r.unreadn1()
bs = d.r.readTo(d.bs[:0], &jsonNumSet)
// bs = d.r.readbUntilAny(d.bs[:0], " \t\n:,{}[]")
}
d.tok = 0
// fmt.Printf(">>>> decNumBytes: returning: '%s'\n", bs)
return bs
}
const cutoff = (1<<64-1)/uint64(10) + 1 // cutoff64(base)
const jsonNumUintMaxVal = 1<<uint64(64) - 1
n := &d.n
r := d.r
n.reset()
d.bs = d.bs[:0]
if str && storeBytes {
d.bs = append(d.bs, '"')
func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) {
bs := d.decNumBytes()
u, err := strconv.ParseUint(stringView(bs), 10, int(bitsize))
if err != nil {
d.d.errorf("json: decode uint from %s: %v", bs, err)
return
}
// The format of a number is as below:
// parsing: sign? digit* dot? digit* e? sign? digit*
// states: 0 1* 2 3* 4 5* 6 7
// We honor this state so we can break correctly.
var state uint8 = 0
var eNeg bool
var e int16
var eof bool
LOOP:
for !eof {
// fmt.Printf("LOOP: b: %q\n", b)
switch b {
case '+':
switch state {
case 0:
state = 2
// do not add sign to the slice ...
b, eof = r.readn1eof()
continue
case 6: // typ = jsonNumFloat
state = 7
default:
break LOOP
}
case '-':
switch state {
case 0:
state = 2
n.neg = true
// do not add sign to the slice ...
b, eof = r.readn1eof()
continue
case 6: // typ = jsonNumFloat
eNeg = true
state = 7
default:
break LOOP
}
case '.':
switch state {
case 0, 2: // typ = jsonNumFloat
state = 4
n.dot = true
default:
break LOOP
}
case 'e', 'E':
switch state {
case 0, 2, 4: // typ = jsonNumFloat
state = 6
// n.mantissaEndIndex = int16(len(n.bytes))
n.explicitExponent = true
default:
break LOOP
}
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
switch state {
case 0:
state = 2
fallthrough
case 2:
fallthrough
case 4:
if n.dot {
n.exponent--
}
if n.mantissa >= jsonNumUintCutoff {
n.manOverflow = true
break
}
v := uint64(b - '0')
n.mantissa *= 10
if v != 0 {
n1 := n.mantissa + v
if n1 < n.mantissa || n1 > jsonNumUintMaxVal {
n.manOverflow = true // n+v overflows
break
}
n.mantissa = n1
}
case 6:
state = 7
fallthrough
case 7:
if !(b == '0' && e == 0) {
e = e*10 + int16(b-'0')
}
default:
break LOOP
}
case '"':
if str {
if storeBytes {
d.bs = append(d.bs, '"')
}
b, eof = r.readn1eof()
}
break LOOP
default:
break LOOP
}
if storeBytes {
d.bs = append(d.bs, b)
}
b, eof = r.readn1eof()
}
if jsonTruncateMantissa && n.mantissa != 0 {
for n.mantissa%10 == 0 {
n.mantissa /= 10
n.exponent++
}
}
if e != 0 {
if eNeg {
n.exponent -= e
} else {
n.exponent += e
}
}
// d.n = n
if !eof {
if jsonUnreadAfterDecNum {
r.unreadn1()
} else {
if !jsonIsWS(b) {
d.tok = b
}
}
}
// fmt.Printf("1: n: bytes: %s, neg: %v, dot: %v, exponent: %v, mantissaEndIndex: %v\n",
// n.bytes, n.neg, n.dot, n.exponent, n.mantissaEndIndex)
return
}
func (d *jsonDecDriver) DecodeInt(bitsize uint8) (i int64) {
d.decNum(false)
n := &d.n
if n.manOverflow {
d.d.errorf("json: overflow integer after: %v", n.mantissa)
bs := d.decNumBytes()
// if bytes.ContainsAny(bs, ".eE") {
// d.d.errorf("json: decoding int, but found one or more of the chars: .eE: %s", bs)
// return
// }
i, err := strconv.ParseInt(stringView(bs), 10, int(bitsize))
if err != nil {
d.d.errorf("json: decode int from %s: %v", bs, err)
return
}
var u uint64
if n.exponent == 0 {
u = n.mantissa
} else if n.exponent < 0 {
d.d.errorf("json: fractional integer")
return
} else if n.exponent > 0 {
var overflow bool
if u, overflow = n.uintExp(); overflow {
d.d.errorf("json: overflow integer")
return
}
}
i = int64(u)
if n.neg {
i = -i
}
if chkOvf.Int(i, bitsize) {
d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs)
return
}
// fmt.Printf("DecodeInt: %v\n", i)
return
}
// floatVal MUST only be called after a decNum, as d.bs now contains the bytes of the number
func (d *jsonDecDriver) floatVal() (f float64) {
f, useStrConv := d.n.floatVal()
if useStrConv {
var err error
if f, err = strconv.ParseFloat(stringView(d.bs), 64); err != nil {
panic(fmt.Errorf("parse float: %s, %v", d.bs, err))
}
if d.n.neg {
f = -f
}
}
return
}
func (d *jsonDecDriver) DecodeUint(bitsize uint8) (u uint64) {
d.decNum(false)
n := &d.n
if n.neg {
d.d.errorf("json: unsigned integer cannot be negative")
return
}
if n.manOverflow {
d.d.errorf("json: overflow integer after: %v", n.mantissa)
return
}
if n.exponent == 0 {
u = n.mantissa
} else if n.exponent < 0 {
d.d.errorf("json: fractional integer")
return
} else if n.exponent > 0 {
var overflow bool
if u, overflow = n.uintExp(); overflow {
d.d.errorf("json: overflow integer")
return
}
}
if chkOvf.Uint(u, bitsize) {
d.d.errorf("json: overflow %v bits: %s", bitsize, d.bs)
return
}
// fmt.Printf("DecodeUint: %v\n", u)
return
}
func (d *jsonDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
d.decNum(true)
f = d.floatVal()
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("json: overflow float32: %v, %s", f, d.bs)
bs := d.decNumBytes()
bitsize := 64
if chkOverflow32 {
bitsize = 32
}
f, err := strconv.ParseFloat(stringView(bs), bitsize)
if err != nil {
d.d.errorf("json: decode float from %s: %v", bs, err)
return
}
return
@ -918,19 +606,14 @@ func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxta
return
}
func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
// if decoding into raw bytes, and the RawBytesExt is configured, use it to decode.
if !isstring && d.se.i != nil {
if d.se.i != nil {
bsOut = bs
d.DecodeExt(&bsOut, 0, &d.se)
return
}
d.appendStringAsBytes()
// if isstring, then just return the bytes, even if it is using the scratch buffer.
// the bytes will be converted to a string as needed.
if isstring {
return d.bs
}
// if appendStringAsBytes returned a zero-len slice, then treat as nil.
// This should only happen for null, and "".
if len(d.bs) == 0 {
@ -956,86 +639,110 @@ func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [
return
}
const jsonAlwaysReturnInternString = false
func (d *jsonDecDriver) DecodeString() (s string) {
d.appendStringAsBytes()
// if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key
if d.c == containerMapKey {
if jsonAlwaysReturnInternString || d.c == containerMapKey {
return d.d.string(d.bs)
}
return string(d.bs)
}
func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) {
d.appendStringAsBytes()
return d.bs
}
func (d *jsonDecDriver) appendStringAsBytes() {
if d.tok == 0 {
var b byte
r := d.r
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
}
d.tok = b
}
// handle null as a string
if d.tok == 'n' {
d.readStrIdx(10, 13) // ull
d.bs = d.bs[:0]
return
d.tok = d.r.skip(&jsonCharWhitespaceSet)
}
if d.tok != '"' {
d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok)
// d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok)
// handle non-string scalar: null, true, false or a number
switch d.tok {
case 'n':
d.readStrIdx(10, 13) // ull
d.bs = d.bs[:0]
case 'f':
d.readStrIdx(5, 9) // alse
d.bs = d.bs[:5]
copy(d.bs, "false")
case 't':
d.readStrIdx(1, 4) // rue
d.bs = d.bs[:4]
copy(d.bs, "true")
default:
// try to parse a valid number
bs := d.decNumBytes()
d.bs = d.bs[:len(bs)]
copy(d.bs, bs)
}
return
}
d.tok = 0
v := d.bs[:0]
var c uint8
d.tok = 0
r := d.r
for {
c = r.readn1()
if c == '"' {
var cs []byte
v := d.bs[:0]
// var c uint8
for i := 0; ; i++ {
if i == len(cs) {
cs = r.readUntil(d.b2[:0], '"')
i = 0
}
if cs[i] == '"' {
break
} else if c == '\\' {
c = r.readn1()
switch c {
case '"', '\\', '/', '\'':
v = append(v, c)
case 'b':
v = append(v, '\b')
case 'f':
v = append(v, '\f')
case 'n':
v = append(v, '\n')
case 'r':
v = append(v, '\r')
case 't':
v = append(v, '\t')
case 'u':
rr := d.jsonU4(false)
// fmt.Printf("$$$$$$$$$: is surrogate: %v\n", utf16.IsSurrogate(rr))
if utf16.IsSurrogate(rr) {
rr = utf16.DecodeRune(rr, d.jsonU4(true))
}
if cs[i] != '\\' {
v = append(v, cs[i])
continue
}
// cs[i] == '\\'
i++
switch cs[i] {
case '"', '\\', '/', '\'':
v = append(v, cs[i])
case 'b':
v = append(v, '\b')
case 'f':
v = append(v, '\f')
case 'n':
v = append(v, '\n')
case 'r':
v = append(v, '\r')
case 't':
v = append(v, '\t')
case 'u':
rr := d.jsonU4Arr([4]byte{cs[i+1], cs[i+2], cs[i+3], cs[i+4]})
i += 4
// fmt.Printf("$$$$$$$$$: is surrogate: %v\n", utf16.IsSurrogate(rr))
if utf16.IsSurrogate(rr) {
// fmt.Printf(">>>> checking utf16 surrogate\n")
if !(cs[i+1] == '\\' && cs[i+2] == 'u') {
d.d.errorf(`json: unquoteStr: invalid unicode sequence. Expecting \u`)
return
}
w2 := utf8.EncodeRune(d.bstr[:], rr)
v = append(v, d.bstr[:w2]...)
default:
d.d.errorf("json: unsupported escaped value: %c", c)
i += 2
rr = utf16.DecodeRune(rr, d.jsonU4Arr([4]byte{cs[i+1], cs[i+2], cs[i+3], cs[i+4]}))
i += 4
}
} else {
v = append(v, c)
w2 := utf8.EncodeRune(d.bstr[:], rr)
v = append(v, d.bstr[:w2]...)
default:
d.d.errorf("json: unsupported escaped value: %c", cs[i])
}
}
d.bs = v
}
func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune {
r := d.r
if checkSlashU && !(r.readn1() == '\\' && r.readn1() == 'u') {
d.d.errorf(`json: unquoteStr: invalid unicode sequence. Expecting \u`)
return 0
}
func (d *jsonDecDriver) jsonU4Arr(bs [4]byte) (r rune) {
// u, _ := strconv.ParseUint(string(d.bstr[:4]), 16, 64)
var u uint32
for i := 0; i < 4; i++ {
v := r.readn1()
for _, v := range bs {
if '0' <= v && v <= '9' {
v = v - '0'
} else if 'a' <= v && v <= 'z' {
@ -1048,6 +755,7 @@ func (d *jsonDecDriver) jsonU4(checkSlashU bool) rune {
}
u = u*16 + uint32(v)
}
// fmt.Printf(">>>>>>>> jsonU4Arr: %v, %s\n", rune(u), string(rune(u)))
return rune(u)
}
@ -1056,11 +764,7 @@ func (d *jsonDecDriver) DecodeNaked() {
// var decodeFurther bool
if d.tok == 0 {
var b byte
r := d.r
for b = r.readn1(); jsonIsWS(b); b = r.readn1() {
}
d.tok = b
d.tok = d.r.skip(&jsonCharWhitespaceSet)
}
switch d.tok {
case 'n':
@ -1086,41 +790,35 @@ func (d *jsonDecDriver) DecodeNaked() {
z.v = valueTypeString
z.s = d.DecodeString()
default: // number
d.decNum(true)
n := &d.n
// if the string had a any of [.eE], then decode as float.
switch {
case n.explicitExponent, n.dot, n.exponent < 0, n.manOverflow:
bs := d.decNumBytes()
var err error
if len(bs) == 0 {
d.d.errorf("json: decode number from empty string")
return
} else if d.h.PreferFloat ||
bytes.IndexByte(bs, '.') != -1 ||
bytes.IndexByte(bs, 'e') != -1 ||
bytes.IndexByte(bs, 'E') != -1 {
// } else if d.h.PreferFloat || bytes.ContainsAny(bs, ".eE") {
z.v = valueTypeFloat
z.f = d.floatVal()
case n.exponent == 0:
u := n.mantissa
switch {
case n.neg:
z.v = valueTypeInt
z.i = -int64(u)
case d.h.SignedInteger:
z.v = valueTypeInt
z.i = int64(u)
default:
z.v = valueTypeUint
z.u = u
z.f, err = strconv.ParseFloat(stringView(bs), 64)
} else if d.h.SignedInteger || bs[0] == '-' {
z.v = valueTypeInt
z.i, err = strconv.ParseInt(stringView(bs), 10, 64)
} else {
z.v = valueTypeUint
z.u, err = strconv.ParseUint(stringView(bs), 10, 64)
}
if err != nil {
if z.v == valueTypeInt || z.v == valueTypeUint {
if v, ok := err.(*strconv.NumError); ok && (v.Err == strconv.ErrRange || v.Err == strconv.ErrSyntax) {
z.v = valueTypeFloat
z.f, err = strconv.ParseFloat(stringView(bs), 64)
}
}
default:
u, overflow := n.uintExp()
switch {
case overflow:
z.v = valueTypeFloat
z.f = d.floatVal()
case n.neg:
z.v = valueTypeInt
z.i = -int64(u)
case d.h.SignedInteger:
z.v = valueTypeInt
z.i = int64(u)
default:
z.v = valueTypeUint
z.u = u
if err != nil {
d.d.errorf("json: decode number from %s: %v", bs, err)
return
}
}
// fmt.Printf("DecodeNaked: Number: %T, %v\n", v, v)
@ -1131,6 +829,14 @@ func (d *jsonDecDriver) DecodeNaked() {
return
}
// func jsonAcceptNonWS(b byte) bool {
// return !jsonCharWhitespaceSet[b]
// }
// func jsonAcceptDQuote(b byte) bool {
// return b == '"'
// }
//----------------------
// JsonHandle is a handle for JSON encoding format.
@ -1152,6 +858,7 @@ func (d *jsonDecDriver) DecodeNaked() {
type JsonHandle struct {
textEncodingType
BasicHandle
// RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way.
// If not configured, raw bytes are encoded to/from base64 text.
RawBytesExt InterfaceExt
@ -1173,6 +880,17 @@ type JsonHandle struct {
// containing the exact integer representation as a decimal.
// - else encode all integers as a json number (default)
IntegerAsString uint8
// HTMLCharsAsIs controls how to encode some special characters to html: < > &
//
// By default, we encode them as \uXXX
// to prevent security holes when served from some browsers.
HTMLCharsAsIs bool
// PreferFloat says that we will default to decoding a number as a float.
// If not set, we will examine the characters of the number and decode as an
// integer type if it doesn't have any of the characters [.eE].
PreferFloat bool
}
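// Illustrative only (not part of the upstream diff): a hedged sketch of how the
// options above combine, assuming the package's exported NewEncoderBytes API.
// The function name and option values are assumptions for illustration.
func jsonHandleExample() ([]byte, error) {
	var h JsonHandle
	h.HTMLCharsAsIs = true  // emit < > & as-is rather than as \u00XX escapes
	h.PreferFloat = true    // DecodeNaked favors float64 for bare numbers
	h.IntegerAsString = 'L' // quote integers outside the +/-2^53 safe range
	var out []byte
	err := NewEncoderBytes(&out, &h).Encode(map[string]interface{}{"n": int64(1) << 60})
	return out, err
}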
func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
@ -1221,7 +939,7 @@ func (d *jsonDecDriver) reset() {
d.bs = d.bs[:0]
}
d.c, d.tok = 0, 0
d.n.reset()
// d.n.reset()
}
var jsonEncodeTerminate = []byte{' '}

View File

@ -349,11 +349,11 @@ func (d *msgpackDecDriver) DecodeNaked() {
n.s = d.DecodeString()
} else {
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false)
n.l = d.DecodeBytes(nil, false)
}
case bd == mpBin8, bd == mpBin16, bd == mpBin32:
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false)
n.l = d.DecodeBytes(nil, false)
case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
n.v = valueTypeArray
decodeFurther = true
@ -525,12 +525,11 @@ func (d *msgpackDecDriver) DecodeBool() (b bool) {
return
}
func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
if !d.bdRead {
d.readNextBd()
}
var clen int
// ignore isstring. Expect that the bytes may be found from msgpackContainerStr or msgpackContainerBin
if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
clen = d.readContainerLen(msgpackContainerBin)
} else {
@ -549,11 +548,15 @@ func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOu
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
}
func (d *msgpackDecDriver) DecodeString() (s string) {
return string(d.DecodeBytes(d.b[:], true, true))
return string(d.DecodeBytes(d.b[:], true))
}
func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) {
return d.DecodeBytes(d.b[:], true)
}
func (d *msgpackDecDriver) readNextBd() {
@ -569,6 +572,9 @@ func (d *msgpackDecDriver) uncacheRead() {
}
func (d *msgpackDecDriver) ContainerType() (vt valueType) {
if !d.bdRead {
d.readNextBd()
}
bd := d.bd
if bd == mpNil {
return valueTypeNil
@ -621,10 +627,16 @@ func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int)
}
func (d *msgpackDecDriver) ReadMapStart() int {
if !d.bdRead {
d.readNextBd()
}
return d.readContainerLen(msgpackContainerMap)
}
func (d *msgpackDecDriver) ReadArrayStart() int {
if !d.bdRead {
d.readNextBd()
}
return d.readContainerLen(msgpackContainerList)
}
@ -678,10 +690,10 @@ func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs
}
xbd := d.bd
if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 {
xbs = d.DecodeBytes(nil, false, true)
xbs = d.DecodeBytes(nil, true)
} else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 ||
(xbd >= mpFixStrMin && xbd <= mpFixStrMax) {
xbs = d.DecodeBytes(nil, true, true)
xbs = d.DecodeStringAsBytes()
} else {
clen := d.readExtLen()
xtag = d.r.readn1()
@ -727,7 +739,7 @@ func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver {
}
func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver {
return &msgpackDecDriver{d: d, r: d.r, h: h, br: d.bytes}
return &msgpackDecDriver{d: d, h: h, r: d.r, br: d.bytes}
}
func (e *msgpackEncDriver) reset() {
@ -735,7 +747,7 @@ func (e *msgpackEncDriver) reset() {
}
func (d *msgpackDecDriver) reset() {
d.r = d.d.r
d.r, d.br = d.d.r, d.d.bytes
d.bd, d.bdRead = 0, false
}

View File

@ -105,10 +105,9 @@ func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64) { return uint64(h.
func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) }
func (h *noopDrv) DecodeBool() (b bool) { return h.m(2) == 0 }
func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8)] }
func (h *noopDrv) DecodeStringAsBytes() []byte { return h.DecodeBytes(nil, true) }
// func (h *noopDrv) DecodeStringAsBytes(bs []byte) []byte { return h.DecodeBytes(bs) }
func (h *noopDrv) DecodeBytes(bs []byte, isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] }
func (h *noopDrv) DecodeBytes(bs []byte, zerocopy bool) []byte { return h.B[h.m(len(h.B))] }
func (h *noopDrv) ReadEnd() { h.end() }

View File

@ -1,3 +0,0 @@
package codec
//go:generate bash prebuild.sh

View File

@ -174,6 +174,9 @@ func (d *simpleDecDriver) uncacheRead() {
}
func (d *simpleDecDriver) ContainerType() (vt valueType) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdNil {
return valueTypeNil
} else if d.bd == simpleVdByteArray || d.bd == simpleVdByteArray+1 ||
@ -315,11 +318,17 @@ func (d *simpleDecDriver) DecodeBool() (b bool) {
}
func (d *simpleDecDriver) ReadMapStart() (length int) {
if !d.bdRead {
d.readNextBd()
}
d.bdRead = false
return d.decLen()
}
func (d *simpleDecDriver) ReadArrayStart() (length int) {
if !d.bdRead {
d.readNextBd()
}
d.bdRead = false
return d.decLen()
}
@ -352,10 +361,14 @@ func (d *simpleDecDriver) decLen() int {
}
func (d *simpleDecDriver) DecodeString() (s string) {
return string(d.DecodeBytes(d.b[:], true, true))
return string(d.DecodeBytes(d.b[:], true))
}
func (d *simpleDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) {
return d.DecodeBytes(d.b[:], true)
}
func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) {
if !d.bdRead {
d.readNextBd()
}
@ -372,7 +385,7 @@ func (d *simpleDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs)
}
func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
@ -406,7 +419,7 @@ func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs [
}
xbs = d.r.readx(l)
case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
xbs = d.DecodeBytes(nil, false, true)
xbs = d.DecodeBytes(nil, true)
default:
d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). Got: 0x%x", d.bd)
return
@ -454,7 +467,7 @@ func (d *simpleDecDriver) DecodeNaked() {
n.s = d.DecodeString()
case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false)
n.l = d.DecodeBytes(nil, false)
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
n.v = valueTypeExt
l := d.decLen()
@ -510,7 +523,7 @@ func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver {
}
func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver {
return &simpleDecDriver{d: d, r: d.r, h: h, br: d.bytes}
return &simpleDecDriver{d: d, h: h, r: d.r, br: d.bytes}
}
func (e *simpleEncDriver) reset() {
@ -518,7 +531,7 @@ func (e *simpleEncDriver) reset() {
}
func (d *simpleDecDriver) reset() {
d.r = d.d.r
d.r, d.br = d.d.r, d.d.bytes
d.bd, d.bdRead = 0, false
}

View File

@ -5,23 +5,10 @@ package codec
import (
"fmt"
"reflect"
"time"
)
var (
timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
timeExtEncFn = func(rv reflect.Value) (bs []byte, err error) {
defer panicToErr(&err)
bs = timeExt{}.WriteExt(rv.Interface())
return
}
timeExtDecFn = func(rv reflect.Value, bs []byte) (err error) {
defer panicToErr(&err)
timeExt{}.ReadExt(rv.Interface(), bs)
return
}
)
var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
type timeExt struct{}

426
cmd/vendor/github.com/ugorji/go/codec/xml.go generated vendored Normal file
View File

@ -0,0 +1,426 @@
// +build ignore
package codec
import "reflect"
/*
A strict Non-validating namespace-aware XML 1.0 parser and (en|de)coder.
We are attempting this due to perceived issues with encoding/xml:
- Complicated. It tried to do too much, and is not as simple to use as json.
- Due to over-engineering, reflection is over-used AND performance suffers:
java is 6X faster: http://fabsk.eu/blog/category/informatique/dev/golang/
even PYTHON performs better: http://outgoing.typepad.com/outgoing/2014/07/exploring-golang.html
codec framework will offer the following benefits
- VASTLY improved performance (when using reflection-mode or codecgen)
- simplicity and consistency: with the rest of the supported formats
- all other benefits of codec framework (streaming, codegeneration, etc)
codec is not a drop-in replacement for encoding/xml.
It is a replacement, based on the simplicity and performance of codec.
Look at it like JAXB for Go.
Challenges:
- Need to output XML preamble, with all namespaces at the right location in the output.
- Each "end" block is dynamic, so we need to maintain a context-aware stack
- How to decide when to use an attribute VS an element
- How to handle chardata, attr, comment EXPLICITLY.
- Should it output fragments?
e.g. encoding a bool should just output true OR false, which is not well-formed XML.
Extend the struct tag. See representative example:
type X struct {
ID uint8 `codec:"xid|http://ugorji.net/x-namespace id,omitempty,toarray,attr,cdata"`
}
Based on this, we encode
- fields as elements, BUT encode as attributes if struct tag contains ",attr".
- text as entity-escaped text, BUT encode as CDATA if struct tag contains ",cdata".
In this mode, we only encode as attribute if ",attr" is found, and only encode as CDATA
if ",cdata" is found in the struct tag.
To handle namespaces:
- XMLHandle is denoted as being namespace-aware.
Consequently, we WILL use the ns:name pair to encode and decode if defined, else use the plain name.
- *Encoder and *Decoder know whether the Handle "prefers" namespaces.
- add *Encoder.getEncName(*structFieldInfo).
No one calls *structFieldInfo.indexForEncName directly anymore
- add *Decoder.getStructFieldInfo(encName string) // encName here is either like abc, or h1:nsabc
No one accesses .encName anymore except in
- let encode.go and decode.go use these (for consistency)
- only problem exists for gen.go, where we create a big switch on encName.
Now, we also have to add a switch on strings.endsWith(kName, encNsName)
- gen.go will need to have many more methods, and then double-on the 2 switch loops like:
switch k {
case "abc" : x.abc()
case "def" : x.def()
default {
switch {
case !nsAware: panic(...)
case strings.endsWith("nsabc"): x.abc()
default: panic(...)
}
}
}
The structure below accommodates this:
type typeInfo struct {
sfi []*structFieldInfo // sorted by encName
sfins // sorted by namespace
sfia // sorted, to have those with attributes at the top. Needed to write XML appropriately.
sfip // unsorted
}
type structFieldInfo struct {
encName
nsEncName
ns string
attr bool
cdata bool
}
indexForEncName is now an internal helper function that takes a sorted array
(one of ti.sfins or ti.sfi). It is only used by *Encoder.getStructFieldInfo(...)
There will be a separate parser from the builder.
The parser will have a method: next() xmlToken method.
xmlToken has fields:
- type uint8: 0 | ElementStart | ElementEnd | AttrKey | AttrVal | Text
- value string
- ns string
SEE: http://www.xml.com/pub/a/98/10/guide0.html?page=3#ENTDECL
The following are skipped when parsing:
- External Entities (from external file)
- Notation Declaration e.g. <!NOTATION GIF87A SYSTEM "GIF">
- Entity Declarations & References
- XML Declaration (assume UTF-8)
- XML Directive i.e. <! ... >
- Other Declarations: Notation, etc.
- Comment
- Processing Instruction
- schema / DTD for validation:
We are not a VALIDATING parser. Validation is done elsewhere.
However, some parts of the DTD internal subset are used (SEE BELOW).
For Attribute List Declarations e.g.
<!ATTLIST foo:oldjoke name ID #REQUIRED label CDATA #IMPLIED status ( funny | notfunny ) 'funny' >
We considered using the ATTLIST to get "default" value, but not to validate the contents. (VETOED)
The following XML features are supported
- Namespace
- Element
- Attribute
- cdata
- Unicode escape
The following DTD (when as an internal sub-set) features are supported:
- Internal Entities e.g.
<!ELEMENT burns "ugorji is cool" > AND entities for the set: [<>&"']
- Parameter entities e.g.
<!ENTITY % personcontent "ugorji is cool"> <!ELEMENT burns (%personcontent;)*>
At decode time, a structure containing the following is kept
- namespace mapping
- default attribute values
- all internal entities (<>&"' and others written in the document)
When decode starts, it parses XML namespace declarations and creates a map in the
xmlDecDriver. While parsing, that map continuously gets updated.
The only problem happens when a namespace declaration happens on the node that it defines.
e.g. <hn:name xmlns:hn="http://www.ugorji.net" >
To handle this, each Element must be fully parsed at a time,
even if it amounts to multiple tokens which are returned one at a time on request.
xmlns is a special attribute name.
- It is used to define namespaces, including the default
- It is never returned as an AttrKey or AttrVal.
*We may decide later to allow user to use it e.g. you want to parse the xmlns mappings into a field.*
Number, bool, null, mapKey, etc can all be decoded from any xmlToken.
This accommodates map[int]string for example.
It should be possible to create a schema from the types,
or vice versa (generate types from schema with appropriate tags).
This is however out-of-scope from this parsing project.
We should write all namespace information at the first point that it is referenced in the tree,
and use the mapping for all child nodes and attributes. This means that state is maintained
at a point in the tree. This also means that calls to Decode or MustDecode will reset some state.
When decoding, it is important to keep track of entity references and default attribute values.
It seems these can only be stored in the DTD components. We should honor them when decoding.
Configuration for XMLHandle will look like this:
XMLHandle
DefaultNS string
// Encoding:
NS map[string]string // ns URI to key, used for encoding
// Decoding: in case ENTITY declared in external schema or dtd, store info needed here
Entities map[string]string // map of entity rep to character
During encode, if a namespace mapping is not defined for a namespace found on a struct,
then we create a mapping for it using nsN (where N is 1..1000000, and doesn't conflict
with any other namespace mapping).
Note that different fields in a struct can have different namespaces.
However, all fields will default to the namespace on the _struct field (if defined).
An XML document is a name, a map of attributes and a list of children.
Consequently, we cannot "DecodeNaked" into a map[string]interface{} (for example).
We have to "DecodeNaked" into something that resembles XML data.
To support DecodeNaked (decode into nil interface{}) we have to define some "supporting" types:
type Name struct { // Preferred. Fewer allocations due to conversions.
Local string
Space string
}
type Element struct {
Name Name
Attrs map[Name]string
Children []interface{} // each child is either *Element or string
}
Only two "supporting" types are exposed for XML: Name and Element.
We considered 'type Name string' where Name is like "Space Local" (space-separated).
We decided against it, because each creation of a name would lead to
double allocation (first convert []byte to string, then concatenate them into a string).
The benefit is that it is faster to read Attrs from a map. But given that Element is a value
object, we want to eschew methods and have public exposed variables.
We also considered the following, where xml types were not value objects, and we used
intelligent accessor methods to extract information and for performance.
*** WE DECIDED AGAINST THIS. ***
type Attr struct {
Name Name
Value string
}
// Element is a ValueObject: There are no accessor methods.
// Make element self-contained.
type Element struct {
Name Name
attrsMap map[string]string // where key is "Space Local"
attrs []Attr
childrenT []string
childrenE []Element
childrenI []int // each child is a index into T or E.
}
func (x *Element) child(i) interface{} // returns string or *Element
Per XML spec and our default handling, white space is insignificant between elements,
specifically between parent-child or siblings. White space occurring alone between start
and end element IS significant. However, if xml:space='preserve', then we 'preserve'
all whitespace. This is more critical when doing a DecodeNaked, but MAY not be as critical
when decoding into a typed value.
**Note: there is no xml: namespace. The xml: attributes were defined before namespaces.**
**So treat them as just "directives" that should be interpreted to mean something**.
On encoding, we don't add any prettifying markup (indenting, etc).
A document or element can only be encoded/decoded from/to a struct. In this mode:
- struct name maps to element name (or tag-info from _struct field)
- fields are mapped to child elements or attributes
A map is either encoded as attributes on current element, or as a set of child elements.
Maps are encoded as attributes iff their keys and values are primitives (number, bool, string).
A list is encoded as a set of child elements.
Primitives (number, bool, string) are encoded as an element, attribute or text
depending on the context.
Extensions must encode themselves as a text string.
Encoding is tough, specifically when encoding mappings, because we need to encode
as either attribute or element. To do this, we need to default to encoding as attributes,
and then let Encoder inform the Handle when to start encoding as nodes.
i.e. Encoder does something like:
h.EncodeMapStart()
h.Encode(), h.Encode(), ...
h.EncodeMapNotAttrSignal() // this is not a bool, because it's a signal
h.Encode(), h.Encode(), ...
h.EncodeEnd()
Only XMLHandle understands this, and will set itself to start encoding as elements.
This support extends to maps. For example, if a struct field is a map, and it has
the struct tag signifying it should be attr, then all its fields are encoded as attributes.
e.g.
type X struct {
M map[string]int `codec:"m,attr"` // encode as attributes
}
Question:
- if encoding a map, what if map keys have spaces in them???
Then they cannot be attributes or child elements. Error.
Misc:
- For attribute values, normalize by trimming beginning and ending white space,
and converting every white space sequence to a single space.
- ATTLIST restrictions are enforced.
e.g. default value of xml:space, skipping xml:XYZ style attributes, etc.
- Consider supporting NON-STRICT mode (e.g. to handle HTML parsing).
Some elements e.g. br, hr, etc need not close and should be auto-closed
... (see http://www.w3.org/TR/html4/loose.dtd)
An expansive set of entities is pre-defined.
- Have easy way to create a HTML parser:
add a HTML() method to XMLHandle, that will set Strict=false, specify AutoClose,
and add HTML Entities to the list.
- Support validating element/attribute XMLName before writing it.
Keep this behind a flag, which is set to false by default (for performance).
type XMLHandle struct {
CheckName bool
}
ROADMAP (1 week):
- build encoder (1 day)
- build decoder (based off xmlParser) (1 day)
- implement xmlParser (2 days).
Look at encoding/xml for inspiration.
- integrate and TEST (1 days)
- write article and post it (1 day)
*/
// ----------- PARSER -------------------
type xmlTokenType uint8
const (
_ xmlTokenType = iota << 1
xmlTokenElemStart
xmlTokenElemEnd
xmlTokenAttrKey
xmlTokenAttrVal
xmlTokenText
)
type xmlToken struct {
Type xmlTokenType
Value string
Namespace string // blank for AttrVal and Text
}
type xmlParser struct {
r decReader
toks []xmlToken // list of tokens.
ptr int // ptr into the toks slice
done bool // nothing else to parse. r now returns EOF.
}
func (x *xmlParser) next() (t *xmlToken) {
// once x.done and len(x.toks) == 0, return nil (to signify finish)
if !x.done && len(x.toks) == 0 {
x.nextTag()
}
// parses one element at a time (into possibly many tokens)
if x.ptr < len(x.toks) {
t = &(x.toks[x.ptr])
x.ptr++
if x.ptr == len(x.toks) {
x.ptr = 0
x.toks = x.toks[:0]
}
}
return
}
// nextTag parses the next element and fills up toks.
// It sets the done flag once EOF is reached.
func (x *xmlParser) nextTag() {
// TODO: implement.
}
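// Illustration only (not part of this file): a decoding driver could drain the
// token stream like this, relying on next() returning nil once parsing is finished:
//
//	for t := x.next(); t != nil; t = x.next() {
//		switch t.Type {
//		case xmlTokenElemStart, xmlTokenElemEnd:
//			// open/close the current element
//		case xmlTokenAttrKey, xmlTokenAttrVal:
//			// collect attributes
//		case xmlTokenText:
//			// character data
//		}
//	}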
// ----------- ENCODER -------------------
type xmlEncDriver struct {
e *Encoder
w encWriter
h *XMLHandle
b [64]byte // scratch
bs []byte // scratch
// s jsonStack
noBuiltInTypes
}
// ----------- DECODER -------------------
type xmlDecDriver struct {
d *Decoder
h *XMLHandle
r decReader // *bytesDecReader decReader
ct valueType // container type. one of unset, array or map.
bstr [8]byte // scratch used for string \UXXX parsing
b [64]byte // scratch
// wsSkipped bool // whitespace skipped
// s jsonStack
noBuiltInTypes
}
// DecodeNaked will decode into an XMLNode
// XMLName is a value object representing a namespace-aware NAME
type XMLName struct {
Local string
Space string
}
// XMLNode represents a "union" of the different types of XML Nodes.
// Only one of the fields (Element or Text) is set.
type XMLNode struct {
Element *Element
Text string
}
// XMLElement is a value object representing a fully-parsed XML element.
type XMLElement struct {
Name Name
Attrs map[XMLName]string
// Children is a list of child nodes, each being a *XMLElement or string
Children []XMLNode
}
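// Illustration only (a sketch, not part of the API): given the union convention
// above, a caller inspects which field of each child node is set:
//
//	for _, n := range elem.Children {
//		if n.Element != nil {
//			// child element node
//		} else {
//			// text node: n.Text
//		}
//	}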
// ----------- HANDLE -------------------
type XMLHandle struct {
BasicHandle
textEncodingType
DefaultNS string
NS map[string]string // ns URI to key, for encoding
Entities map[string]string // entity representation to string, for encoding.
}
func (h *XMLHandle) newEncDriver(e *Encoder) encDriver {
return &xmlEncDriver{e: e, w: e.w, h: h}
}
func (h *XMLHandle) newDecDriver(d *Decoder) decDriver {
// d := xmlDecDriver{r: r.(*bytesDecReader), h: h}
hd := xmlDecDriver{d: d, r: d.r, h: h}
hd.n.bytes = d.b[:]
return &hd
}
func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
return h.SetExt(rt, tag, &setExtWrapper{i: ext})
}
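// Usage sketch (untested, for illustration only; assumes this handle is accepted
// by the standard constructors like any other Handle):
//
//	var h XMLHandle
//	// optionally set h.DefaultNS, h.NS, h.Entities before use
//	if err := NewEncoder(w, &h).Encode(v); err != nil { /* handle error */ }
//	if err := NewDecoder(r, &h).Decode(&v); err != nil { /* handle error */ }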
var _ decDriver = (*xmlDecDriver)(nil)
var _ encDriver = (*xmlEncDriver)(nil)

glide.lock generated
View File

@ -1,5 +1,5 @@
hash: f12c87d509e99534547e948533b8e8a88043c2261c0be08f155a26cd920b5fd4
updated: 2017-09-11T14:52:48.727295105-07:00
hash: 49950d40cee960e66f818746fca6ccaa849c62851d81d0960f6089c3677eec65
updated: 2017-09-19T14:50:30.702073385+02:00
imports:
- name: github.com/beorn7/perks
version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
@ -102,7 +102,7 @@ imports:
- name: github.com/spf13/pflag
version: e57e3eeb33f795204c1ca35f56c44f83227c6e66
- name: github.com/ugorji/go
version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74
version: 54210f4e076c57f351166f0ed60e67d3fca57a36
subpackages:
- codec
- name: github.com/urfave/cli

View File

@ -68,7 +68,7 @@ import:
- package: github.com/spf13/pflag
version: v1.0.0
- package: github.com/ugorji/go
version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74
version: 54210f4e076c57f351166f0ed60e67d3fca57a36
subpackages:
- codec
- package: github.com/urfave/cli