Mirror of https://github.com/etcd-io/etcd.git (synced 2024-09-27 06:25:44 +00:00)

Merge pull request #6967 from heyitsanthony/glide-versions

vendor: use version tags if possible

This commit is contained in: commit 1e92b7929c
20 cmd/vendor/github.com/beorn7/perks/LICENSE (generated, vendored, Normal file)
@@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
2 cmd/vendor/github.com/beorn7/perks/quantile/stream.go (generated, vendored)
@@ -133,7 +133,7 @@ func (s *Stream) Query(q float64) float64 {
if l == 0 {
return 0
}
i := int(float64(l) * q)
i := int(math.Ceil(float64(l) * q))
if i > 0 {
i -= 1
}
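The only change here replaces a floor with a ceiling when picking the index for a quantile query. A minimal worked sketch (not part of the diff; the sample slice and values are illustrative) of the off-by-one this addresses at high quantiles:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Ten sorted samples; query the 0.95 quantile.
	samples := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	l, q := len(samples), 0.95

	// Old behaviour: truncation. int(10*0.95) = 9, then -1 -> index 8 (value 9).
	iOld := int(float64(l) * q)
	if iOld > 0 {
		iOld--
	}

	// New behaviour: ceiling. ceil(9.5) = 10, then -1 -> index 9 (value 10).
	iNew := int(math.Ceil(float64(l) * q))
	if iNew > 0 {
		iNew--
	}

	fmt.Println(samples[iOld], samples[iNew]) // 9 10
}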
29 cmd/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go (generated, vendored)
@@ -11,6 +11,8 @@ import (
type roffRenderer struct{}

var listCounter int

func RoffRenderer(flags int) blackfriday.Renderer {
return &roffRenderer{}
}
@@ -33,8 +35,12 @@ func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) {
line = append(line, []byte("\" ")...)
out.Write(line)
}
out.WriteString("\n")

out.WriteString(" \"\"\n")
// disable hyphenation
out.WriteString(".nh\n")
// disable justification (adjust text to left margin only)
out.WriteString(".ad l\n")
}

func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) {
@@ -80,23 +86,24 @@ func (r *roffRenderer) HRule(out *bytes.Buffer) {
func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) {
marker := out.Len()
out.WriteString(".IP ")
if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
out.WriteString("\\(bu 2")
} else {
out.WriteString("\\n+[step" + string(flags) + "]")
listCounter = 1
}
out.WriteString("\n")
if !text() {
out.Truncate(marker)
return
}

}

func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) {
out.WriteString("\n\\item ")
if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
out.WriteString(fmt.Sprintf(".IP \"%3d.\" 5\n", listCounter))
listCounter += 1
} else {
out.WriteString(".IP \\(bu 2\n")
}
out.Write(text)
out.WriteString("\n")
}

func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
@@ -185,6 +192,7 @@ func (r *roffRenderer) LineBreak(out *bytes.Buffer) {
}

func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
out.Write(content)
r.AutoLink(out, link, 0)
}
@@ -249,6 +257,11 @@ func needsBackslash(c byte) bool {
func escapeSpecialChars(out *bytes.Buffer, text []byte) {
for i := 0; i < len(text); i++ {
// escape initial apostrophe or period
if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
out.WriteString("\\&")
}

// directly copy normal characters
org := i
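For orientation, this renderer is normally driven through blackfriday's v1 entry point. A minimal sketch of rendering a short ordered list to roff; the blackfriday import path and the Markdown signature are assumptions about the surrounding (v1) API, not something this diff shows:

package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/md2man"
	"github.com/russross/blackfriday"
)

func main() {
	src := []byte("1. first item\n2. second item\n")
	// RoffRenderer implements blackfriday.Renderer; per the diff above,
	// ListItem emits `.IP "  N." 5` for ordered items and `.IP \(bu 2` for bullets.
	out := blackfriday.Markdown(src, md2man.RoffRenderer(0), 0)
	fmt.Println(string(out))
}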
112 cmd/vendor/github.com/gogo/protobuf/proto/decode.go (generated, vendored)
@@ -61,6 +61,7 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
// x, n already 0
for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) {
return 0, 0
@@ -77,7 +78,13 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
return 0, 0
}

func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
// x, err already 0

i := p.index
l := len(p.buf)
@@ -100,107 +107,6 @@ func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
return
}

// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
i := p.index
buf := p.buf

if i >= len(buf) {
return 0, io.ErrUnexpectedEOF
} else if buf[i] < 0x80 {
p.index++
return uint64(buf[i]), nil
} else if len(buf)-i < 10 {
return p.decodeVarintSlow()
}

var b uint64
// we already checked the first byte
x = uint64(buf[i]) - 0x80
i++

b = uint64(buf[i])
i++
x += b << 7
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 7

b = uint64(buf[i])
i++
x += b << 14
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 14

b = uint64(buf[i])
i++
x += b << 21
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 21

b = uint64(buf[i])
i++
x += b << 28
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 28

b = uint64(buf[i])
i++
x += b << 35
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 35

b = uint64(buf[i])
i++
x += b << 42
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 42

b = uint64(buf[i])
i++
x += b << 49
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 49

b = uint64(buf[i])
i++
x += b << 56
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 56

b = uint64(buf[i])
i++
x += b << 63
if b&0x80 == 0 {
goto done
}
// x -= 0x80 << 63 // Always zero.

return 0, errOverflow

done:
p.index = i
return x, nil
}

// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
@@ -434,8 +340,6 @@ func (p *Buffer) DecodeGroup(pb Message) error {
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
//
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
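The doc comments above describe the varint wire format: 7 payload bits per byte, least-significant group first, with the high bit set while more bytes follow. A minimal standalone sketch of that decoding loop, independent of this package:

package main

import "fmt"

// decodeVarint reads one base-128 varint from buf and reports how many
// bytes it consumed; (0, 0) signals truncated or oversized input.
func decodeVarint(buf []byte) (x uint64, n int) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0 // truncated input
		}
		b := uint64(buf[n])
		n++
		x |= (b & 0x7F) << shift
		if b&0x80 == 0 {
			return x, n
		}
	}
	return 0, 0 // value longer than 64 bits
}

func main() {
	// 300 encodes as 0xAC 0x02: 0xAC carries the low 7 bits (44) plus a
	// continuation flag, 0x02 contributes 2<<7 = 256.
	v, n := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n) // 300 2
}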
100 cmd/vendor/github.com/gogo/protobuf/proto/duration.go (generated, vendored)
@@ -1,100 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

// This file implements conversions between google.protobuf.Duration
// and time.Duration.

import (
"errors"
"fmt"
"time"
)

const (
// Range of a Duration in seconds, as specified in
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
)

// validateDuration determines whether the Duration is valid according to the
// definition in google/protobuf/duration.proto. A valid Duration
// may still be too large to fit into a time.Duration (the range of Duration
// is about 10,000 years, and the range of time.Duration is about 290).
func validateDuration(d *duration) error {
if d == nil {
return errors.New("duration: nil Duration")
}
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
return fmt.Errorf("duration: %#v: seconds out of range", d)
}
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
return fmt.Errorf("duration: %#v: nanos out of range", d)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
}
return nil
}

// DurationFromProto converts a Duration to a time.Duration. DurationFromProto
// returns an error if the Duration is invalid or is too large to be
// represented in a time.Duration.
func durationFromProto(p *duration) (time.Duration, error) {
if err := validateDuration(p); err != nil {
return 0, err
}
d := time.Duration(p.Seconds) * time.Second
if int64(d/time.Second) != p.Seconds {
return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
}
if p.Nanos != 0 {
d += time.Duration(p.Nanos)
if (d < 0) != (p.Nanos < 0) {
return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
}
}
return d, nil
}

// DurationProto converts a time.Duration to a Duration.
func durationProto(d time.Duration) *duration {
nanos := d.Nanoseconds()
secs := nanos / 1e9
nanos -= secs * 1e9
return &duration{
Seconds: secs,
Nanos: int32(nanos),
}
}
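These removed helpers convert between the proto Duration message (whole seconds plus a nanosecond remainder, both carrying the same sign) and Go's time.Duration. A small standalone illustration of that split, not tied to this package:

package main

import (
	"fmt"
	"time"
)

func main() {
	d := -1500 * time.Millisecond

	// Split into whole seconds and the nanosecond remainder, as the
	// google.protobuf.Duration encoding does; both parts share a sign.
	nanos := d.Nanoseconds() // -1500000000
	secs := nanos / 1e9      // -1
	nanos -= secs * 1e9      // -500000000

	fmt.Println(secs, nanos) // -1 -500000000

	// Recombine into a time.Duration.
	back := time.Duration(secs)*time.Second + time.Duration(nanos)
	fmt.Println(back) // -1.5s
}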
202 cmd/vendor/github.com/gogo/protobuf/proto/duration_gogo.go (generated, vendored)
@@ -1,202 +0,0 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

import (
"reflect"
"time"
)

var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem()

type duration struct {
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
}

func (m *duration) Reset() { *m = duration{} }
func (*duration) ProtoMessage() {}
func (*duration) String() string { return "duration<string>" }

func init() {
RegisterType((*duration)(nil), "gogo.protobuf.proto.duration")
}

func (o *Buffer) decDuration() (time.Duration, error) {
b, err := o.DecodeRawBytes(true)
if err != nil {
return 0, err
}
dproto := &duration{}
if err := Unmarshal(b, dproto); err != nil {
return 0, err
}
return durationFromProto(dproto)
}

func (o *Buffer) dec_duration(p *Properties, base structPointer) error {
d, err := o.decDuration()
if err != nil {
return err
}
word64_Set(structPointer_Word64(base, p.field), o, uint64(d))
return nil
}

func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error {
d, err := o.decDuration()
if err != nil {
return err
}
word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d))
return nil
}

func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error {
d, err := o.decDuration()
if err != nil {
return err
}
newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType)))
setPtrCustomType(newBas, 0, &d)
return nil
}

func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error {
d, err := o.decDuration()
if err != nil {
return err
}
structPointer_Word64Slice(base, p.field).Append(uint64(d))
return nil
}

func size_duration(p *Properties, base structPointer) (n int) {
structp := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(structp) {
return 0
}
dur := structPointer_Interface(structp, durationType).(*time.Duration)
d := durationProto(*dur)
size := Size(d)
return size + sizeVarint(uint64(size)) + len(p.tagcode)
}

func (o *Buffer) enc_duration(p *Properties, base structPointer) error {
structp := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(structp) {
return ErrNil
}
dur := structPointer_Interface(structp, durationType).(*time.Duration)
d := durationProto(*dur)
data, err := Marshal(d)
if err != nil {
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
return nil
}

func size_ref_duration(p *Properties, base structPointer) (n int) {
dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration)
d := durationProto(*dur)
size := Size(d)
return size + sizeVarint(uint64(size)) + len(p.tagcode)
}

func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error {
dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration)
d := durationProto(*dur)
data, err := Marshal(d)
if err != nil {
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
return nil
}

func size_slice_duration(p *Properties, base structPointer) (n int) {
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration)
durs := *pdurs
for i := 0; i < len(durs); i++ {
if durs[i] == nil {
return 0
}
dproto := durationProto(*durs[i])
size := Size(dproto)
n += len(p.tagcode) + size + sizeVarint(uint64(size))
}
return n
}

func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error {
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration)
durs := *pdurs
for i := 0; i < len(durs); i++ {
if durs[i] == nil {
return errRepeatedHasNil
}
dproto := durationProto(*durs[i])
data, err := Marshal(dproto)
if err != nil {
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
}
return nil
}

func size_slice_ref_duration(p *Properties, base structPointer) (n int) {
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration)
durs := *pdurs
for i := 0; i < len(durs); i++ {
dproto := durationProto(durs[i])
size := Size(dproto)
n += len(p.tagcode) + size + sizeVarint(uint64(size))
}
return n
}

func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error {
pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration)
durs := *pdurs
for i := 0; i < len(durs); i++ {
dproto := durationProto(durs[i])
data, err := Marshal(dproto)
if err != nil {
return err
}
o.buf = append(o.buf, p.tagcode...)
o.EncodeRawBytes(data)
}
return nil
}
14 cmd/vendor/github.com/gogo/protobuf/proto/encode.go (generated, vendored)
@@ -234,6 +234,10 @@ func Marshal(pb Message) ([]byte, error) {
}
p := NewBuffer(nil)
err := p.Marshal(pb)
var state errorState
if err != nil && !state.shouldContinue(err, nil) {
return nil, err
}
if p.buf == nil && err == nil {
// Return a non-nil slice on success.
return []byte{}, nil
@@ -262,8 +266,11 @@ func (p *Buffer) Marshal(pb Message) error {
// Can the object marshal itself?
if m, ok := pb.(Marshaler); ok {
data, err := m.Marshal()
if err != nil {
return err
}
p.buf = append(p.buf, data...)
return err
return nil
}

t, base, err := getbase(pb)
@@ -275,7 +282,7 @@ func (p *Buffer) Marshal(pb Message) error {
}

if collectStats {
(stats).Encode++ // Parens are to work around a goimports bug.
stats.Encode++
}

if len(p.buf) > maxMarshalSize {
@@ -302,7 +309,7 @@ func Size(pb Message) (n int) {
}

if collectStats {
(stats).Size++ // Parens are to work around a goimports bug.
stats.Size++
}

return
@@ -1007,6 +1014,7 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) {
if p.isMarshaler {
m := structPointer_Interface(structp, p.stype).(Marshaler)
data, _ := m.Marshal()
n += len(p.tagcode)
n += sizeRawBytes(data)
continue
}
8 cmd/vendor/github.com/gogo/protobuf/proto/equal.go (generated, vendored)
@@ -54,17 +54,13 @@ Equality is defined in this way:
in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal. Note a "bytes" field,
although represented by []byte, is not a repeated field and the
rule for the scalar fields described above applies.
and their corresponding elements are equal (a "bytes" field,
although represented by []byte, is not a repeated field)
- Two unset fields are equal.
- Two unknown field sets are equal if their current
encoded state is equal.
- Two extension sets are equal iff they have corresponding
elements that are pairwise equal.
- Two map fields are equal iff their lengths are the same,
and they contain the same set of elements. Zero-length map
fields are equal.
- Every other combination of things are not equal.

The return value is undefined if a and b are not protocol buffers.
3 cmd/vendor/github.com/gogo/protobuf/proto/extensions.go (generated, vendored)
@@ -587,9 +587,6 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
registeredExtensions := RegisteredExtensions(pb)

emap, mu := epb.extensionsRead()
if emap == nil {
return nil, nil
}
mu.Lock()
defer mu.Unlock()
extensions := make([]*ExtensionDesc, 0, len(emap))
2 cmd/vendor/github.com/gogo/protobuf/proto/lib.go (generated, vendored)
@@ -308,7 +308,7 @@ func GetStats() Stats { return stats }
// temporary Buffer and are fine for most applications.
type Buffer struct {
buf []byte // encode/decode byte stream
index int // read point
index int // write point

// pools of basic types to amortize allocation.
bools []bool
40 cmd/vendor/github.com/gogo/protobuf/proto/properties.go (generated, vendored)
@@ -190,11 +190,10 @@ type Properties struct {
proto3 bool // whether this is known to be a proto3 field; set for []byte only
oneof bool // whether this is a oneof field

Default string // default value
HasDefault bool // whether an explicit default was provided
CustomType string
StdTime bool
StdDuration bool
Default string // default value
HasDefault bool // whether an explicit default was provided
CustomType string
def_uint64 uint64

enc encoder
valEnc valueEncoder // set for bool and numeric types only
@@ -341,10 +340,6 @@ func (p *Properties) Parse(s string) {
p.OrigName = strings.Split(f, "=")[1]
case strings.HasPrefix(f, "customtype="):
p.CustomType = strings.Split(f, "=")[1]
case f == "stdtime":
p.StdTime = true
case f == "stdduration":
p.StdDuration = true
}
}
}
@@ -360,22 +355,11 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
p.enc = nil
p.dec = nil
p.size = nil
isMap := typ.Kind() == reflect.Map
if len(p.CustomType) > 0 && !isMap {
if len(p.CustomType) > 0 {
p.setCustomEncAndDec(typ)
p.setTag(lockGetProp)
return
}
if p.StdTime && !isMap {
p.setTimeEncAndDec(typ)
p.setTag(lockGetProp)
return
}
if p.StdDuration && !isMap {
p.setDurationEncAndDec(typ)
p.setTag(lockGetProp)
return
}
switch t1 := typ; t1.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
@@ -646,10 +630,6 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
// so we need encoders for the pointer to this type.
vtype = reflect.PtrTo(vtype)
}

p.mvalprop.CustomType = p.CustomType
p.mvalprop.StdDuration = p.StdDuration
p.mvalprop.StdTime = p.StdTime
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
p.setTag(lockGetProp)
@@ -940,15 +920,7 @@ func RegisterType(x Message, name string) {
}

// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string {
type xname interface {
XXX_MessageName() string
}
if m, ok := x.(xname); ok {
return m.XXX_MessageName()
}
return revProtoTypes[reflect.TypeOf(x)]
}
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }

// MessageType returns the message type (pointer to struct) for a named message.
func MessageType(name string) reflect.Type { return protoTypes[name] }
45 cmd/vendor/github.com/gogo/protobuf/proto/properties_gogo.go (generated, vendored)
@@ -51,51 +51,6 @@ func (p *Properties) setCustomEncAndDec(typ reflect.Type) {
}
}

func (p *Properties) setDurationEncAndDec(typ reflect.Type) {
if p.Repeated {
if typ.Elem().Kind() == reflect.Ptr {
p.enc = (*Buffer).enc_slice_duration
p.dec = (*Buffer).dec_slice_duration
p.size = size_slice_duration
} else {
p.enc = (*Buffer).enc_slice_ref_duration
p.dec = (*Buffer).dec_slice_ref_duration
p.size = size_slice_ref_duration
}
} else if typ.Kind() == reflect.Ptr {
p.enc = (*Buffer).enc_duration
p.dec = (*Buffer).dec_duration
p.size = size_duration
} else {
p.enc = (*Buffer).enc_ref_duration
p.dec = (*Buffer).dec_ref_duration
p.size = size_ref_duration
}
}

func (p *Properties) setTimeEncAndDec(typ reflect.Type) {
if p.Repeated {
if typ.Elem().Kind() == reflect.Ptr {
p.enc = (*Buffer).enc_slice_time
p.dec = (*Buffer).dec_slice_time
p.size = size_slice_time
} else {
p.enc = (*Buffer).enc_slice_ref_time
p.dec = (*Buffer).dec_slice_ref_time
p.size = size_slice_ref_time
}
} else if typ.Kind() == reflect.Ptr {
p.enc = (*Buffer).enc_time
p.dec = (*Buffer).dec_time
p.size = size_time
} else {
p.enc = (*Buffer).enc_ref_time
p.dec = (*Buffer).dec_ref_time
p.size = size_ref_time
}

}

func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) {
t2 := typ.Elem()
p.sstype = typ
177 cmd/vendor/github.com/gogo/protobuf/proto/text.go (generated, vendored)
@@ -51,7 +51,6 @@ import (
"sort"
"strings"
"sync"
"time"
)

var (
@@ -182,93 +181,7 @@ type raw interface {
Bytes() []byte
}

func requiresQuotes(u string) bool {
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
for _, ch := range u {
switch {
case ch == '.' || ch == '/' || ch == '_':
continue
case '0' <= ch && ch <= '9':
continue
case 'A' <= ch && ch <= 'Z':
continue
case 'a' <= ch && ch <= 'z':
continue
default:
return true
}
}
return false
}

// isAny reports whether sv is a google.protobuf.Any message
func isAny(sv reflect.Value) bool {
type wkt interface {
XXX_WellKnownType() string
}
t, ok := sv.Addr().Interface().(wkt)
return ok && t.XXX_WellKnownType() == "Any"
}

// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
// required messages are not linked in).
//
// It returns (true, error) when sv was written in expanded format or an error
// was encountered.
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
turl := sv.FieldByName("TypeUrl")
val := sv.FieldByName("Value")
if !turl.IsValid() || !val.IsValid() {
return true, errors.New("proto: invalid google.protobuf.Any message")
}

b, ok := val.Interface().([]byte)
if !ok {
return true, errors.New("proto: invalid google.protobuf.Any message")
}

parts := strings.Split(turl.String(), "/")
mt := MessageType(parts[len(parts)-1])
if mt == nil {
return false, nil
}
m := reflect.New(mt.Elem())
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
return false, nil
}
w.Write([]byte("["))
u := turl.String()
if requiresQuotes(u) {
writeString(w, u)
} else {
w.Write([]byte(u))
}
if w.compact {
w.Write([]byte("]:<"))
} else {
w.Write([]byte("]: <\n"))
w.ind++
}
if err := tm.writeStruct(w, m.Elem()); err != nil {
return true, err
}
if w.compact {
w.Write([]byte("> "))
} else {
w.ind--
w.Write([]byte(">\n"))
}
return true, nil
}

func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
if tm.ExpandAny && isAny(sv) {
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
return err
}
}
func writeStruct(w *textWriter, sv reflect.Value) error {
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < sv.NumField(); i++ {
@@ -321,10 +234,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
continue
}
if len(props.Enum) > 0 {
if err := tm.writeEnum(w, v, props); err != nil {
if err := writeEnum(w, v, props); err != nil {
return err
}
} else if err := tm.writeAny(w, v, props); err != nil {
} else if err := writeAny(w, v, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@@ -366,7 +279,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
if err := writeAny(w, key, props.mkeyprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@@ -383,7 +296,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
if err := tm.writeAny(w, val, props.mvalprop); err != nil {
if err := writeAny(w, val, props.mvalprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@@ -455,10 +368,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
}

if len(props.Enum) > 0 {
if err := tm.writeEnum(w, fv, props); err != nil {
if err := writeEnum(w, fv, props); err != nil {
return err
}
} else if err := tm.writeAny(w, fv, props); err != nil {
} else if err := writeAny(w, fv, props); err != nil {
return err
}
@@ -476,7 +389,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
pv.Elem().Set(sv)
}
if pv.Type().Implements(extensionRangeType) {
if err := tm.writeExtensions(w, pv); err != nil {
if err := writeExtensions(w, pv); err != nil {
return err
}
}
@@ -506,45 +419,20 @@ func writeRaw(w *textWriter, b []byte) error {
}

// writeAny writes an arbitrary field.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)

if props != nil {
if len(props.CustomType) > 0 {
custom, ok := v.Interface().(Marshaler)
if ok {
data, err := custom.Marshal()
if err != nil {
return err
}
if err := writeString(w, string(data)); err != nil {
return err
}
return nil
}
} else if props.StdTime {
t, ok := v.Interface().(time.Time)
if !ok {
return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface())
}
tproto, err := timestampProto(t)
if props != nil && len(props.CustomType) > 0 {
custom, ok := v.Interface().(Marshaler)
if ok {
data, err := custom.Marshal()
if err != nil {
return err
}
props.StdTime = false
err = tm.writeAny(w, reflect.ValueOf(tproto), props)
props.StdTime = true
return err
} else if props.StdDuration {
d, ok := v.Interface().(time.Duration)
if !ok {
return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface())
if err := writeString(w, string(data)); err != nil {
return err
}
dproto := durationProto(d)
props.StdDuration = false
err := tm.writeAny(w, reflect.ValueOf(dproto), props)
props.StdDuration = true
return err
return nil
}
}

@@ -594,15 +482,15 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
}
}
w.indent()
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := tm.MarshalText()
if err != nil {
return err
}
if _, err = w.Write(text); err != nil {
return err
}
} else if err := tm.writeStruct(w, v); err != nil {
} else if err := writeStruct(w, v); err != nil {
return err
}
w.unindent()
@@ -746,7 +634,7 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable.
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
func writeExtensions(w *textWriter, pv reflect.Value) error {
emap := extensionMaps[pv.Type().Elem()]
e := pv.Interface().(Message)
@@ -801,13 +689,13 @@ func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error
// Repeated extensions will appear as a slice.
if !desc.repeated() {
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
if err := writeExtension(w, desc.Name, pb); err != nil {
return err
}
} else {
v := reflect.ValueOf(pb)
for i := 0; i < v.Len(); i++ {
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
return err
}
}
@@ -816,7 +704,7 @@ func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error
return nil
}

func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
func writeExtension(w *textWriter, name string, pb interface{}) error {
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
return err
}
@@ -825,7 +713,7 @@ func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface
return err
}
}
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@@ -852,13 +740,12 @@ func (w *textWriter) writeIndent() {
// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
Compact bool // use compact text format (one line).
ExpandAny bool // expand google.protobuf.Any messages of known types
Compact bool // use compact text format (one line).
}

// Marshal writes a given protocol buffer in text format.
// The only errors returned are from w.
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error {
val := reflect.ValueOf(pb)
if pb == nil || val.IsNil() {
w.Write([]byte("<nil>"))
@@ -873,11 +760,11 @@ func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
aw := &textWriter{
w: ww,
complete: true,
compact: tm.Compact,
compact: m.Compact,
}

if etm, ok := pb.(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if tm, ok := pb.(encoding.TextMarshaler); ok {
text, err := tm.MarshalText()
if err != nil {
return err
}
@@ -891,7 +778,7 @@ func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
}
// Dereference the received pointer so we don't have outer < and >.
v := reflect.Indirect(val)
if err := tm.writeStruct(aw, v); err != nil {
if err := writeStruct(aw, v); err != nil {
return err
}
if bw != nil {
@@ -901,9 +788,9 @@ func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
}

// Text is the same as Marshal, but returns the string directly.
func (tm *TextMarshaler) Text(pb Message) string {
func (m *TextMarshaler) Text(pb Message) string {
var buf bytes.Buffer
tm.Marshal(&buf, pb)
m.Marshal(&buf, pb)
return buf.String()
}
6 cmd/vendor/github.com/gogo/protobuf/proto/text_gogo.go (generated, vendored)
@@ -33,10 +33,10 @@ import (
"reflect"
)

func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
func writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
m, ok := enumStringMaps[props.Enum]
if !ok {
if err := tm.writeAny(w, v, props); err != nil {
if err := writeAny(w, v, props); err != nil {
return err
}
}
@@ -48,7 +48,7 @@ func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Proper
}
s, ok := m[key]
if !ok {
if err := tm.writeAny(w, v, props); err != nil {
if err := writeAny(w, v, props); err != nil {
return err
}
}
195 cmd/vendor/github.com/gogo/protobuf/proto/text_parser.go (generated, vendored)
@@ -46,13 +46,9 @@ import (
"reflect"
"strconv"
"strings"
"time"
"unicode/utf8"
)

// Error string emitted when deserializing Any and fields are already set
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"

type ParseError struct {
Message string
Line int // 1-based line number
@@ -172,7 +168,7 @@ func (p *textParser) advance() {
p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = ""
switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
case '<', '>', '{', '}', ':', '[', ']', ';', ',':
// Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'':
@@ -460,10 +456,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
fieldSet := make(map[string]bool)
// A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be
// "[extension]" or "[type/url]".
//
// The whole struct can also be an expanded Any message, like:
// [type/url] < ... struct contents ... >
// "[extension]".
for {
tok := p.next()
if tok.err != nil {
@@ -473,74 +466,33 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
break
}
if tok.value == "[" {
// Looks like an extension or an Any.
// Looks like an extension.
//
// TODO: Check whether we need to handle
// namespace rooted names (e.g. ".something.Foo").
extName, err := p.consumeExtName()
if err != nil {
return err
tok = p.next()
if tok.err != nil {
return tok.err
}

if s := strings.LastIndex(extName, "/"); s >= 0 {
// If it contains a slash, it's an Any type URL.
messageName := extName[s+1:]
mt := MessageType(messageName)
if mt == nil {
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
}
tok = p.next()
if tok.err != nil {
return tok.err
}
// consume an optional colon
if tok.value == ":" {
tok = p.next()
if tok.err != nil {
return tok.err
}
}
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
v := reflect.New(mt.Elem())
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
return pe
}
b, err := Marshal(v.Interface().(Message))
if err != nil {
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
}
if fieldSet["type_url"] {
return p.errorf(anyRepeatedlyUnpacked, "type_url")
}
if fieldSet["value"] {
return p.errorf(anyRepeatedlyUnpacked, "value")
}
sv.FieldByName("TypeUrl").SetString(extName)
sv.FieldByName("Value").SetBytes(b)
fieldSet["type_url"] = true
fieldSet["value"] = true
continue
}

var desc *ExtensionDesc
// This could be faster, but it's functional.
// TODO: Do something smarter than a linear scan.
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
if d.Name == extName {
if d.Name == tok.value {
desc = d
break
}
}
if desc == nil {
return p.errorf("unrecognized extension %q", extName)
return p.errorf("unrecognized extension %q", tok.value)
}
// Check the extension terminator.
tok = p.next()
if tok.err != nil {
return tok.err
}
if tok.value != "]" {
return p.errorf("unrecognized extension terminator %q", tok.value)
}

props := &Properties{}
@@ -598,11 +550,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
props = oop.Prop
nv := reflect.New(oop.Type.Elem())
dst = nv.Elem().Field(0)
field := sv.Field(oop.Field)
if !field.IsNil() {
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
}
field.Set(nv)
sv.Field(oop.Field).Set(nv)
}
if !dst.IsValid() {
return p.errorf("unknown field name %q in %v", name, st)
@@ -709,35 +657,6 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
return reqFieldErr
}

// consumeExtName consumes extension name or expanded Any type URL and the
// following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtName() (string, error) {
tok := p.next()
if tok.err != nil {
return "", tok.err
}

// If extension name or type url is quoted, it's a single token.
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
if err != nil {
return "", err
}
return name, p.consumeToken("]")
}

// Consume everything up to "]"
var parts []string
for tok.value != "]" {
parts = append(parts, tok.value)
tok = p.next()
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
}
return strings.Join(parts, ""), nil
}

// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in readStruct to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
@@ -798,80 +717,6 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
}
return nil
}
if props.StdTime {
fv := v
p.back()
props.StdTime = false
tproto := &timestamp{}
err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
props.StdTime = true
if err != nil {
return err
}
tim, err := timestampFromProto(tproto)
if err != nil {
return err
}
if props.Repeated {
t := reflect.TypeOf(v.Interface())
if t.Kind() == reflect.Slice {
if t.Elem().Kind() == reflect.Ptr {
ts := fv.Interface().([]*time.Time)
ts = append(ts, &tim)
fv.Set(reflect.ValueOf(ts))
return nil
} else {
ts := fv.Interface().([]time.Time)
ts = append(ts, tim)
fv.Set(reflect.ValueOf(ts))
return nil
}
}
}
if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
v.Set(reflect.ValueOf(&tim))
} else {
v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
}
return nil
}
if props.StdDuration {
fv := v
p.back()
props.StdDuration = false
dproto := &duration{}
err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
props.StdDuration = true
if err != nil {
return err
}
dur, err := durationFromProto(dproto)
if err != nil {
return err
}
if props.Repeated {
t := reflect.TypeOf(v.Interface())
if t.Kind() == reflect.Slice {
if t.Elem().Kind() == reflect.Ptr {
ds := fv.Interface().([]*time.Duration)
ds = append(ds, &dur)
fv.Set(reflect.ValueOf(ds))
return nil
} else {
ds := fv.Interface().([]time.Duration)
ds = append(ds, dur)
fv.Set(reflect.ValueOf(ds))
return nil
}
}
}
if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
v.Set(reflect.ValueOf(&dur))
} else {
v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
}
return nil
}
switch fv := v; fv.Kind() {
case reflect.Slice:
at := v.Type()
@@ -914,12 +759,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
return p.readAny(fv.Index(fv.Len()-1), props)
case reflect.Bool:
// true/1/t/True or false/f/0/False.
// Either "true", "false", 1 or 0.
switch tok.value {
case "true", "1", "t", "True":
case "true", "1":
fv.SetBool(true)
return nil
case "false", "0", "f", "False":
case "false", "0":
fv.SetBool(false)
return nil
}
113 cmd/vendor/github.com/gogo/protobuf/proto/timestamp.go (generated, vendored)
@@ -1,113 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

// This file implements operations on google.protobuf.Timestamp.

import (
"errors"
"fmt"
"time"
)

const (
// Seconds field of the earliest valid Timestamp.
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
minValidSeconds = -62135596800
// Seconds field just after the latest valid Timestamp.
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
maxValidSeconds = 253402300800
)

// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range
// [0001-01-01, 10000-01-01) and has a Nanos field
// in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes
// the problem.
//
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
func validateTimestamp(ts *timestamp) error {
if ts == nil {
return errors.New("timestamp: nil Timestamp")
}
if ts.Seconds < minValidSeconds {
return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
}
if ts.Seconds >= maxValidSeconds {
return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
}
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
}
return nil
}

// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return value
// is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
func timestampFromProto(ts *timestamp) (time.Time, error) {
// Don't return the zero value on error, because corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
var t time.Time
if ts == nil {
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
} else {
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
}
return t, validateTimestamp(ts)
}

// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func timestampProto(t time.Time) (*timestamp, error) {
seconds := t.Unix()
nanos := int32(t.Sub(time.Unix(seconds, 0)))
ts := &timestamp{
Seconds: seconds,
Nanos: nanos,
}
if err := validateTimestamp(ts); err != nil {
return nil, err
}
return ts, nil
}
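The removed helpers above split a time.Time into the Timestamp message's Unix seconds plus a non-negative nanosecond remainder. A small standalone illustration of that decomposition, independent of this package (the chosen date is only an example):

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2016, 11, 30, 12, 0, 0, 250000000, time.UTC)

	// google.protobuf.Timestamp stores Unix seconds plus nanos in [0, 1e9).
	seconds := t.Unix()
	nanos := int32(t.Sub(time.Unix(seconds, 0)))

	fmt.Println(seconds, nanos) // 1480507200 250000000

	// Reassemble the time.Time.
	back := time.Unix(seconds, int64(nanos)).UTC()
	fmt.Println(back.Equal(t)) // true
}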
227
cmd/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
generated
vendored
227
cmd/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
generated
vendored
@ -1,227 +0,0 @@
|
||||
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
|
||||
|
||||
type timestamp struct {
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||
}
|
||||
|
||||
func (m *timestamp) Reset() { *m = timestamp{} }
|
||||
func (*timestamp) ProtoMessage() {}
|
||||
func (*timestamp) String() string { return "timestamp<string>" }
|
||||
|
||||
func init() {
|
||||
RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
|
||||
}
|
||||
|
||||
func (o *Buffer) decTimestamp() (time.Time, error) {
|
||||
b, err := o.DecodeRawBytes(true)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
tproto := &timestamp{}
|
||||
if err := Unmarshal(b, tproto); err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
return timestampFromProto(tproto)
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_time(p *Properties, base structPointer) error {
|
||||
t, err := o.decTimestamp()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
setPtrCustomType(base, p.field, &t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error {
|
||||
t, err := o.decTimestamp()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
setCustomType(base, p.field, &t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error {
|
||||
t, err := o.decTimestamp()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType)))
|
||||
setPtrCustomType(newBas, 0, &t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error {
|
||||
t, err := o.decTimestamp()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType))
|
||||
setCustomType(newBas, 0, &t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func size_time(p *Properties, base structPointer) (n int) {
|
||||
structp := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(structp) {
|
||||
return 0
|
||||
}
|
||||
tim := structPointer_Interface(structp, timeType).(*time.Time)
|
||||
t, err := timestampProto(*tim)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
size := Size(t)
|
||||
return size + sizeVarint(uint64(size)) + len(p.tagcode)
|
||||
}
|
||||
|
||||
func (o *Buffer) enc_time(p *Properties, base structPointer) error {
|
||||
structp := structPointer_GetStructPointer(base, p.field)
|
||||
if structPointer_IsNil(structp) {
|
||||
return ErrNil
|
||||
}
|
||||
tim := structPointer_Interface(structp, timeType).(*time.Time)
|
||||
t, err := timestampProto(*tim)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data, err := Marshal(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(data)
|
||||
return nil
|
||||
}
|
||||
|
||||
func size_ref_time(p *Properties, base structPointer) (n int) {
|
||||
tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
|
||||
t, err := timestampProto(*tim)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
size := Size(t)
|
||||
return size + sizeVarint(uint64(size)) + len(p.tagcode)
|
||||
}
|
||||
|
||||
func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error {
|
||||
tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
|
||||
t, err := timestampProto(*tim)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data, err := Marshal(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(data)
|
||||
return nil
|
||||
}
|
||||
|
||||
func size_slice_time(p *Properties, base structPointer) (n int) {
|
||||
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time)
|
||||
tims := *ptims
|
||||
for i := 0; i < len(tims); i++ {
|
||||
if tims[i] == nil {
|
||||
return 0
|
||||
}
|
||||
tproto, err := timestampProto(*tims[i])
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
size := Size(tproto)
|
||||
n += len(p.tagcode) + size + sizeVarint(uint64(size))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error {
|
||||
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time)
|
||||
tims := *ptims
|
||||
for i := 0; i < len(tims); i++ {
|
||||
if tims[i] == nil {
|
||||
return errRepeatedHasNil
|
||||
}
|
||||
tproto, err := timestampProto(*tims[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data, err := Marshal(tproto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func size_slice_ref_time(p *Properties, base structPointer) (n int) {
|
||||
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time)
|
||||
tims := *ptims
|
||||
for i := 0; i < len(tims); i++ {
|
||||
tproto, err := timestampProto(tims[i])
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
size := Size(tproto)
|
||||
n += len(p.tagcode) + size + sizeVarint(uint64(size))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error {
|
||||
ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time)
|
||||
tims := *ptims
|
||||
for i := 0; i < len(tims); i++ {
|
||||
tproto, err := timestampProto(tims[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data, err := Marshal(tproto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.buf = append(o.buf, p.tagcode...)
|
||||
o.EncodeRawBytes(data)
|
||||
}
|
||||
return nil
|
||||
}
|
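The enc_*_time helpers above write the field's tag bytes (p.tagcode) and then the marshaled Timestamp as a length-delimited value; EncodeRawBytes emits a uvarint length followed by the raw bytes. A rough standalone sketch of that framing, where frameField and its inputs are hypothetical stand-ins rather than part of the vendored code:

package main

import (
	"encoding/binary"
	"fmt"
)

// frameField mimics what enc_time does per field: tag bytes, then a
// uvarint length prefix, then the marshaled google.protobuf.Timestamp.
func frameField(tagcode, marshaledTimestamp []byte) []byte {
	out := append([]byte{}, tagcode...) // field number + wire type 2
	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(marshaledTimestamp)))
	out = append(out, lenBuf[:n]...) // length prefix
	return append(out, marshaledTimestamp...)
}

func main() {
	// Field 1 with wire type 2 (length-delimited) has tag byte 0x0a; the
	// payload here is a Timestamp with Seconds=1 (field 1, varint 1).
	fmt.Printf("% x\n", frameField([]byte{0x0a}, []byte{0x08, 0x01})) // 0a 02 08 01
}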
21
cmd/vendor/github.com/mattn/go-runewidth/LICENSE
generated
vendored
Normal file
21
cmd/vendor/github.com/mattn/go-runewidth/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 Yasuhiro Matsumoto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
23
cmd/vendor/github.com/mattn/go-runewidth/runewidth.go
generated
vendored
23
cmd/vendor/github.com/mattn/go-runewidth/runewidth.go
generated
vendored
@ -1,7 +1,12 @@
|
||||
package runewidth
|
||||
|
||||
var EastAsianWidth = IsEastAsian()
|
||||
var DefaultCondition = &Condition{EastAsianWidth}
|
||||
var (
|
||||
// EastAsianWidth will be set true if the current locale is CJK
|
||||
EastAsianWidth = IsEastAsian()
|
||||
|
||||
// DefaultCondition is a condition in current locale
|
||||
DefaultCondition = &Condition{EastAsianWidth}
|
||||
)
|
||||
|
||||
type interval struct {
|
||||
first rune
|
||||
@ -302,10 +307,12 @@ var ctypes = []intervalType{
|
||||
{0x100000, 0x10FFFE, ambiguous},
|
||||
}
|
||||
|
||||
// Condition have flag EastAsianWidth whether the current locale is CJK or not.
|
||||
type Condition struct {
|
||||
EastAsianWidth bool
|
||||
}
|
||||
|
||||
// NewCondition return new instance of Condition which is current locale.
|
||||
func NewCondition() *Condition {
|
||||
return &Condition{EastAsianWidth}
|
||||
}
|
||||
@ -344,6 +351,7 @@ func (c *Condition) RuneWidth(r rune) int {
|
||||
return 1
|
||||
}
|
||||
|
||||
// StringWidth return width as you can see
|
||||
func (c *Condition) StringWidth(s string) (width int) {
|
||||
for _, r := range []rune(s) {
|
||||
width += c.RuneWidth(r)
|
||||
@ -351,6 +359,7 @@ func (c *Condition) StringWidth(s string) (width int) {
|
||||
return width
|
||||
}
|
||||
|
||||
// Truncate return string truncated with w cells
|
||||
func (c *Condition) Truncate(s string, w int, tail string) string {
|
||||
if c.StringWidth(s) <= w {
|
||||
return s
|
||||
@ -370,6 +379,7 @@ func (c *Condition) Truncate(s string, w int, tail string) string {
|
||||
return string(r[0:i]) + tail
|
||||
}
|
||||
|
||||
// Wrap return string wrapped with w cells
|
||||
func (c *Condition) Wrap(s string, w int) string {
|
||||
width := 0
|
||||
out := ""
|
||||
@ -392,6 +402,7 @@ func (c *Condition) Wrap(s string, w int) string {
|
||||
return out
|
||||
}
|
||||
|
||||
// FillLeft return string filled in left by spaces in w cells
|
||||
func (c *Condition) FillLeft(s string, w int) string {
|
||||
width := c.StringWidth(s)
|
||||
count := w - width
|
||||
@ -405,6 +416,7 @@ func (c *Condition) FillLeft(s string, w int) string {
|
||||
return s
|
||||
}
|
||||
|
||||
// FillRight return string filled in left by spaces in w cells
|
||||
func (c *Condition) FillRight(s string, w int) string {
|
||||
width := c.StringWidth(s)
|
||||
count := w - width
|
||||
@ -438,27 +450,32 @@ func IsAmbiguousWidth(r rune) bool {
|
||||
return ct(r) == ambiguous
|
||||
}
|
||||
|
||||
// IsAmbiguousWidth returns whether is ambiguous width or not.
|
||||
// IsNeutralWidth returns whether is neutral width or not.
|
||||
func IsNeutralWidth(r rune) bool {
|
||||
return ct(r) == neutral
|
||||
}
|
||||
|
||||
// StringWidth return width as you can see
|
||||
func StringWidth(s string) (width int) {
|
||||
return DefaultCondition.StringWidth(s)
|
||||
}
|
||||
|
||||
// Truncate return string truncated with w cells
|
||||
func Truncate(s string, w int, tail string) string {
|
||||
return DefaultCondition.Truncate(s, w, tail)
|
||||
}
|
||||
|
||||
// Wrap return string wrapped with w cells
|
||||
func Wrap(s string, w int) string {
|
||||
return DefaultCondition.Wrap(s, w)
|
||||
}
|
||||
|
||||
// FillLeft return string filled in left by spaces in w cells
|
||||
func FillLeft(s string, w int) string {
|
||||
return DefaultCondition.FillLeft(s, w)
|
||||
}
|
||||
|
||||
// FillRight return string filled in left by spaces in w cells
|
||||
func FillRight(s string, w int) string {
|
||||
return DefaultCondition.FillRight(s, w)
|
||||
}
|
||||
|
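A short hypothetical usage sketch of the exported helpers above; the widths assume the usual rendering of these CJK runes as two display cells each:

package main

import (
	"fmt"

	"github.com/mattn/go-runewidth"
)

func main() {
	s := "こんにちは" // five CJK runes, two display cells each

	fmt.Println(runewidth.StringWidth(s))        // 10
	fmt.Println(runewidth.Truncate(s, 7, "...")) // "こん..." (two runes fit next to the 3-cell tail)
	fmt.Println(runewidth.FillRight("abc", 6))   // "abc   " padded to 6 cells
}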
72
cmd/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
generated
vendored
72
cmd/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
generated
vendored
@ -10,20 +10,25 @@ import (
|
||||
|
||||
var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)
|
||||
|
||||
func IsEastAsian() bool {
|
||||
locale := os.Getenv("LC_CTYPE")
|
||||
if locale == "" {
|
||||
locale = os.Getenv("LANG")
|
||||
}
|
||||
|
||||
// ignore C locale
|
||||
if locale == "POSIX" || locale == "C" {
|
||||
return false
|
||||
}
|
||||
if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
|
||||
return false
|
||||
}
|
||||
var mblenTable = map[string]int{
|
||||
"utf-8": 6,
|
||||
"utf8": 6,
|
||||
"jis": 8,
|
||||
"eucjp": 3,
|
||||
"euckr": 2,
|
||||
"euccn": 2,
|
||||
"sjis": 2,
|
||||
"cp932": 2,
|
||||
"cp51932": 2,
|
||||
"cp936": 2,
|
||||
"cp949": 2,
|
||||
"cp950": 2,
|
||||
"big5": 2,
|
||||
"gbk": 2,
|
||||
"gb2312": 2,
|
||||
}
|
||||
|
||||
func isEastAsian(locale string) bool {
|
||||
charset := strings.ToLower(locale)
|
||||
r := reLoc.FindStringSubmatch(locale)
|
||||
if len(r) == 2 {
|
||||
@ -40,26 +45,11 @@ func IsEastAsian() bool {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
mbc_max := 1
|
||||
switch charset {
|
||||
case "utf-8", "utf8":
|
||||
mbc_max = 6
|
||||
case "jis":
|
||||
mbc_max = 8
|
||||
case "eucjp":
|
||||
mbc_max = 3
|
||||
case "euckr", "euccn":
|
||||
mbc_max = 2
|
||||
case "sjis", "cp932", "cp51932", "cp936", "cp949", "cp950":
|
||||
mbc_max = 2
|
||||
case "big5":
|
||||
mbc_max = 2
|
||||
case "gbk", "gb2312":
|
||||
mbc_max = 2
|
||||
max := 1
|
||||
if m, ok := mblenTable[charset]; ok {
|
||||
max = m
|
||||
}
|
||||
|
||||
if mbc_max > 1 && (charset[0] != 'u' ||
|
||||
if max > 1 && (charset[0] != 'u' ||
|
||||
strings.HasPrefix(locale, "ja") ||
|
||||
strings.HasPrefix(locale, "ko") ||
|
||||
strings.HasPrefix(locale, "zh")) {
|
||||
@ -67,3 +57,21 @@ func IsEastAsian() bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsEastAsian return true if the current locale is CJK
|
||||
func IsEastAsian() bool {
|
||||
locale := os.Getenv("LC_CTYPE")
|
||||
if locale == "" {
|
||||
locale = os.Getenv("LANG")
|
||||
}
|
||||
|
||||
// ignore C locale
|
||||
if locale == "POSIX" || locale == "C" {
|
||||
return false
|
||||
}
|
||||
if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
|
||||
return false
|
||||
}
|
||||
|
||||
return isEastAsian(locale)
|
||||
}
|
||||
|
1
cmd/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
generated
vendored
1
cmd/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
generated
vendored
@ -9,6 +9,7 @@ var (
|
||||
procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP")
|
||||
)
|
||||
|
||||
// IsEastAsian return true if the current locale is CJK
|
||||
func IsEastAsian() bool {
|
||||
r1, _, _ := procGetConsoleOutputCP.Call()
|
||||
if r1 == 0 {
|
||||
|
4
cmd/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
generated
vendored
4
cmd/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
generated
vendored
@ -178,7 +178,7 @@
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
@ -186,7 +186,7 @@
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2013 Matt T. Proud
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
1
cmd/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
generated
vendored
Normal file
1
cmd/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
|
2
cmd/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
generated
vendored
2
cmd/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
generated
vendored
@ -38,7 +38,7 @@ var errInvalidVarint = errors.New("invalid varint32 encountered")
|
||||
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
|
||||
// Per AbstractParser#parsePartialDelimitedFrom with
|
||||
// CodedInputStream#readRawVarint32.
|
||||
headerBuf := make([]byte, binary.MaxVarintLen32)
|
||||
var headerBuf [binary.MaxVarintLen32]byte
|
||||
var bytesRead, varIntBytes int
|
||||
var messageLength uint64
|
||||
for varIntBytes == 0 { // i.e. no varint has been decoded yet.
|
||||
|
4
cmd/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
generated
vendored
4
cmd/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
generated
vendored
@ -33,8 +33,8 @@ func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
buf := make([]byte, binary.MaxVarintLen32)
|
||||
encodedLength := binary.PutUvarint(buf, uint64(len(buffer)))
|
||||
var buf [binary.MaxVarintLen32]byte
|
||||
encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
|
||||
|
||||
sync, err := w.Write(buf[:encodedLength])
|
||||
if err != nil {
|
||||
|
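WriteDelimited and ReadDelimited frame each message with a uvarint length prefix so several messages can share one stream; this is how Prometheus ships MetricFamily messages in its protobuf exposition format. A small hypothetical round-trip sketch using the client_model types this library is usually paired with:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	var buf bytes.Buffer

	mf := &dto.MetricFamily{Name: proto.String("http_requests_total")}
	if _, err := pbutil.WriteDelimited(&buf, mf); err != nil { // uvarint length + marshaled message
		log.Fatal(err)
	}

	var out dto.MetricFamily
	if _, err := pbutil.ReadDelimited(&buf, &out); err != nil { // reads exactly one framed message
		log.Fatal(err)
	}
	fmt.Println(out.GetName()) // http_requests_total
}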
5
cmd/vendor/github.com/prometheus/client_golang/NOTICE
generated
vendored
5
cmd/vendor/github.com/prometheus/client_golang/NOTICE
generated
vendored
@ -7,11 +7,6 @@ SoundCloud Ltd. (http://soundcloud.com/).
|
||||
|
||||
The following components are included in this product:
|
||||
|
||||
goautoneg
|
||||
http://bitbucket.org/ww/goautoneg
|
||||
Copyright 2011, Open Knowledge Foundation Ltd.
|
||||
See README.txt for license details.
|
||||
|
||||
perks - a fork of https://github.com/bmizerany/perks
|
||||
https://github.com/beorn7/perks
|
||||
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
|
||||
|
52
cmd/vendor/github.com/prometheus/client_golang/prometheus/collector.go
generated
vendored
52
cmd/vendor/github.com/prometheus/client_golang/prometheus/collector.go
generated
vendored
@ -15,15 +15,15 @@ package prometheus
|
||||
|
||||
// Collector is the interface implemented by anything that can be used by
|
||||
// Prometheus to collect metrics. A Collector has to be registered for
|
||||
// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet.
|
||||
// collection. See Registerer.Register.
|
||||
//
|
||||
// The stock metrics provided by this package (like Gauge, Counter, Summary) are
|
||||
// also Collectors (which only ever collect one metric, namely itself). An
|
||||
// implementer of Collector may, however, collect multiple metrics in a
|
||||
// coordinated fashion and/or create metrics on the fly. Examples for collectors
|
||||
// already implemented in this library are the metric vectors (i.e. collection
|
||||
// of multiple instances of the same Metric but with different label values)
|
||||
// like GaugeVec or SummaryVec, and the ExpvarCollector.
|
||||
// The stock metrics provided by this package (Gauge, Counter, Summary,
|
||||
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
|
||||
// namely itself). An implementer of Collector may, however, collect multiple
|
||||
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
|
||||
// for collectors already implemented in this library are the metric vectors
|
||||
// (i.e. collection of multiple instances of the same Metric but with different
|
||||
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
|
||||
type Collector interface {
|
||||
// Describe sends the super-set of all possible descriptors of metrics
|
||||
// collected by this Collector to the provided channel and returns once
|
||||
@ -37,39 +37,39 @@ type Collector interface {
|
||||
// executing this method, it must send an invalid descriptor (created
|
||||
// with NewInvalidDesc) to signal the error to the registry.
|
||||
Describe(chan<- *Desc)
|
||||
// Collect is called by Prometheus when collecting metrics. The
|
||||
// implementation sends each collected metric via the provided channel
|
||||
// and returns once the last metric has been sent. The descriptor of
|
||||
// each sent metric is one of those returned by Describe. Returned
|
||||
// metrics that share the same descriptor must differ in their variable
|
||||
// label values. This method may be called concurrently and must
|
||||
// therefore be implemented in a concurrency safe way. Blocking occurs
|
||||
// at the expense of total performance of rendering all registered
|
||||
// metrics. Ideally, Collector implementations support concurrent
|
||||
// readers.
|
||||
// Collect is called by the Prometheus registry when collecting
|
||||
// metrics. The implementation sends each collected metric via the
|
||||
// provided channel and returns once the last metric has been sent. The
|
||||
// descriptor of each sent metric is one of those returned by
|
||||
// Describe. Returned metrics that share the same descriptor must differ
|
||||
// in their variable label values. This method may be called
|
||||
// concurrently and must therefore be implemented in a concurrency safe
|
||||
// way. Blocking occurs at the expense of total performance of rendering
|
||||
// all registered metrics. Ideally, Collector implementations support
|
||||
// concurrent readers.
|
||||
Collect(chan<- Metric)
|
||||
}
|
||||
|
||||
// SelfCollector implements Collector for a single Metric so that that the
|
||||
// Metric collects itself. Add it as an anonymous field to a struct that
|
||||
// implements Metric, and call Init with the Metric itself as an argument.
|
||||
type SelfCollector struct {
|
||||
// selfCollector implements Collector for a single Metric so that the Metric
|
||||
// collects itself. Add it as an anonymous field to a struct that implements
|
||||
// Metric, and call init with the Metric itself as an argument.
|
||||
type selfCollector struct {
|
||||
self Metric
|
||||
}
|
||||
|
||||
// Init provides the SelfCollector with a reference to the metric it is supposed
|
||||
// init provides the selfCollector with a reference to the metric it is supposed
|
||||
// to collect. It is usually called within the factory function to create a
|
||||
// metric. See example.
|
||||
func (c *SelfCollector) Init(self Metric) {
|
||||
func (c *selfCollector) init(self Metric) {
|
||||
c.self = self
|
||||
}
|
||||
|
||||
// Describe implements Collector.
|
||||
func (c *SelfCollector) Describe(ch chan<- *Desc) {
|
||||
func (c *selfCollector) Describe(ch chan<- *Desc) {
|
||||
ch <- c.self.Desc()
|
||||
}
|
||||
|
||||
// Collect implements Collector.
|
||||
func (c *SelfCollector) Collect(ch chan<- Metric) {
|
||||
func (c *selfCollector) Collect(ch chan<- Metric) {
|
||||
ch <- c.self
|
||||
}
|
||||
|
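The Describe/Collect contract documented above is easiest to see in a small custom Collector. This is a hypothetical sketch (queueCollector and its callback are invented for illustration) that mirrors an externally maintained value into a throw-away const metric at scrape time:

package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector mirrors an externally maintained value into Prometheus.
type queueCollector struct {
	depthDesc *prometheus.Desc
	depth     func() int // assumed callback into the instrumented code
}

// Describe sends the super-set of all descriptors this Collector can emit.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.depthDesc
}

// Collect creates a fresh const metric per scrape; it must be concurrency safe.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.depthDesc, prometheus.GaugeValue, float64(c.depth()))
}

func main() {
	c := &queueCollector{
		depthDesc: prometheus.NewDesc("work_queue_depth", "Current depth of the work queue.", nil, nil),
		depth:     func() int { return 42 },
	}
	prometheus.MustRegister(c)
}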
31
cmd/vendor/github.com/prometheus/client_golang/prometheus/counter.go
generated
vendored
31
cmd/vendor/github.com/prometheus/client_golang/prometheus/counter.go
generated
vendored
@ -15,7 +15,6 @@ package prometheus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"hash/fnv"
|
||||
)
|
||||
|
||||
// Counter is a Metric that represents a single numerical value that only ever
|
||||
@ -36,6 +35,9 @@ type Counter interface {
|
||||
// Prometheus metric. Do not use it for regular handling of a
|
||||
// Prometheus counter (as it can be used to break the contract of
|
||||
// monotonically increasing values).
|
||||
//
|
||||
// Deprecated: Use NewConstMetric to create a counter for an external
|
||||
// value. A Counter should never be set.
|
||||
Set(float64)
|
||||
// Inc increments the counter by 1.
|
||||
Inc()
|
||||
@ -56,7 +58,7 @@ func NewCounter(opts CounterOpts) Counter {
|
||||
opts.ConstLabels,
|
||||
)
|
||||
result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
|
||||
result.Init(result) // Init self-collection.
|
||||
result.init(result) // Init self-collection.
|
||||
return result
|
||||
}
|
||||
|
||||
@ -80,7 +82,7 @@ func (c *counter) Add(v float64) {
|
||||
// CounterVec embeds MetricVec. See there for a full list of methods with
|
||||
// detailed documentation.
|
||||
type CounterVec struct {
|
||||
MetricVec
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
|
||||
@ -94,20 +96,15 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &CounterVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
result := &counter{value: value{
|
||||
desc: desc,
|
||||
valType: CounterValue,
|
||||
labelPairs: makeLabelPairs(desc, lvs),
|
||||
}}
|
||||
result.Init(result) // Init self-collection.
|
||||
return result
|
||||
},
|
||||
},
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
result := &counter{value: value{
|
||||
desc: desc,
|
||||
valType: CounterValue,
|
||||
labelPairs: makeLabelPairs(desc, lvs),
|
||||
}}
|
||||
result.init(result) // Init self-collection.
|
||||
return result
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
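CounterVec, like the other *Vec types that now embed *MetricVec, keeps one child counter per distinct combination of label values. A brief hypothetical usage sketch:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

var httpRequests = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "HTTP requests processed, partitioned by method and status code.",
	},
	[]string{"method", "code"},
)

func main() {
	prometheus.MustRegister(httpRequests)

	// Each distinct label combination becomes its own child counter.
	httpRequests.WithLabelValues("GET", "200").Inc()
	httpRequests.With(prometheus.Labels{"method": "POST", "code": "500"}).Add(3)
}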
42
cmd/vendor/github.com/prometheus/client_golang/prometheus/desc.go
generated
vendored
42
cmd/vendor/github.com/prometheus/client_golang/prometheus/desc.go
generated
vendored
@ -1,10 +1,21 @@
|
||||
// Copyright 2016 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
@ -131,31 +142,24 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
|
||||
d.err = errors.New("duplicate label names")
|
||||
return d
|
||||
}
|
||||
h := fnv.New64a()
|
||||
var b bytes.Buffer // To copy string contents into, avoiding []byte allocations.
|
||||
vh := hashNew()
|
||||
for _, val := range labelValues {
|
||||
b.Reset()
|
||||
b.WriteString(val)
|
||||
b.WriteByte(separatorByte)
|
||||
h.Write(b.Bytes())
|
||||
vh = hashAdd(vh, val)
|
||||
vh = hashAddByte(vh, separatorByte)
|
||||
}
|
||||
d.id = h.Sum64()
|
||||
d.id = vh
|
||||
// Sort labelNames so that order doesn't matter for the hash.
|
||||
sort.Strings(labelNames)
|
||||
// Now hash together (in this order) the help string and the sorted
|
||||
// label names.
|
||||
h.Reset()
|
||||
b.Reset()
|
||||
b.WriteString(help)
|
||||
b.WriteByte(separatorByte)
|
||||
h.Write(b.Bytes())
|
||||
lh := hashNew()
|
||||
lh = hashAdd(lh, help)
|
||||
lh = hashAddByte(lh, separatorByte)
|
||||
for _, labelName := range labelNames {
|
||||
b.Reset()
|
||||
b.WriteString(labelName)
|
||||
b.WriteByte(separatorByte)
|
||||
h.Write(b.Bytes())
|
||||
lh = hashAdd(lh, labelName)
|
||||
lh = hashAddByte(lh, separatorByte)
|
||||
}
|
||||
d.dimHash = h.Sum64()
|
||||
d.dimHash = lh
|
||||
|
||||
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
|
||||
for n, v := range constLabels {
|
||||
|
178
cmd/vendor/github.com/prometheus/client_golang/prometheus/doc.go
generated
vendored
178
cmd/vendor/github.com/prometheus/client_golang/prometheus/doc.go
generated
vendored
@ -11,18 +11,17 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package prometheus provides embeddable metric primitives for servers and
|
||||
// standardized exposition of telemetry through a web services interface.
|
||||
// Package prometheus provides metrics primitives to instrument code for
|
||||
// monitoring. It also offers a registry for metrics. Sub-packages allow to
|
||||
// expose the registered metrics via HTTP (package promhttp) or push them to a
|
||||
// Pushgateway (package push).
|
||||
//
|
||||
// All exported functions and methods are safe to be used concurrently unless
|
||||
// specified otherwise.
|
||||
// specified otherwise.
|
||||
//
|
||||
// To expose metrics registered with the Prometheus registry, an HTTP server
|
||||
// needs to know about the Prometheus handler. The usual endpoint is "/metrics".
|
||||
// A Basic Example
|
||||
//
|
||||
// http.Handle("/metrics", prometheus.Handler())
|
||||
//
|
||||
// As a starting point a very basic usage example:
|
||||
// As a starting point, a very basic usage example:
|
||||
//
|
||||
// package main
|
||||
//
|
||||
@ -30,6 +29,7 @@
|
||||
// "net/http"
|
||||
//
|
||||
// "github.com/prometheus/client_golang/prometheus"
|
||||
// "github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
// )
|
||||
//
|
||||
// var (
|
||||
@ -37,73 +37,145 @@
|
||||
// Name: "cpu_temperature_celsius",
|
||||
// Help: "Current temperature of the CPU.",
|
||||
// })
|
||||
// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
// Name: "hd_errors_total",
|
||||
// Help: "Number of hard-disk errors.",
|
||||
// })
|
||||
// hdFailures = prometheus.NewCounterVec(
|
||||
// prometheus.CounterOpts{
|
||||
// Name: "hd_errors_total",
|
||||
// Help: "Number of hard-disk errors.",
|
||||
// },
|
||||
// []string{"device"},
|
||||
// )
|
||||
// )
|
||||
//
|
||||
// func init() {
|
||||
// // Metrics have to be registered to be exposed:
|
||||
// prometheus.MustRegister(cpuTemp)
|
||||
// prometheus.MustRegister(hdFailures)
|
||||
// }
|
||||
//
|
||||
// func main() {
|
||||
// cpuTemp.Set(65.3)
|
||||
// hdFailures.Inc()
|
||||
// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
|
||||
//
|
||||
// http.Handle("/metrics", prometheus.Handler())
|
||||
// // The Handler function provides a default handler to expose metrics
|
||||
// // via an HTTP server. "/metrics" is the usual endpoint for that.
|
||||
// http.Handle("/metrics", promhttp.Handler())
|
||||
// http.ListenAndServe(":8080", nil)
|
||||
// }
|
||||
//
|
||||
//
|
||||
// This is a complete program that exports two metrics, a Gauge and a Counter.
|
||||
// It also exports some stats about the HTTP usage of the /metrics
|
||||
// endpoint. (See the Handler function for more detail.)
|
||||
// This is a complete program that exports two metrics, a Gauge and a Counter,
|
||||
// the latter with a label attached to turn it into a (one-dimensional) vector.
|
||||
//
|
||||
// Two more advanced metric types are the Summary and Histogram.
|
||||
// Metrics
|
||||
//
|
||||
// In addition to the fundamental metric types Gauge, Counter, Summary, and
|
||||
// Histogram, a very important part of the Prometheus data model is the
|
||||
// partitioning of samples along dimensions called labels, which results in
|
||||
// The number of exported identifiers in this package might appear a bit
|
||||
// overwhelming. However, in addition to the basic plumbing shown in the example
|
||||
// above, you only need to understand the different metric types and their
|
||||
// vector versions for basic usage.
|
||||
//
|
||||
// Above, you have already touched the Counter and the Gauge. There are two more
|
||||
// advanced metric types: the Summary and Histogram. A more thorough description
|
||||
// of those four metric types can be found in the Prometheus docs:
|
||||
// https://prometheus.io/docs/concepts/metric_types/
|
||||
//
|
||||
// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
|
||||
// Prometheus server not to assume anything about its type.
|
||||
//
|
||||
// In addition to the fundamental metric types Gauge, Counter, Summary,
|
||||
// Histogram, and Untyped, a very important part of the Prometheus data model is
|
||||
// the partitioning of samples along dimensions called labels, which results in
|
||||
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
|
||||
// and HistogramVec.
|
||||
// HistogramVec, and UntypedVec.
|
||||
//
|
||||
// Those are all the parts needed for basic usage. Detailed documentation and
|
||||
// examples are provided below.
|
||||
// While only the fundamental metric types implement the Metric interface, both
|
||||
// the metrics and their vector versions implement the Collector interface. A
|
||||
// Collector manages the collection of a number of Metrics, but for convenience,
|
||||
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
|
||||
// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
|
||||
// SummaryVec, HistogramVec, and UntypedVec are not.
|
||||
//
|
||||
// Everything else this package offers is essentially for "power users" only. A
|
||||
// few pointers to "power user features":
|
||||
// To create instances of Metrics and their vector versions, you need a suitable
|
||||
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
|
||||
// HistogramOpts, or UntypedOpts.
|
||||
//
|
||||
// All the various ...Opts structs have a ConstLabels field for labels that
|
||||
// never change their value (which is only useful under special circumstances,
|
||||
// see documentation of the Opts type).
|
||||
// Custom Collectors and constant Metrics
|
||||
//
|
||||
// The Untyped metric behaves like a Gauge, but signals the Prometheus server
|
||||
// not to assume anything about its type.
|
||||
// While you could create your own implementations of Metric, most likely you
|
||||
// will only ever implement the Collector interface on your own. At a first
|
||||
// glance, a custom Collector seems handy to bundle Metrics for common
|
||||
// registration (with the prime example of the different metric vectors above,
|
||||
// which bundle all the metrics of the same name but with different labels).
|
||||
//
|
||||
// Functions to fine-tune how the metric registry works: EnableCollectChecks,
|
||||
// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook.
|
||||
// There is a more involved use case, too: If you already have metrics
|
||||
// available, created outside of the Prometheus context, you don't need the
|
||||
// interface of the various Metric types. You essentially want to mirror the
|
||||
// existing numbers into Prometheus Metrics during collection. An own
|
||||
// implementation of the Collector interface is perfect for that. You can create
|
||||
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
|
||||
// NewConstSummary (and their respective Must… versions). That will happen in
|
||||
// the Collect method. The Describe method has to return separate Desc
|
||||
// instances, representative of the “throw-away” metrics to be created
|
||||
// later. NewDesc comes in handy to create those Desc instances.
|
||||
//
|
||||
// For custom metric collection, there are two entry points: Custom Metric
|
||||
// implementations and custom Collector implementations. A Metric is the
|
||||
// fundamental unit in the Prometheus data model: a sample at a point in time
|
||||
// together with its meta-data (like its fully-qualified name and any number of
|
||||
// pairs of label name and label value) that knows how to marshal itself into a
|
||||
// data transfer object (aka DTO, implemented as a protocol buffer). A Collector
|
||||
// gets registered with the Prometheus registry and manages the collection of
|
||||
// one or more Metrics. Many parts of this package are building blocks for
|
||||
// Metrics and Collectors. Desc is the metric descriptor, actually used by all
|
||||
// metrics under the hood, and by Collectors to describe the Metrics to be
|
||||
// collected, but only to be dealt with by users if they implement their own
|
||||
// Metrics or Collectors. To create a Desc, the BuildFQName function will come
|
||||
// in handy. Other useful components for Metric and Collector implementation
|
||||
// include: LabelPairSorter to sort the DTO version of label pairs,
|
||||
// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at
|
||||
// collection time, MetricVec to bundle custom Metrics into a metric vector
|
||||
// Collector, SelfCollector to make a custom Metric collect itself.
|
||||
// The Collector example illustrates the use case. You can also look at the
|
||||
// source code of the processCollector (mirroring process metrics), the
|
||||
// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
|
||||
// metrics) as examples that are used in this package itself.
|
||||
//
|
||||
// A good example for a custom Collector is the ExpVarCollector included in this
|
||||
// package, which exports variables exported via the "expvar" package as
|
||||
// Prometheus metrics.
|
||||
// If you just need to call a function to get a single float value to collect as
|
||||
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
|
||||
// shortcuts.
|
||||
//
|
||||
// Advanced Uses of the Registry
|
||||
//
|
||||
// While MustRegister is the by far most common way of registering a Collector,
|
||||
// sometimes you might want to handle the errors the registration might
|
||||
// cause. As suggested by the name, MustRegister panics if an error occurs. With
|
||||
// the Register function, the error is returned and can be handled.
|
||||
//
|
||||
// An error is returned if the registered Collector is incompatible or
|
||||
// inconsistent with already registered metrics. The registry aims for
|
||||
// consistency of the collected metrics according to the Prometheus data
|
||||
// model. Inconsistencies are ideally detected at registration time, not at
|
||||
// collect time. The former will usually be detected at start-up time of a
|
||||
// program, while the latter will only happen at scrape time, possibly not even
|
||||
// on the first scrape if the inconsistency only becomes relevant later. That is
|
||||
// the main reason why a Collector and a Metric have to describe themselves to
|
||||
// the registry.
|
||||
//
|
||||
// So far, everything we did operated on the so-called default registry, as it
|
||||
// can be found in the global DefaultRegistry variable. With NewRegistry, you
|
||||
// can create a custom registry, or you can even implement the Registerer or
|
||||
// Gatherer interfaces yourself. The methods Register and Unregister work in
|
||||
// the same way on a custom registry as the global functions Register and
|
||||
// Unregister on the default registry.
|
||||
//
|
||||
// There are a number of uses for custom registries: You can use registries
|
||||
// with special properties, see NewPedanticRegistry. You can avoid global state,
|
||||
// as it is imposed by the DefaultRegistry. You can use multiple registries at
|
||||
// the same time to expose different metrics in different ways. You can use
|
||||
// separate registries for testing purposes.
|
||||
//
|
||||
// Also note that the DefaultRegistry comes registered with a Collector for Go
|
||||
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
|
||||
// NewProcessCollector). With a custom registry, you are in control and decide
|
||||
// yourself about the Collectors to register.
|
||||
//
|
||||
// HTTP Exposition
|
||||
//
|
||||
// The Registry implements the Gatherer interface. The caller of the Gather
|
||||
// method can then expose the gathered metrics in some way. Usually, the metrics
|
||||
// are served via HTTP on the /metrics endpoint. That's happening in the example
|
||||
// above. The tools to expose metrics via HTTP are in the promhttp
|
||||
// sub-package. (The top-level functions in the prometheus package are
|
||||
// deprecated.)
|
||||
//
|
||||
// Pushing to the Pushgateway
|
||||
//
|
||||
// Function for pushing to the Pushgateway can be found in the push sub-package.
|
||||
//
|
||||
// Other Means of Exposition
|
||||
//
|
||||
// More ways of exposing metrics can easily be added. Sending metrics to
|
||||
// Graphite would be an example that will soon be implemented.
|
||||
package prometheus
|
||||
|
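The doc comment above sketches custom registries in prose; a compact hypothetical example of that pattern, pairing NewRegistry with promhttp.HandlerFor so only explicitly registered collectors are exposed:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Unlike the DefaultRegistry, a fresh registry has no Go or process
	// collectors pre-registered, so you decide exactly what is exposed.
	reg := prometheus.NewRegistry()

	queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "work_queue_depth",
		Help: "Current depth of the work queue.",
	})
	reg.MustRegister(queueDepth)
	queueDepth.Set(7)

	// HandlerFor serves only the metrics gathered by this registry.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":8080", nil)
}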
@ -18,21 +18,21 @@ import (
|
||||
"expvar"
|
||||
)
|
||||
|
||||
// ExpvarCollector collects metrics from the expvar interface. It provides a
|
||||
// quick way to expose numeric values that are already exported via expvar as
|
||||
// Prometheus metrics. Note that the data models of expvar and Prometheus are
|
||||
// fundamentally different, and that the ExpvarCollector is inherently
|
||||
// slow. Thus, the ExpvarCollector is probably great for experiments and
|
||||
// prototyping, but you should seriously consider a more direct implementation of
|
||||
// Prometheus metrics for monitoring production systems.
|
||||
//
|
||||
// Use NewExpvarCollector to create new instances.
|
||||
type ExpvarCollector struct {
|
||||
type expvarCollector struct {
|
||||
exports map[string]*Desc
|
||||
}
|
||||
|
||||
// NewExpvarCollector returns a newly allocated ExpvarCollector that still has
|
||||
// to be registered with the Prometheus registry.
|
||||
// NewExpvarCollector returns a newly allocated expvar Collector that still has
|
||||
// to be registered with a Prometheus registry.
|
||||
//
|
||||
// An expvar Collector collects metrics from the expvar interface. It provides a
|
||||
// quick way to expose numeric values that are already exported via expvar as
|
||||
// Prometheus metrics. Note that the data models of expvar and Prometheus are
|
||||
// fundamentally different, and that the expvar Collector is inherently slower
|
||||
// than native Prometheus metrics. Thus, the expvar Collector is probably great
|
||||
// for experiments and prototying, but you should seriously consider a more
|
||||
// direct implementation of Prometheus metrics for monitoring production
|
||||
// systems.
|
||||
//
|
||||
// The exports map has the following meaning:
|
||||
//
|
||||
@ -59,21 +59,21 @@ type ExpvarCollector struct {
|
||||
// sample values.
|
||||
//
|
||||
// Anything that does not fit into the scheme above is silently ignored.
|
||||
func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector {
|
||||
return &ExpvarCollector{
|
||||
func NewExpvarCollector(exports map[string]*Desc) Collector {
|
||||
return &expvarCollector{
|
||||
exports: exports,
|
||||
}
|
||||
}
|
||||
|
||||
// Describe implements Collector.
|
||||
func (e *ExpvarCollector) Describe(ch chan<- *Desc) {
|
||||
func (e *expvarCollector) Describe(ch chan<- *Desc) {
|
||||
for _, desc := range e.exports {
|
||||
ch <- desc
|
||||
}
|
||||
}
|
||||
|
||||
// Collect implements Collector.
|
||||
func (e *ExpvarCollector) Collect(ch chan<- Metric) {
|
||||
func (e *expvarCollector) Collect(ch chan<- Metric) {
|
||||
for name, desc := range e.exports {
|
||||
var m Metric
|
||||
expVar := expvar.Get(name)
|
29
cmd/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
generated
vendored
Normal file
29
cmd/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
package prometheus
|
||||
|
||||
// Inline and byte-free variant of hash/fnv's fnv64a.
|
||||
|
||||
const (
|
||||
offset64 = 14695981039346656037
|
||||
prime64 = 1099511628211
|
||||
)
|
||||
|
||||
// hashNew initializes a new fnv64a hash value.
|
||||
func hashNew() uint64 {
|
||||
return offset64
|
||||
}
|
||||
|
||||
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
|
||||
func hashAdd(h uint64, s string) uint64 {
|
||||
for i := 0; i < len(s); i++ {
|
||||
h ^= uint64(s[i])
|
||||
h *= prime64
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
|
||||
func hashAddByte(h uint64, b byte) uint64 {
|
||||
h ^= uint64(b)
|
||||
h *= prime64
|
||||
return h
|
||||
}
|
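The inline helpers above implement allocation-free fnv64a, so over the same byte sequence they agree with the standard library's hash/fnv. A small self-contained check (the constants and helper bodies are copied from the file above):

package main

import (
	"fmt"
	"hash/fnv"
)

const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func hashAddByte(h uint64, b byte) uint64 {
	h ^= uint64(b)
	h *= prime64
	return h
}

func main() {
	// Hash two label values separated by the 0xff separator byte, as NewDesc does.
	h := uint64(offset64)
	h = hashAdd(h, "get")
	h = hashAddByte(h, 255)
	h = hashAdd(h, "200")
	h = hashAddByte(h, 255)

	f := fnv.New64a()
	f.Write([]byte("get"))
	f.Write([]byte{255})
	f.Write([]byte("200"))
	f.Write([]byte{255})

	fmt.Println(h == f.Sum64()) // true: same fnv64a, just without allocations
}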
15
cmd/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
generated
vendored
15
cmd/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
generated
vendored
@ -13,8 +13,6 @@
|
||||
|
||||
package prometheus
|
||||
|
||||
import "hash/fnv"
|
||||
|
||||
// Gauge is a Metric that represents a single numerical value that can
|
||||
// arbitrarily go up and down.
|
||||
//
|
||||
@ -60,7 +58,7 @@ func NewGauge(opts GaugeOpts) Gauge {
|
||||
// (e.g. number of operations queued, partitioned by user and operation
|
||||
// type). Create instances with NewGaugeVec.
|
||||
type GaugeVec struct {
|
||||
MetricVec
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
|
||||
@ -74,14 +72,9 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &GaugeVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
return newValue(desc, GaugeValue, 0, lvs...)
|
||||
},
|
||||
},
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newValue(desc, GaugeValue, 0, lvs...)
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
4
cmd/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
generated
vendored
4
cmd/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
generated
vendored
@ -17,7 +17,7 @@ type goCollector struct {
|
||||
|
||||
// NewGoCollector returns a collector which exports metrics about the current
|
||||
// go process.
|
||||
func NewGoCollector() *goCollector {
|
||||
func NewGoCollector() Collector {
|
||||
return &goCollector{
|
||||
goroutines: NewGauge(GaugeOpts{
|
||||
Namespace: "go",
|
||||
@ -211,7 +211,7 @@ func NewGoCollector() *goCollector {
|
||||
"Number of seconds since 1970 of last garbage collection.",
|
||||
nil, nil,
|
||||
),
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC*10 ^ 9) },
|
||||
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
|
||||
valType: GaugeValue,
|
||||
},
|
||||
},
|
||||
|
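The eval change above fixes a genuine bug: in Go, ^ is bitwise XOR and * binds tighter, so ms.LastGC*10 ^ 9 computed (LastGC*10) XOR 9 instead of converting nanoseconds to seconds. A tiny stdlib-only illustration:

package main

import "fmt"

func main() {
	var lastGC uint64 = 1480000000123456789 // nanoseconds since 1970, like ms.LastGC

	buggy := float64(lastGC*10 ^ 9) // (lastGC*10) XOR 9: ~1.5e19, nonsense as a Unix timestamp
	fixed := float64(lastGC) / 1e9  // nanoseconds -> seconds: ~1.48e9

	fmt.Println(buggy)
	fmt.Println(fixed)
}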
26
cmd/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
generated
vendored
26
cmd/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
generated
vendored
@ -15,7 +15,6 @@ package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"math"
|
||||
"sort"
|
||||
"sync/atomic"
|
||||
@ -52,11 +51,11 @@ type Histogram interface {
|
||||
// bucket of a histogram ("le" -> "less or equal").
|
||||
const bucketLabel = "le"
|
||||
|
||||
// DefBuckets are the default Histogram buckets. The default buckets are
|
||||
// tailored to broadly measure the response time (in seconds) of a network
|
||||
// service. Most likely, however, you will be required to define buckets
|
||||
// customized to your use case.
|
||||
var (
|
||||
// DefBuckets are the default Histogram buckets. The default buckets are
|
||||
// tailored to broadly measure the response time (in seconds) of a
|
||||
// network service. Most likely, however, you will be required to define
|
||||
// buckets customized to your use case.
|
||||
DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
|
||||
|
||||
errBucketLabelNotAllowed = fmt.Errorf(
|
||||
@ -211,7 +210,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
|
||||
// Finally we know the final length of h.upperBounds and can make counts.
|
||||
h.counts = make([]uint64, len(h.upperBounds))
|
||||
|
||||
h.Init(h) // Init self-collection.
|
||||
h.init(h) // Init self-collection.
|
||||
return h
|
||||
}
|
||||
|
||||
@ -223,7 +222,7 @@ type histogram struct {
|
||||
sumBits uint64
|
||||
count uint64
|
||||
|
||||
SelfCollector
|
||||
selfCollector
|
||||
// Note that there is no mutex required.
|
||||
|
||||
desc *Desc
|
||||
@ -288,7 +287,7 @@ func (h *histogram) Write(out *dto.Metric) error {
|
||||
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
||||
// instances with NewHistogramVec.
|
||||
type HistogramVec struct {
|
||||
MetricVec
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
|
||||
@ -302,14 +301,9 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &HistogramVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
return newHistogram(desc, opts, lvs...)
|
||||
},
|
||||
},
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newHistogram(desc, opts, lvs...)
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
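DefBuckets and the histogram constructor shown above are typically used along these lines; a minimal hypothetical sketch:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "request_duration_seconds",
	Help:    "Request latency in seconds.",
	Buckets: prometheus.DefBuckets, // 5ms ... 10s, tuned for network services
})

func main() {
	prometheus.MustRegister(requestDuration)

	start := time.Now()
	time.Sleep(20 * time.Millisecond) // stand-in for the real work being timed
	requestDuration.Observe(time.Since(start).Seconds())
}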
151
cmd/vendor/github.com/prometheus/client_golang/prometheus/http.go
generated
vendored
151
cmd/vendor/github.com/prometheus/client_golang/prometheus/http.go
generated
vendored
@ -15,14 +15,114 @@ package prometheus
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/common/expfmt"
|
||||
)
|
||||
|
||||
// TODO(beorn7): Remove this whole file. It is a partial mirror of
|
||||
// promhttp/http.go (to avoid circular import chains) where everything HTTP
|
||||
// related should live. The functions here are just for avoiding
|
||||
// breakage. Everything is deprecated.
|
||||
|
||||
const (
|
||||
contentTypeHeader = "Content-Type"
|
||||
contentLengthHeader = "Content-Length"
|
||||
contentEncodingHeader = "Content-Encoding"
|
||||
acceptEncodingHeader = "Accept-Encoding"
|
||||
)
|
||||
|
||||
var bufPool sync.Pool
|
||||
|
||||
func getBuf() *bytes.Buffer {
|
||||
buf := bufPool.Get()
|
||||
if buf == nil {
|
||||
return &bytes.Buffer{}
|
||||
}
|
||||
return buf.(*bytes.Buffer)
|
||||
}
|
||||
|
||||
func giveBuf(buf *bytes.Buffer) {
|
||||
buf.Reset()
|
||||
bufPool.Put(buf)
|
||||
}
|
||||
|
||||
// Handler returns an HTTP handler for the DefaultGatherer. It is
|
||||
// already instrumented with InstrumentHandler (using "prometheus" as handler
|
||||
// name).
|
||||
//
|
||||
// Deprecated: Please note the issues described in the doc comment of
|
||||
// InstrumentHandler. You might want to consider using promhttp.Handler instead
|
||||
// (which is non instrumented).
|
||||
func Handler() http.Handler {
|
||||
return InstrumentHandler("prometheus", UninstrumentedHandler())
|
||||
}
|
||||
|
||||
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
|
||||
//
|
||||
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
|
||||
func UninstrumentedHandler() http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
mfs, err := DefaultGatherer.Gather()
|
||||
if err != nil {
|
||||
http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
contentType := expfmt.Negotiate(req.Header)
|
||||
buf := getBuf()
|
||||
defer giveBuf(buf)
|
||||
writer, encoding := decorateWriter(req, buf)
|
||||
enc := expfmt.NewEncoder(writer, contentType)
|
||||
var lastErr error
|
||||
for _, mf := range mfs {
|
||||
if err := enc.Encode(mf); err != nil {
|
||||
lastErr = err
|
||||
http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
if closer, ok := writer.(io.Closer); ok {
|
||||
closer.Close()
|
||||
}
|
||||
if lastErr != nil && buf.Len() == 0 {
|
||||
http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
header := w.Header()
|
||||
header.Set(contentTypeHeader, string(contentType))
|
||||
header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
|
||||
if encoding != "" {
|
||||
header.Set(contentEncodingHeader, encoding)
|
||||
}
|
||||
w.Write(buf.Bytes())
|
||||
})
|
||||
}
|
||||
|
||||
// decorateWriter wraps a writer to handle gzip compression if requested. It
|
||||
// returns the decorated writer and the appropriate "Content-Encoding" header
|
||||
// (which is empty if no compression is enabled).
|
||||
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
|
||||
header := request.Header.Get(acceptEncodingHeader)
|
||||
parts := strings.Split(header, ",")
|
||||
for _, part := range parts {
|
||||
part := strings.TrimSpace(part)
|
||||
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
|
||||
return gzip.NewWriter(writer), "gzip"
|
||||
}
|
||||
}
|
||||
return writer, ""
|
||||
}
|
||||
|
||||
var instLabels = []string{"method", "code"}
|
||||
|
||||
type nower interface {
|
||||
@ -57,12 +157,34 @@ func nowSeries(t ...time.Time) nower {
|
||||
// has a constant label named "handler" with the provided handlerName as
|
||||
// value. http_requests_total is a metric vector partitioned by HTTP method
|
||||
// (label name "method") and HTTP status code (label name "code").
|
||||
//
|
||||
// Deprecated: InstrumentHandler has several issues:
|
||||
//
|
||||
// - It uses Summaries rather than Histograms. Summaries are not useful if
|
||||
// aggregation across multiple instances is required.
|
||||
//
|
||||
// - It uses microseconds as unit, which is deprecated and should be replaced by
|
||||
// seconds.
|
||||
//
|
||||
// - The size of the request is calculated in a separate goroutine. Since this
|
||||
// calculator requires access to the request header, it creates a race with
|
||||
// any writes to the header performed during request handling.
|
||||
// httputil.ReverseProxy is a prominent example for a handler
|
||||
// performing such writes.
|
||||
//
|
||||
// Upcoming versions of this package will provide ways of instrumenting HTTP
|
||||
// handlers that are more flexible and have fewer issues. Please prefer direct
|
||||
// instrumentation in the meantime.
|
||||
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
|
||||
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
|
||||
}
|
||||
|
||||
// InstrumentHandlerFunc wraps the given function for instrumentation. It
|
||||
// otherwise works in the same way as InstrumentHandler.
|
||||
// otherwise works in the same way as InstrumentHandler (and shares the same
|
||||
// issues).
|
||||
//
|
||||
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
|
||||
// InstrumentHandler is.
|
||||
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
||||
return InstrumentHandlerFuncWithOpts(
|
||||
SummaryOpts{
|
||||
@ -73,13 +195,13 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
|
||||
)
|
||||
}
|
||||
|
||||
// InstrumentHandlerWithOpts works like InstrumentHandler but provides more
|
||||
// flexibility (at the cost of a more complex call syntax). As
|
||||
// InstrumentHandler, this function registers four metric collectors, but it
|
||||
// uses the provided SummaryOpts to create them. However, the fields "Name" and
|
||||
// "Help" in the SummaryOpts are ignored. "Name" is replaced by
|
||||
// "requests_total", "request_duration_microseconds", "request_size_bytes", and
|
||||
// "response_size_bytes", respectively. "Help" is replaced by an appropriate
|
||||
// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
|
||||
// issues) but provides more flexibility (at the cost of a more complex call
|
||||
// syntax). As InstrumentHandler, this function registers four metric
|
||||
// collectors, but it uses the provided SummaryOpts to create them. However, the
|
||||
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
|
||||
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
|
||||
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
|
||||
// help string. The names of the variable labels of the http_requests_total
|
||||
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
|
||||
//
|
||||
@ -98,13 +220,20 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
|
||||
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
|
||||
// and all its fields are set to the equally named fields in the provided
|
||||
// SummaryOpts.
|
||||
//
|
||||
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
|
||||
// InstrumentHandler is.
|
||||
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
|
||||
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
|
||||
}
|
||||
|
||||
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc but provides
|
||||
// more flexibility (at the cost of a more complex call syntax). See
|
||||
// InstrumentHandlerWithOpts for details how the provided SummaryOpts are used.
|
||||
// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
|
||||
// the same issues) but provides more flexibility (at the cost of a more complex
|
||||
// call syntax). See InstrumentHandlerWithOpts for details how the provided
|
||||
// SummaryOpts are used.
|
||||
//
|
||||
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
|
||||
// as InstrumentHandler is.
|
||||
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
|
||||
reqCnt := NewCounterVec(
|
||||
CounterOpts{
|
||||
|
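The deprecation notes above recommend direct instrumentation over the InstrumentHandler family. A rough sketch of what that can look like with a hand-rolled CounterVec partitioned by "method" and "code"; the metric name, wrapper type, and handler are illustrative only and not part of this package:

package main

import (
	"log"
	"net/http"
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
)

// httpRequests mirrors the "method"/"code" partitioning of the deprecated
// InstrumentHandler helpers, but is owned and registered by the application.
var httpRequests = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "example_http_requests_total", // illustrative name
		Help: "HTTP requests partitioned by method and status code.",
	},
	[]string{"method", "code"},
)

// statusRecorder remembers the status code written by the wrapped handler.
type statusRecorder struct {
	http.ResponseWriter
	status int
}

func (r *statusRecorder) WriteHeader(code int) {
	r.status = code
	r.ResponseWriter.WriteHeader(code)
}

// instrument counts the request after the wrapped handler has returned,
// which avoids the header race described in the comment above.
func instrument(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		rec := &statusRecorder{ResponseWriter: w, status: http.StatusOK}
		next.ServeHTTP(rec, req)
		httpRequests.WithLabelValues(req.Method, strconv.Itoa(rec.status)).Inc()
	})
}

func main() {
	prometheus.MustRegister(httpRequests)
	http.Handle("/", instrument(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte("ok"))
	})))
	log.Fatal(http.ListenAndServe(":8080", nil))
}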
34
cmd/vendor/github.com/prometheus/client_golang/prometheus/metric.go
generated
vendored
@ -22,10 +22,8 @@ import (
|
||||
const separatorByte byte = 255
|
||||
|
||||
// A Metric models a single sample value with its meta data being exported to
|
||||
// Prometheus. Implementers of Metric in this package inclued Gauge, Counter,
|
||||
// Untyped, and Summary. Users can implement their own Metric types, but that
|
||||
// should be rarely needed. See the example for SelfCollector, which is also an
|
||||
// example for a user-implemented Metric.
|
||||
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
|
||||
// Histogram, Summary, and Untyped.
|
||||
type Metric interface {
|
||||
// Desc returns the descriptor for the Metric. This method idempotently
|
||||
// returns the same descriptor throughout the lifetime of the
|
||||
@ -36,21 +34,23 @@ type Metric interface {
|
||||
// Write encodes the Metric into a "Metric" Protocol Buffer data
|
||||
// transmission object.
|
||||
//
|
||||
// Implementers of custom Metric types must observe concurrency safety
|
||||
// as reads of this metric may occur at any time, and any blocking
|
||||
// occurs at the expense of total performance of rendering all
|
||||
// registered metrics. Ideally Metric implementations should support
|
||||
// concurrent readers.
|
||||
// Metric implementations must observe concurrency safety as reads of
|
||||
// this metric may occur at any time, and any blocking occurs at the
|
||||
// expense of total performance of rendering all registered
|
||||
// metrics. Ideally, Metric implementations should support concurrent
|
||||
// readers.
|
||||
//
|
||||
// The Prometheus client library attempts to minimize memory allocations
|
||||
// and will provide a pre-existing reset dto.Metric pointer. Prometheus
|
||||
// may recycle the dto.Metric proto message, so Metric implementations
|
||||
// should just populate the provided dto.Metric and then should not keep
|
||||
// any reference to it.
|
||||
//
|
||||
// While populating dto.Metric, labels must be sorted lexicographically.
|
||||
// (Implementers may find LabelPairSorter useful for that.)
|
||||
// While populating dto.Metric, it is the responsibility of the
|
||||
// implementation to ensure validity of the Metric protobuf (like valid
|
||||
// UTF-8 strings or syntactically valid metric and label names). It is
|
||||
// recommended to sort labels lexicographically. (Implementers may find
|
||||
// LabelPairSorter useful for that.) Callers of Write should still make
|
||||
// sure of sorting if they depend on it.
|
||||
Write(*dto.Metric) error
|
||||
// TODO(beorn7): The original rationale of passing in a pre-allocated
|
||||
// dto.Metric protobuf to save allocations has disappeared. The
|
||||
// signature of this method should be changed to "Write() (*dto.Metric,
|
||||
// error)".
|
||||
}
|
||||
|
||||
// Opts bundles the options for creating most Metric types. Each metric
|
||||
|
4
cmd/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
generated
vendored
@ -28,7 +28,7 @@ type processCollector struct {
|
||||
// NewProcessCollector returns a collector which exports the current state of
|
||||
// process metrics including cpu, memory and file descriptor usage as well as
|
||||
// the process start time for the given process id under the given namespace.
|
||||
func NewProcessCollector(pid int, namespace string) *processCollector {
|
||||
func NewProcessCollector(pid int, namespace string) Collector {
|
||||
return NewProcessCollectorPIDFn(
|
||||
func() (int, error) { return pid, nil },
|
||||
namespace,
|
||||
@ -43,7 +43,7 @@ func NewProcessCollector(pid int, namespace string) *processCollector {
|
||||
func NewProcessCollectorPIDFn(
|
||||
pidFn func() (int, error),
|
||||
namespace string,
|
||||
) *processCollector {
|
||||
) Collector {
|
||||
c := processCollector{
|
||||
pidFn: pidFn,
|
||||
collectFn: func(chan<- Metric) {},
|
||||
|
65
cmd/vendor/github.com/prometheus/client_golang/prometheus/push.go
generated
vendored
@ -1,65 +0,0 @@
|
||||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Copyright (c) 2013, The Prometheus Authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be found
|
||||
// in the LICENSE file.
|
||||
|
||||
package prometheus
|
||||
|
||||
// Push triggers a metric collection by the default registry and pushes all
|
||||
// collected metrics to the Pushgateway specified by addr. See the Pushgateway
|
||||
// documentation for detailed implications of the job and instance
|
||||
// parameter. instance can be left empty. You can use just host:port or ip:port
|
||||
// as url, in which case 'http://' is added automatically. You can also include
|
||||
// the schema in the URL. However, do not include the '/metrics/jobs/...' part.
|
||||
//
|
||||
// Note that all previously pushed metrics with the same job and instance will
|
||||
// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT'
|
||||
// to push to the Pushgateway.)
|
||||
func Push(job, instance, url string) error {
|
||||
return defRegistry.Push(job, instance, url, "PUT")
|
||||
}
|
||||
|
||||
// PushAdd works like Push, but only previously pushed metrics with the same
|
||||
// name (and the same job and instance) will be replaced. (It uses HTTP method
|
||||
// 'POST' to push to the Pushgateway.)
|
||||
func PushAdd(job, instance, url string) error {
|
||||
return defRegistry.Push(job, instance, url, "POST")
|
||||
}
|
||||
|
||||
// PushCollectors works like Push, but it does not collect from the default
|
||||
// registry. Instead, it collects from the provided collectors. It is a
|
||||
// convenient way to push only a few metrics.
|
||||
func PushCollectors(job, instance, url string, collectors ...Collector) error {
|
||||
return pushCollectors(job, instance, url, "PUT", collectors...)
|
||||
}
|
||||
|
||||
// PushAddCollectors works like PushAdd, but it does not collect from the
|
||||
// default registry. Instead, it collects from the provided collectors. It is a
|
||||
// convenient way to push only a few metrics.
|
||||
func PushAddCollectors(job, instance, url string, collectors ...Collector) error {
|
||||
return pushCollectors(job, instance, url, "POST", collectors...)
|
||||
}
|
||||
|
||||
func pushCollectors(job, instance, url, method string, collectors ...Collector) error {
|
||||
r := newRegistry()
|
||||
for _, collector := range collectors {
|
||||
if _, err := r.Register(collector); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return r.Push(job, instance, url, method)
|
||||
}
|
982
cmd/vendor/github.com/prometheus/client_golang/prometheus/registry.go
generated
vendored
File diff suppressed because it is too large
30
cmd/vendor/github.com/prometheus/client_golang/prometheus/summary.go
generated
vendored
@ -15,7 +15,6 @@ package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"math"
|
||||
"sort"
|
||||
"sync"
|
||||
@ -54,8 +53,8 @@ type Summary interface {
|
||||
Observe(float64)
|
||||
}
|
||||
|
||||
// DefObjectives are the default Summary quantile values.
|
||||
var (
|
||||
// DefObjectives are the default Summary quantile values.
|
||||
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
|
||||
|
||||
errQuantileLabelNotAllowed = fmt.Errorf(
|
||||
@ -140,11 +139,11 @@ type SummaryOpts struct {
|
||||
BufCap uint32
|
||||
}
|
||||
|
||||
// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge
|
||||
// method of perk/quantile is actually not working as advertised - and it might
|
||||
// be unfixable, as the underlying algorithm is apparently not capable of
|
||||
// merging summaries in the first place. To avoid using Merge, we are currently
|
||||
// adding observations to _each_ age bucket, i.e. the effort to add a sample is
|
||||
// Great fuck-up with the sliding-window decay algorithm... The Merge method of
|
||||
// perk/quantile is actually not working as advertised - and it might be
|
||||
// unfixable, as the underlying algorithm is apparently not capable of merging
|
||||
// summaries in the first place. To avoid using Merge, we are currently adding
|
||||
// observations to _each_ age bucket, i.e. the effort to add a sample is
|
||||
// essentially multiplied by the number of age buckets. When rotating age
|
||||
// buckets, we empty the previous head stream. On scrape time, we simply take
|
||||
// the quantiles from the head stream (no merging required). Result: More effort
|
||||
@ -228,12 +227,12 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
||||
}
|
||||
sort.Float64s(s.sortedObjectives)
|
||||
|
||||
s.Init(s) // Init self-collection.
|
||||
s.init(s) // Init self-collection.
|
||||
return s
|
||||
}
|
||||
|
||||
type summary struct {
|
||||
SelfCollector
|
||||
selfCollector
|
||||
|
||||
bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
|
||||
mtx sync.Mutex // Protects every other moving part.
|
||||
@ -391,7 +390,7 @@ func (s quantSort) Less(i, j int) bool {
|
||||
// (e.g. HTTP request latencies, partitioned by status code and method). Create
|
||||
// instances with NewSummaryVec.
|
||||
type SummaryVec struct {
|
||||
MetricVec
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
|
||||
@ -405,14 +404,9 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &SummaryVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
return newSummary(desc, opts, lvs...)
|
||||
},
|
||||
},
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newSummary(desc, opts, lvs...)
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
15
cmd/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
generated
vendored
@ -13,8 +13,6 @@
|
||||
|
||||
package prometheus
|
||||
|
||||
import "hash/fnv"
|
||||
|
||||
// Untyped is a Metric that represents a single numerical value that can
|
||||
// arbitrarily go up and down.
|
||||
//
|
||||
@ -58,7 +56,7 @@ func NewUntyped(opts UntypedOpts) Untyped {
|
||||
// labels. This is used if you want to count the same thing partitioned by
|
||||
// various dimensions. Create instances with NewUntypedVec.
|
||||
type UntypedVec struct {
|
||||
MetricVec
|
||||
*MetricVec
|
||||
}
|
||||
|
||||
// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
|
||||
@ -72,14 +70,9 @@ func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
|
||||
opts.ConstLabels,
|
||||
)
|
||||
return &UntypedVec{
|
||||
MetricVec: MetricVec{
|
||||
children: map[uint64]Metric{},
|
||||
desc: desc,
|
||||
hash: fnv.New64a(),
|
||||
newMetric: func(lvs ...string) Metric {
|
||||
return newValue(desc, UntypedValue, 0, lvs...)
|
||||
},
|
||||
},
|
||||
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
|
||||
return newValue(desc, UntypedValue, 0, lvs...)
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
8
cmd/vendor/github.com/prometheus/client_golang/prometheus/value.go
generated
vendored
@ -48,7 +48,7 @@ type value struct {
|
||||
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
valBits uint64
|
||||
|
||||
SelfCollector
|
||||
selfCollector
|
||||
|
||||
desc *Desc
|
||||
valType ValueType
|
||||
@ -68,7 +68,7 @@ func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...strin
|
||||
valBits: math.Float64bits(val),
|
||||
labelPairs: makeLabelPairs(desc, labelValues),
|
||||
}
|
||||
result.Init(result)
|
||||
result.init(result)
|
||||
return result
|
||||
}
|
||||
|
||||
@ -113,7 +113,7 @@ func (v *value) Write(out *dto.Metric) error {
|
||||
// library to back the implementations of CounterFunc, GaugeFunc, and
|
||||
// UntypedFunc.
|
||||
type valueFunc struct {
|
||||
SelfCollector
|
||||
selfCollector
|
||||
|
||||
desc *Desc
|
||||
valType ValueType
|
||||
@ -134,7 +134,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val
|
||||
function: function,
|
||||
labelPairs: makeLabelPairs(desc, nil),
|
||||
}
|
||||
result.Init(result)
|
||||
result.init(result)
|
||||
return result
|
||||
}
|
||||
|
||||
|
253
cmd/vendor/github.com/prometheus/client_golang/prometheus/vec.go
generated
vendored
@ -14,10 +14,10 @@
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"hash"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// MetricVec is a Collector to bundle metrics of the same name that
|
||||
@ -26,17 +26,32 @@ import (
|
||||
// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
|
||||
// provided in this package.
|
||||
type MetricVec struct {
|
||||
mtx sync.RWMutex // Protects not only children, but also hash and buf.
|
||||
children map[uint64]Metric
|
||||
mtx sync.RWMutex // Protects the children.
|
||||
children map[uint64][]metricWithLabelValues
|
||||
desc *Desc
|
||||
|
||||
// hash is our own hash instance to avoid repeated allocations.
|
||||
hash hash.Hash64
|
||||
// buf is used to copy string contents into it for hashing,
|
||||
// again to avoid allocations.
|
||||
buf bytes.Buffer
|
||||
newMetric func(labelValues ...string) Metric
|
||||
hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
|
||||
hashAddByte func(h uint64, b byte) uint64
|
||||
}
|
||||
|
||||
newMetric func(labelValues ...string) Metric
|
||||
// newMetricVec returns an initialized MetricVec. The concrete value is
|
||||
// returned for embedding into another struct.
|
||||
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
|
||||
return &MetricVec{
|
||||
children: map[uint64][]metricWithLabelValues{},
|
||||
desc: desc,
|
||||
newMetric: newMetric,
|
||||
hashAdd: hashAdd,
|
||||
hashAddByte: hashAddByte,
|
||||
}
|
||||
}
|
||||
|
||||
// metricWithLabelValues provides the metric and its label values for
|
||||
// disambiguation on hash collision.
|
||||
type metricWithLabelValues struct {
|
||||
values []string
|
||||
metric Metric
|
||||
}
|
||||
|
||||
// Describe implements Collector. The length of the returned slice
|
||||
@ -50,8 +65,10 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
|
||||
m.mtx.RLock()
|
||||
defer m.mtx.RUnlock()
|
||||
|
||||
for _, metric := range m.children {
|
||||
ch <- metric
|
||||
for _, metrics := range m.children {
|
||||
for _, metric := range metrics {
|
||||
ch <- metric.metric
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -80,14 +97,12 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
|
||||
// with a performance overhead (for creating and processing the Labels map).
|
||||
// See also the GaugeVec example.
|
||||
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
h, err := m.hashLabelValues(lvs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m.getOrCreateMetric(h, lvs...), nil
|
||||
|
||||
return m.getOrCreateMetricWithLabelValues(h, lvs), nil
|
||||
}
|
||||
|
||||
// GetMetricWith returns the Metric for the given Labels map (the label names
|
||||
@ -103,18 +118,12 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
|
||||
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
|
||||
// methods.
|
||||
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
h, err := m.hashLabels(labels)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lvs := make([]string, len(labels))
|
||||
for i, label := range m.desc.variableLabels {
|
||||
lvs[i] = labels[label]
|
||||
}
|
||||
return m.getOrCreateMetric(h, lvs...), nil
|
||||
|
||||
return m.getOrCreateMetricWithLabels(h, labels), nil
|
||||
}
|
||||
|
||||
// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
|
||||
@ -162,11 +171,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if _, has := m.children[h]; !has {
|
||||
return false
|
||||
}
|
||||
delete(m.children, h)
|
||||
return true
|
||||
return m.deleteByHashWithLabelValues(h, lvs)
|
||||
}
|
||||
|
||||
// Delete deletes the metric where the variable labels are the same as those
|
||||
@ -187,10 +192,50 @@ func (m *MetricVec) Delete(labels Labels) bool {
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if _, has := m.children[h]; !has {
|
||||
|
||||
return m.deleteByHashWithLabels(h, labels)
|
||||
}
|
||||
|
||||
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
|
||||
// there are multiple matches in the bucket, use lvs to select a metric and
|
||||
// remove only that metric.
|
||||
func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
|
||||
metrics, ok := m.children[h]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
delete(m.children, h)
|
||||
|
||||
i := m.findMetricWithLabelValues(metrics, lvs)
|
||||
if i >= len(metrics) {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(metrics) > 1 {
|
||||
m.children[h] = append(metrics[:i], metrics[i+1:]...)
|
||||
} else {
|
||||
delete(m.children, h)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteByHashWithLabels removes the metric from the hash bucket h. If there
|
||||
// are multiple matches in the bucket, use lvs to select a metric and remove
|
||||
// only that metric.
|
||||
func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
|
||||
metrics, ok := m.children[h]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
i := m.findMetricWithLabels(metrics, labels)
|
||||
if i >= len(metrics) {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(metrics) > 1 {
|
||||
m.children[h] = append(metrics[:i], metrics[i+1:]...)
|
||||
} else {
|
||||
delete(m.children, h)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@ -208,40 +253,152 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
|
||||
if len(vals) != len(m.desc.variableLabels) {
|
||||
return 0, errInconsistentCardinality
|
||||
}
|
||||
m.hash.Reset()
|
||||
h := hashNew()
|
||||
for _, val := range vals {
|
||||
m.buf.Reset()
|
||||
m.buf.WriteString(val)
|
||||
m.hash.Write(m.buf.Bytes())
|
||||
h = m.hashAdd(h, val)
|
||||
h = m.hashAddByte(h, model.SeparatorByte)
|
||||
}
|
||||
return m.hash.Sum64(), nil
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
|
||||
if len(labels) != len(m.desc.variableLabels) {
|
||||
return 0, errInconsistentCardinality
|
||||
}
|
||||
m.hash.Reset()
|
||||
h := hashNew()
|
||||
for _, label := range m.desc.variableLabels {
|
||||
val, ok := labels[label]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("label name %q missing in label map", label)
|
||||
}
|
||||
m.buf.Reset()
|
||||
m.buf.WriteString(val)
|
||||
m.hash.Write(m.buf.Bytes())
|
||||
h = m.hashAdd(h, val)
|
||||
h = m.hashAddByte(h, model.SeparatorByte)
|
||||
}
|
||||
return m.hash.Sum64(), nil
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric {
|
||||
metric, ok := m.children[hash]
|
||||
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
|
||||
// or creates it and returns the new one.
|
||||
//
|
||||
// This function holds the mutex.
|
||||
func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
|
||||
m.mtx.RLock()
|
||||
metric, ok := m.getMetricWithLabelValues(hash, lvs)
|
||||
m.mtx.RUnlock()
|
||||
if ok {
|
||||
return metric
|
||||
}
|
||||
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
metric, ok = m.getMetricWithLabelValues(hash, lvs)
|
||||
if !ok {
|
||||
// Copy labelValues. Otherwise, they would be allocated even if we don't go
|
||||
// down this code path.
|
||||
copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...)
|
||||
metric = m.newMetric(copiedLabelValues...)
|
||||
m.children[hash] = metric
|
||||
// Copy to avoid allocation in case we don't go down this code path.
|
||||
copiedLVs := make([]string, len(lvs))
|
||||
copy(copiedLVs, lvs)
|
||||
metric = m.newMetric(copiedLVs...)
|
||||
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
|
||||
}
|
||||
return metric
|
||||
}
|
||||
|
||||
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
|
||||
// or creates it and returns the new one.
|
||||
//
|
||||
// This function holds the mutex.
|
||||
func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
|
||||
m.mtx.RLock()
|
||||
metric, ok := m.getMetricWithLabels(hash, labels)
|
||||
m.mtx.RUnlock()
|
||||
if ok {
|
||||
return metric
|
||||
}
|
||||
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
metric, ok = m.getMetricWithLabels(hash, labels)
|
||||
if !ok {
|
||||
lvs := m.extractLabelValues(labels)
|
||||
metric = m.newMetric(lvs...)
|
||||
m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
|
||||
}
|
||||
return metric
|
||||
}
|
||||
|
||||
// getMetricWithLabelValues gets a metric while handling possible collisions in
|
||||
// the hash space. Must be called while holding read mutex.
|
||||
func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
|
||||
metrics, ok := m.children[h]
|
||||
if ok {
|
||||
if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
|
||||
return metrics[i].metric, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// getMetricWithLabels gets a metric while handling possible collisions in
|
||||
// the hash space. Must be called while holding read mutex.
|
||||
func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
|
||||
metrics, ok := m.children[h]
|
||||
if ok {
|
||||
if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
|
||||
return metrics[i].metric, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// findMetricWithLabelValues returns the index of the matching metric or
|
||||
// len(metrics) if not found.
|
||||
func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
|
||||
for i, metric := range metrics {
|
||||
if m.matchLabelValues(metric.values, lvs) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return len(metrics)
|
||||
}
|
||||
|
||||
// findMetricWithLabels returns the index of the matching metric or len(metrics)
|
||||
// if not found.
|
||||
func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
|
||||
for i, metric := range metrics {
|
||||
if m.matchLabels(metric.values, labels) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return len(metrics)
|
||||
}
|
||||
|
||||
func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
|
||||
if len(values) != len(lvs) {
|
||||
return false
|
||||
}
|
||||
for i, v := range values {
|
||||
if v != lvs[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
|
||||
if len(labels) != len(values) {
|
||||
return false
|
||||
}
|
||||
for i, k := range m.desc.variableLabels {
|
||||
if values[i] != labels[k] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *MetricVec) extractLabelValues(labels Labels) []string {
|
||||
labelValues := make([]string, len(labels))
|
||||
for i, k := range m.desc.variableLabels {
|
||||
labelValues[i] = labels[k]
|
||||
}
|
||||
return labelValues
|
||||
}
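From the caller's side the new bucket layout is invisible; the exported vector API behaves as before, and colliding label-value sets are simply disambiguated inside the bucket. A minimal usage sketch with a CounterVec (the metric name is illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_requests_total", // illustrative name
			Help: "Requests partitioned by method and code.",
		},
		[]string{"method", "code"},
	)
	prometheus.MustRegister(requests)

	// Both lookups go through MetricVec; label-value sets that collide in the
	// hash space now share a bucket and are told apart by their stored values.
	requests.WithLabelValues("GET", "200").Inc()
	requests.With(prometheus.Labels{"method": "POST", "code": "500"}).Add(3)

	// Deletion walks the same collision-aware lookup path.
	requests.DeleteLabelValues("GET", "200")
}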
|
||||
|
49
cmd/vendor/github.com/prometheus/common/expfmt/decode.go
generated
vendored
@ -46,10 +46,7 @@ func ResponseFormat(h http.Header) Format {
|
||||
return FmtUnknown
|
||||
}
|
||||
|
||||
const (
|
||||
textType = "text/plain"
|
||||
jsonType = "application/json"
|
||||
)
|
||||
const textType = "text/plain"
|
||||
|
||||
switch mediatype {
|
||||
case ProtoType:
|
||||
@ -66,22 +63,6 @@ func ResponseFormat(h http.Header) Format {
|
||||
return FmtUnknown
|
||||
}
|
||||
return FmtText
|
||||
|
||||
case jsonType:
|
||||
var prometheusAPIVersion string
|
||||
|
||||
if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
|
||||
prometheusAPIVersion = params["version"]
|
||||
} else {
|
||||
prometheusAPIVersion = h.Get("X-Prometheus-API-Version")
|
||||
}
|
||||
|
||||
switch prometheusAPIVersion {
|
||||
case "0.0.2", "":
|
||||
return fmtJSON2
|
||||
default:
|
||||
return FmtUnknown
|
||||
}
|
||||
}
|
||||
|
||||
return FmtUnknown
|
||||
@ -93,8 +74,6 @@ func NewDecoder(r io.Reader, format Format) Decoder {
|
||||
switch format {
|
||||
case FmtProtoDelim:
|
||||
return &protoDecoder{r: r}
|
||||
case fmtJSON2:
|
||||
return newJSON2Decoder(r)
|
||||
}
|
||||
return &textDecoder{r: r}
|
||||
}
|
||||
@ -107,10 +86,32 @@ type protoDecoder struct {
|
||||
// Decode implements the Decoder interface.
|
||||
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
|
||||
_, err := pbutil.ReadDelimited(d.r, v)
|
||||
return err
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
|
||||
return fmt.Errorf("invalid metric name %q", v.GetName())
|
||||
}
|
||||
for _, m := range v.GetMetric() {
|
||||
if m == nil {
|
||||
continue
|
||||
}
|
||||
for _, l := range m.GetLabel() {
|
||||
if l == nil {
|
||||
continue
|
||||
}
|
||||
if !model.LabelValue(l.GetValue()).IsValid() {
|
||||
return fmt.Errorf("invalid label value %q", l.GetValue())
|
||||
}
|
||||
if !model.LabelName(l.GetName()).IsValid() {
|
||||
return fmt.Errorf("invalid label name %q", l.GetName())
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// textDecoder implements the Decoder interface for the text protcol.
|
||||
// textDecoder implements the Decoder interface for the text protocol.
|
||||
type textDecoder struct {
|
||||
r io.Reader
|
||||
p TextParser
|
||||
|
2
cmd/vendor/github.com/prometheus/common/expfmt/encode.go
generated
vendored
@ -18,9 +18,9 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"bitbucket.org/ww/goautoneg"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/matttproud/golang_protobuf_extensions/pbutil"
|
||||
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
3
cmd/vendor/github.com/prometheus/common/expfmt/expfmt.go
generated
vendored
@ -29,9 +29,6 @@ const (
|
||||
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
|
||||
FmtProtoText Format = ProtoFmt + ` encoding=text`
|
||||
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
|
||||
|
||||
// fmtJSON2 is hidden as it is deprecated.
|
||||
fmtJSON2 Format = `application/json; version=0.0.2`
|
||||
)
|
||||
|
||||
const (
|
||||
|
4
cmd/vendor/github.com/prometheus/common/expfmt/fuzz.go
generated
vendored
@ -20,8 +20,8 @@ import "bytes"
|
||||
|
||||
// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
|
||||
//
|
||||
// go-fuzz-build github.com/prometheus/client_golang/text
|
||||
// go-fuzz -bin text-fuzz.zip -workdir fuzz
|
||||
// go-fuzz-build github.com/prometheus/common/expfmt
|
||||
// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
|
||||
//
|
||||
// Further input samples should go in the folder fuzz/corpus.
|
||||
func Fuzz(in []byte) int {
|
||||
|
162
cmd/vendor/github.com/prometheus/common/expfmt/json_decode.go
generated
vendored
@ -1,162 +0,0 @@
|
||||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
type json2Decoder struct {
|
||||
dec *json.Decoder
|
||||
fams []*dto.MetricFamily
|
||||
}
|
||||
|
||||
func newJSON2Decoder(r io.Reader) Decoder {
|
||||
return &json2Decoder{
|
||||
dec: json.NewDecoder(r),
|
||||
}
|
||||
}
|
||||
|
||||
type histogram002 struct {
|
||||
Labels model.LabelSet `json:"labels"`
|
||||
Values map[string]float64 `json:"value"`
|
||||
}
|
||||
|
||||
type counter002 struct {
|
||||
Labels model.LabelSet `json:"labels"`
|
||||
Value float64 `json:"value"`
|
||||
}
|
||||
|
||||
func protoLabelSet(base, ext model.LabelSet) []*dto.LabelPair {
|
||||
labels := base.Clone().Merge(ext)
|
||||
delete(labels, model.MetricNameLabel)
|
||||
|
||||
names := make([]string, 0, len(labels))
|
||||
for ln := range labels {
|
||||
names = append(names, string(ln))
|
||||
}
|
||||
sort.Strings(names)
|
||||
|
||||
pairs := make([]*dto.LabelPair, 0, len(labels))
|
||||
|
||||
for _, ln := range names {
|
||||
lv := labels[model.LabelName(ln)]
|
||||
|
||||
pairs = append(pairs, &dto.LabelPair{
|
||||
Name: proto.String(ln),
|
||||
Value: proto.String(string(lv)),
|
||||
})
|
||||
}
|
||||
|
||||
return pairs
|
||||
}
|
||||
|
||||
func (d *json2Decoder) more() error {
|
||||
var entities []struct {
|
||||
BaseLabels model.LabelSet `json:"baseLabels"`
|
||||
Docstring string `json:"docstring"`
|
||||
Metric struct {
|
||||
Type string `json:"type"`
|
||||
Values json.RawMessage `json:"value"`
|
||||
} `json:"metric"`
|
||||
}
|
||||
|
||||
if err := d.dec.Decode(&entities); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, e := range entities {
|
||||
f := &dto.MetricFamily{
|
||||
Name: proto.String(string(e.BaseLabels[model.MetricNameLabel])),
|
||||
Help: proto.String(e.Docstring),
|
||||
Type: dto.MetricType_UNTYPED.Enum(),
|
||||
Metric: []*dto.Metric{},
|
||||
}
|
||||
|
||||
d.fams = append(d.fams, f)
|
||||
|
||||
switch e.Metric.Type {
|
||||
case "counter", "gauge":
|
||||
var values []counter002
|
||||
|
||||
if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
|
||||
return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
|
||||
}
|
||||
|
||||
for _, ctr := range values {
|
||||
f.Metric = append(f.Metric, &dto.Metric{
|
||||
Label: protoLabelSet(e.BaseLabels, ctr.Labels),
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(ctr.Value),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
case "histogram":
|
||||
var values []histogram002
|
||||
|
||||
if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
|
||||
return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
|
||||
}
|
||||
|
||||
for _, hist := range values {
|
||||
quants := make([]string, 0, len(values))
|
||||
for q := range hist.Values {
|
||||
quants = append(quants, q)
|
||||
}
|
||||
|
||||
sort.Strings(quants)
|
||||
|
||||
for _, q := range quants {
|
||||
value := hist.Values[q]
|
||||
// The correct label is "quantile" but to not break old expressions
|
||||
// this remains "percentile"
|
||||
hist.Labels["percentile"] = model.LabelValue(q)
|
||||
|
||||
f.Metric = append(f.Metric, &dto.Metric{
|
||||
Label: protoLabelSet(e.BaseLabels, hist.Labels),
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(value),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown metric type %q", e.Metric.Type)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode implements the Decoder interface.
|
||||
func (d *json2Decoder) Decode(v *dto.MetricFamily) error {
|
||||
if len(d.fams) == 0 {
|
||||
if err := d.more(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
*v = *d.fams[0]
|
||||
d.fams = d.fams[1:]
|
||||
|
||||
return nil
|
||||
}
|
30
cmd/vendor/github.com/prometheus/common/expfmt/text_create.go
generated
vendored
@ -14,7 +14,6 @@
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
@ -26,9 +25,12 @@ import (
|
||||
|
||||
// MetricFamilyToText converts a MetricFamily proto message into text format and
|
||||
// writes the resulting lines to 'out'. It returns the number of bytes written
|
||||
// and any error encountered. This function does not perform checks on the
|
||||
// content of the metric and label names, i.e. invalid metric or label names
|
||||
// and any error encountered. The output will have the same order as the input,
|
||||
// no further sorting is performed. Furthermore, this function assumes the input
|
||||
// is already sanitized and does not perform any sanity checks. If the input
|
||||
// contains duplicate metrics or invalid metric or label names, the conversion
|
||||
// will result in invalid text format output.
|
||||
//
|
||||
// This method fulfills the type 'prometheus.encoder'.
|
||||
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
|
||||
var written int
|
||||
@ -285,21 +287,17 @@ func labelPairsToText(
|
||||
return written, nil
|
||||
}
|
||||
|
||||
var (
|
||||
escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
|
||||
escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
|
||||
)
|
||||
|
||||
// escapeString replaces '\' by '\\', new line character by '\n', and - if
|
||||
// includeDoubleQuote is true - '"' by '\"'.
|
||||
func escapeString(v string, includeDoubleQuote bool) string {
|
||||
result := bytes.NewBuffer(make([]byte, 0, len(v)))
|
||||
for _, c := range v {
|
||||
switch {
|
||||
case c == '\\':
|
||||
result.WriteString(`\\`)
|
||||
case includeDoubleQuote && c == '"':
|
||||
result.WriteString(`\"`)
|
||||
case c == '\n':
|
||||
result.WriteString(`\n`)
|
||||
default:
|
||||
result.WriteRune(c)
|
||||
}
|
||||
if includeDoubleQuote {
|
||||
return escapeWithDoubleQuote.Replace(v)
|
||||
}
|
||||
return result.String()
|
||||
|
||||
return escape.Replace(v)
|
||||
}
|
||||
|
9
cmd/vendor/github.com/prometheus/common/expfmt/text_parse.go
generated
vendored
@ -47,7 +47,7 @@ func (e ParseError) Error() string {
|
||||
}
|
||||
|
||||
// TextParser is used to parse the simple and flat text-based exchange format. Its
|
||||
// nil value is ready to use.
|
||||
// zero value is ready to use.
|
||||
type TextParser struct {
|
||||
metricFamiliesByName map[string]*dto.MetricFamily
|
||||
buf *bufio.Reader // Where the parsed input is read through.
|
||||
@ -108,6 +108,13 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
|
||||
delete(p.metricFamiliesByName, k)
|
||||
}
|
||||
}
|
||||
// If p.err is io.EOF now, we have run into a premature end of the input
|
||||
// stream. Turn this error into something nicer and more
|
||||
// meaningful. (io.EOF is often used as a signal for the legitimate end
|
||||
// of an input stream.)
|
||||
if p.err == io.EOF {
|
||||
p.parseError("unexpected end of input stream")
|
||||
}
|
||||
return p.metricFamiliesByName, p.err
|
||||
}
|
||||
|
||||
|
33
cmd/vendor/github.com/prometheus/common/model/alert.go
generated
vendored
@ -35,8 +35,9 @@ type Alert struct {
|
||||
Annotations LabelSet `json:"annotations"`
|
||||
|
||||
// The known time range for this alert. Both ends are optional.
|
||||
StartsAt time.Time `json:"startsAt,omitempty"`
|
||||
EndsAt time.Time `json:"endsAt,omitempty"`
|
||||
StartsAt time.Time `json:"startsAt,omitempty"`
|
||||
EndsAt time.Time `json:"endsAt,omitempty"`
|
||||
GeneratorURL string `json:"generatorURL"`
|
||||
}
|
||||
|
||||
// Name returns the name of the alert. It is equivalent to the "alertname" label.
|
||||
@ -60,10 +61,16 @@ func (a *Alert) String() string {
|
||||
|
||||
// Resolved returns true iff the activity interval ended in the past.
|
||||
func (a *Alert) Resolved() bool {
|
||||
return a.ResolvedAt(time.Now())
|
||||
}
|
||||
|
||||
// ResolvedAt returns true iff the activity interval ended before
|
||||
// the given timestamp.
|
||||
func (a *Alert) ResolvedAt(ts time.Time) bool {
|
||||
if a.EndsAt.IsZero() {
|
||||
return false
|
||||
}
|
||||
return !a.EndsAt.After(time.Now())
|
||||
return !a.EndsAt.After(ts)
|
||||
}
|
||||
|
||||
// Status returns the status of the alert.
|
||||
@ -74,6 +81,26 @@ func (a *Alert) Status() AlertStatus {
|
||||
return AlertFiring
|
||||
}
|
||||
|
||||
// Validate checks whether the alert data is inconsistent.
|
||||
func (a *Alert) Validate() error {
|
||||
if a.StartsAt.IsZero() {
|
||||
return fmt.Errorf("start time missing")
|
||||
}
|
||||
if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
|
||||
return fmt.Errorf("start time must be before end time")
|
||||
}
|
||||
if err := a.Labels.Validate(); err != nil {
|
||||
return fmt.Errorf("invalid label set: %s", err)
|
||||
}
|
||||
if len(a.Labels) == 0 {
|
||||
return fmt.Errorf("at least one label pair required")
|
||||
}
|
||||
if err := a.Annotations.Validate(); err != nil {
|
||||
return fmt.Errorf("invalid annotations: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Alerts is a list of alerts that can be sorted in chronological order.
|
||||
type Alerts []*Alert
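The added Validate and ResolvedAt methods let callers check alert payloads and evaluate the activity interval against an arbitrary clock. A small sketch, assuming this vendored version of the model package:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels:   model.LabelSet{"alertname": "HighLatency"},
		StartsAt: time.Now().Add(-10 * time.Minute),
		EndsAt:   time.Now().Add(-time.Minute),
	}
	if err := a.Validate(); err != nil {
		fmt.Println("invalid alert:", err)
	}
	// ResolvedAt evaluates the activity interval against an arbitrary clock.
	fmt.Println(a.ResolvedAt(time.Now())) // true: EndsAt lies in the past
}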
|
||||
|
||||
|
42
cmd/vendor/github.com/prometheus/common/model/fnv.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
// Inline and byte-free variant of hash/fnv's fnv64a.
|
||||
|
||||
const (
|
||||
offset64 = 14695981039346656037
|
||||
prime64 = 1099511628211
|
||||
)
|
||||
|
||||
// hashNew initializes a new fnv64a hash value.
|
||||
func hashNew() uint64 {
|
||||
return offset64
|
||||
}
|
||||
|
||||
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
|
||||
func hashAdd(h uint64, s string) uint64 {
|
||||
for i := 0; i < len(s); i++ {
|
||||
h ^= uint64(s[i])
|
||||
h *= prime64
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
|
||||
func hashAddByte(h uint64, b byte) uint64 {
|
||||
h ^= uint64(b)
|
||||
h *= prime64
|
||||
return h
|
||||
}
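These helpers are a byte-free inlining of 64-bit FNV-1a. They are unexported, so the following standalone sketch re-declares hashAdd with the same constants purely to show that it agrees with hash/fnv from the standard library:

package main

import (
	"fmt"
	"hash/fnv"
)

// Re-declared here only for the sketch; the real helpers live, unexported,
// in package model.
const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

func hashAdd(h uint64, s string) uint64 {
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func main() {
	// Byte-free inline variant.
	sum := hashAdd(offset64, "http_requests_total")

	// Reference implementation from the standard library (FNV-1a, 64 bit).
	ref := fnv.New64a()
	ref.Write([]byte("http_requests_total"))

	fmt.Println(sum == ref.Sum64()) // true
}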
|
32
cmd/vendor/github.com/prometheus/common/model/labels.go
generated
vendored
@ -17,8 +17,8 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -80,20 +80,37 @@ const (
|
||||
QuantileLabel = "quantile"
|
||||
)
|
||||
|
||||
// LabelNameRE is a regular expression matching valid label names.
|
||||
// LabelNameRE is a regular expression matching valid label names. Note that the
|
||||
// IsValid method of LabelName performs the same check but faster than a match
|
||||
// with this regular expression.
|
||||
var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
|
||||
|
||||
// A LabelName is a key for a LabelSet or Metric. It has a value associated
|
||||
// therewith.
|
||||
type LabelName string
|
||||
|
||||
// IsValid is true iff the label name matches the pattern of LabelNameRE. This
|
||||
// method, however, does not use LabelNameRE for the check but a much faster
|
||||
// hardcoded implementation.
|
||||
func (ln LabelName) IsValid() bool {
|
||||
if len(ln) == 0 {
|
||||
return false
|
||||
}
|
||||
for i, b := range ln {
|
||||
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var s string
|
||||
if err := unmarshal(&s); err != nil {
|
||||
return err
|
||||
}
|
||||
if !LabelNameRE.MatchString(s) {
|
||||
if !LabelName(s).IsValid() {
|
||||
return fmt.Errorf("%q is not a valid label name", s)
|
||||
}
|
||||
*ln = LabelName(s)
|
||||
@ -106,7 +123,7 @@ func (ln *LabelName) UnmarshalJSON(b []byte) error {
|
||||
if err := json.Unmarshal(b, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
if !LabelNameRE.MatchString(s) {
|
||||
if !LabelName(s).IsValid() {
|
||||
return fmt.Errorf("%q is not a valid label name", s)
|
||||
}
|
||||
*ln = LabelName(s)
|
||||
@ -139,6 +156,11 @@ func (l LabelNames) String() string {
|
||||
// A LabelValue is an associated value for a LabelName.
|
||||
type LabelValue string
|
||||
|
||||
// IsValid returns true iff the string is a valid UTF8.
|
||||
func (lv LabelValue) IsValid() bool {
|
||||
return utf8.ValidString(string(lv))
|
||||
}
|
||||
|
||||
// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
|
||||
type LabelValues []LabelValue
|
||||
|
||||
@ -147,7 +169,7 @@ func (l LabelValues) Len() int {
|
||||
}
|
||||
|
||||
func (l LabelValues) Less(i, j int) bool {
|
||||
return sort.StringsAreSorted([]string{string(l[i]), string(l[j])})
|
||||
return string(l[i]) < string(l[j])
|
||||
}
|
||||
|
||||
func (l LabelValues) Swap(i, j int) {
|
||||
|
18
cmd/vendor/github.com/prometheus/common/model/labelset.go
generated
vendored
@ -27,6 +27,21 @@ import (
|
||||
// match.
|
||||
type LabelSet map[LabelName]LabelValue
|
||||
|
||||
// Validate checks whether all names and values in the label set
|
||||
// are valid.
|
||||
func (ls LabelSet) Validate() error {
|
||||
for ln, lv := range ls {
|
||||
if !ln.IsValid() {
|
||||
return fmt.Errorf("invalid name %q", ln)
|
||||
}
|
||||
if !lv.IsValid() {
|
||||
return fmt.Errorf("invalid value %q", lv)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Equal returns true iff both label sets have exactly the same key/value pairs.
|
||||
func (ls LabelSet) Equal(o LabelSet) bool {
|
||||
if len(ls) != len(o) {
|
||||
return false
|
||||
@ -90,6 +105,7 @@ func (ls LabelSet) Before(o LabelSet) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Clone returns a copy of the label set.
|
||||
func (ls LabelSet) Clone() LabelSet {
|
||||
lsn := make(LabelSet, len(ls))
|
||||
for ln, lv := range ls {
|
||||
@ -144,7 +160,7 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error {
|
||||
// LabelName as a string and does not call its UnmarshalJSON method.
|
||||
// Thus, we have to replicate the behavior here.
|
||||
for ln := range m {
|
||||
if !LabelNameRE.MatchString(string(ln)) {
|
||||
if !ln.IsValid() {
|
||||
return fmt.Errorf("%q is not a valid label name", ln)
|
||||
}
|
||||
}
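LabelSet.Validate (together with LabelName.IsValid and LabelValue.IsValid from labels.go) lets client code reject bad label data before it reaches a registry. A short usage sketch, assuming this vendored version of the model package:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ls := model.LabelSet{
		"job":      "etcd",
		"0invalid": "value", // label names must not start with a digit
	}
	if err := ls.Validate(); err != nil {
		fmt.Println("rejected:", err) // rejected: invalid name "0invalid"
	}

	// The fast validation path used internally is exported as well.
	fmt.Println(model.LabelName("instance").IsValid()) // true
}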
|
||||
|
24
cmd/vendor/github.com/prometheus/common/model/metric.go
generated
vendored
@ -15,11 +15,18 @@ package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var separator = []byte{0}
|
||||
var (
|
||||
separator = []byte{0}
|
||||
// MetricNameRE is a regular expression matching valid metric
|
||||
// names. Note that the IsValidMetricName function performs the same
|
||||
// check but faster than a match with this regular expression.
|
||||
MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
|
||||
)
|
||||
|
||||
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
|
||||
// a singleton and refers to one and only one stream of samples.
|
||||
@ -79,3 +86,18 @@ func (m Metric) Fingerprint() Fingerprint {
|
||||
func (m Metric) FastFingerprint() Fingerprint {
|
||||
return LabelSet(m).FastFingerprint()
|
||||
}
|
||||
|
||||
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
|
||||
// This function, however, does not use MetricNameRE for the check but a much
|
||||
// faster hardcoded implementation.
|
||||
func IsValidMetricName(n LabelValue) bool {
|
||||
if len(n) == 0 {
|
||||
return false
|
||||
}
|
||||
for i, b := range n {
|
||||
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
2
cmd/vendor/github.com/prometheus/common/model/model.go
generated
vendored
@ -12,5 +12,5 @@
|
||||
// limitations under the License.
|
||||
|
||||
// Package model contains common data structures that are shared across
|
||||
// Prometheus componenets and libraries.
|
||||
// Prometheus components and libraries.
|
||||
package model
|
||||
|
108
cmd/vendor/github.com/prometheus/common/model/signature.go
generated
vendored
@ -14,11 +14,7 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"hash"
|
||||
"hash/fnv"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
|
||||
@ -28,30 +24,9 @@ const SeparatorByte byte = 255
|
||||
|
||||
var (
|
||||
// cache the signature of an empty label set.
|
||||
emptyLabelSignature = fnv.New64a().Sum64()
|
||||
|
||||
hashAndBufPool sync.Pool
|
||||
emptyLabelSignature = hashNew()
|
||||
)
|
||||
|
||||
type hashAndBuf struct {
|
||||
h hash.Hash64
|
||||
b bytes.Buffer
|
||||
}
|
||||
|
||||
func getHashAndBuf() *hashAndBuf {
|
||||
hb := hashAndBufPool.Get()
|
||||
if hb == nil {
|
||||
return &hashAndBuf{h: fnv.New64a()}
|
||||
}
|
||||
return hb.(*hashAndBuf)
|
||||
}
|
||||
|
||||
func putHashAndBuf(hb *hashAndBuf) {
|
||||
hb.h.Reset()
|
||||
hb.b.Reset()
|
||||
hashAndBufPool.Put(hb)
|
||||
}
|
||||
|
||||
// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
|
||||
// given label set. (Collisions are possible but unlikely if the number of label
|
||||
// sets the function is applied to is small.)
|
||||
@ -66,18 +41,14 @@ func LabelsToSignature(labels map[string]string) uint64 {
|
||||
}
|
||||
sort.Strings(labelNames)
|
||||
|
||||
hb := getHashAndBuf()
|
||||
defer putHashAndBuf(hb)
|
||||
|
||||
sum := hashNew()
|
||||
for _, labelName := range labelNames {
|
||||
hb.b.WriteString(labelName)
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.b.WriteString(labels[labelName])
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.h.Write(hb.b.Bytes())
|
||||
hb.b.Reset()
|
||||
sum = hashAdd(sum, labelName)
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
sum = hashAdd(sum, labels[labelName])
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
}
|
||||
return hb.h.Sum64()
|
||||
return sum
|
||||
}
|
||||
|
||||
// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
|
||||
@ -93,18 +64,14 @@ func labelSetToFingerprint(ls LabelSet) Fingerprint {
|
||||
}
|
||||
sort.Sort(labelNames)
|
||||
|
||||
hb := getHashAndBuf()
|
||||
defer putHashAndBuf(hb)
|
||||
|
||||
sum := hashNew()
|
||||
for _, labelName := range labelNames {
|
||||
hb.b.WriteString(string(labelName))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.b.WriteString(string(ls[labelName]))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.h.Write(hb.b.Bytes())
|
||||
hb.b.Reset()
|
||||
sum = hashAdd(sum, string(labelName))
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
sum = hashAdd(sum, string(ls[labelName]))
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
}
|
||||
return Fingerprint(hb.h.Sum64())
|
||||
return Fingerprint(sum)
|
||||
}
|
||||
|
||||
// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a
|
||||
@ -116,17 +83,12 @@ func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
|
||||
}
|
||||
|
||||
var result uint64
|
||||
hb := getHashAndBuf()
|
||||
defer putHashAndBuf(hb)
|
||||
|
||||
for labelName, labelValue := range ls {
|
||||
hb.b.WriteString(string(labelName))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.b.WriteString(string(labelValue))
|
||||
hb.h.Write(hb.b.Bytes())
|
||||
result ^= hb.h.Sum64()
|
||||
hb.h.Reset()
|
||||
hb.b.Reset()
|
||||
sum := hashNew()
|
||||
sum = hashAdd(sum, string(labelName))
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
sum = hashAdd(sum, string(labelValue))
|
||||
result ^= sum
|
||||
}
|
||||
return Fingerprint(result)
|
||||
}
|
||||
@ -136,24 +98,20 @@ func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
|
||||
// specified LabelNames into the signature calculation. The labels passed in
|
||||
// will be sorted by this function.
|
||||
func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
|
||||
if len(m) == 0 || len(labels) == 0 {
|
||||
if len(labels) == 0 {
|
||||
return emptyLabelSignature
|
||||
}
|
||||
|
||||
sort.Sort(LabelNames(labels))
|
||||
|
||||
hb := getHashAndBuf()
|
||||
defer putHashAndBuf(hb)
|
||||
|
||||
sum := hashNew()
|
||||
for _, label := range labels {
|
||||
hb.b.WriteString(string(label))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.b.WriteString(string(m[label]))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.h.Write(hb.b.Bytes())
|
||||
hb.b.Reset()
|
||||
sum = hashAdd(sum, string(label))
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
sum = hashAdd(sum, string(m[label]))
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
}
|
||||
return hb.h.Sum64()
|
||||
return sum
|
||||
}
|
||||
|
||||
// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
|
||||
@ -175,16 +133,12 @@ func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
|
||||
}
|
||||
sort.Sort(labelNames)
|
||||
|
||||
hb := getHashAndBuf()
|
||||
defer putHashAndBuf(hb)
|
||||
|
||||
sum := hashNew()
|
||||
for _, labelName := range labelNames {
|
||||
hb.b.WriteString(string(labelName))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.b.WriteString(string(m[labelName]))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.h.Write(hb.b.Bytes())
|
||||
hb.b.Reset()
|
||||
sum = hashAdd(sum, string(labelName))
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
sum = hashAdd(sum, string(m[labelName]))
|
||||
sum = hashAddByte(sum, SeparatorByte)
|
||||
}
|
||||
return hb.h.Sum64()
|
||||
return sum
|
||||
}
|
||||
|
46
cmd/vendor/github.com/prometheus/common/model/silence.go
generated
vendored
@ -44,6 +44,21 @@ func (m *Matcher) UnmarshalJSON(b []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate returns true iff all fields of the matcher have valid values.
|
||||
func (m *Matcher) Validate() error {
|
||||
if !m.Name.IsValid() {
|
||||
return fmt.Errorf("invalid name %q", m.Name)
|
||||
}
|
||||
if m.IsRegex {
|
||||
if _, err := regexp.Compile(m.Value); err != nil {
|
||||
return fmt.Errorf("invalid regular expression %q", m.Value)
|
||||
}
|
||||
} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
|
||||
return fmt.Errorf("invalid value %q", m.Value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Silence defines the representation of a silence definition
|
||||
// in the Prometheus eco-system.
|
||||
type Silence struct {
|
||||
@ -58,3 +73,34 @@ type Silence struct {
|
||||
CreatedBy string `json:"createdBy"`
|
||||
Comment string `json:"comment,omitempty"`
|
||||
}
|
||||
|
||||
// Validate returns true iff all fields of the silence have valid values.
|
||||
func (s *Silence) Validate() error {
|
||||
if len(s.Matchers) == 0 {
|
||||
return fmt.Errorf("at least one matcher required")
|
||||
}
|
||||
for _, m := range s.Matchers {
|
||||
if err := m.Validate(); err != nil {
|
||||
return fmt.Errorf("invalid matcher: %s", err)
|
||||
}
|
||||
}
|
||||
if s.StartsAt.IsZero() {
|
||||
return fmt.Errorf("start time missing")
|
||||
}
|
||||
if s.EndsAt.IsZero() {
|
||||
return fmt.Errorf("end time missing")
|
||||
}
|
||||
if s.EndsAt.Before(s.StartsAt) {
|
||||
return fmt.Errorf("start time must be before end time")
|
||||
}
|
||||
if s.CreatedBy == "" {
|
||||
return fmt.Errorf("creator information missing")
|
||||
}
|
||||
if s.Comment == "" {
|
||||
return fmt.Errorf("comment missing")
|
||||
}
|
||||
if s.CreatedAt.IsZero() {
|
||||
return fmt.Errorf("creation timestamp missing")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
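The Validate methods on Matcher and Silence are new in this version. Below is a small sketch of exercising Matcher.Validate; it relies only on the field names the method references above (Name, Value, IsRegex), and the sample values are made up.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	matchers := []model.Matcher{
		{Name: "job", Value: "etcd", IsRegex: false},
		{Name: "instance", Value: "10\\.0\\..*:2379", IsRegex: true},
		{Name: "job", Value: "", IsRegex: false}, // empty value, expected to fail
	}

	for i, m := range matchers {
		if err := m.Validate(); err != nil {
			fmt.Printf("matcher %d rejected: %v\n", i, err)
			continue
		}
		fmt.Printf("matcher %d is valid\n", i)
	}
}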
61
cmd/vendor/github.com/prometheus/common/model/time.go
generated
vendored
@ -163,51 +163,70 @@ func (t *Time) UnmarshalJSON(b []byte) error {
|
||||
// This type should not propagate beyond the scope of input/output processing.
|
||||
type Duration time.Duration
|
||||
|
||||
var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
|
||||
|
||||
// StringToDuration parses a string into a time.Duration, assuming that a year
|
||||
// a day always has 24h.
|
||||
// always has 365d, a week always has 7d, and a day always has 24h.
|
||||
func ParseDuration(durationStr string) (Duration, error) {
|
||||
matches := durationRE.FindStringSubmatch(durationStr)
|
||||
if len(matches) != 3 {
|
||||
return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
|
||||
}
|
||||
durSeconds, _ := strconv.Atoi(matches[1])
|
||||
dur := time.Duration(durSeconds) * time.Second
|
||||
unit := matches[2]
|
||||
switch unit {
|
||||
var (
|
||||
n, _ = strconv.Atoi(matches[1])
|
||||
dur = time.Duration(n) * time.Millisecond
|
||||
)
|
||||
switch unit := matches[2]; unit {
|
||||
case "y":
|
||||
dur *= 1000 * 60 * 60 * 24 * 365
|
||||
case "w":
|
||||
dur *= 1000 * 60 * 60 * 24 * 7
|
||||
case "d":
|
||||
dur *= 60 * 60 * 24
|
||||
dur *= 1000 * 60 * 60 * 24
|
||||
case "h":
|
||||
dur *= 60 * 60
|
||||
dur *= 1000 * 60 * 60
|
||||
case "m":
|
||||
dur *= 60
|
||||
dur *= 1000 * 60
|
||||
case "s":
|
||||
dur *= 1
|
||||
dur *= 1000
|
||||
case "ms":
|
||||
// Value already correct
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
|
||||
}
|
||||
return Duration(dur), nil
|
||||
}
|
||||
|
||||
var durationRE = regexp.MustCompile("^([0-9]+)([ywdhms]+)$")
|
||||
|
||||
func (d Duration) String() string {
|
||||
seconds := int64(time.Duration(d) / time.Second)
|
||||
var (
|
||||
ms = int64(time.Duration(d) / time.Millisecond)
|
||||
unit = "ms"
|
||||
)
|
||||
factors := map[string]int64{
|
||||
"d": 60 * 60 * 24,
|
||||
"h": 60 * 60,
|
||||
"m": 60,
|
||||
"s": 1,
|
||||
"y": 1000 * 60 * 60 * 24 * 365,
|
||||
"w": 1000 * 60 * 60 * 24 * 7,
|
||||
"d": 1000 * 60 * 60 * 24,
|
||||
"h": 1000 * 60 * 60,
|
||||
"m": 1000 * 60,
|
||||
"s": 1000,
|
||||
"ms": 1,
|
||||
}
|
||||
unit := "s"
|
||||
|
||||
switch int64(0) {
|
||||
case seconds % factors["d"]:
|
||||
case ms % factors["y"]:
|
||||
unit = "y"
|
||||
case ms % factors["w"]:
|
||||
unit = "w"
|
||||
case ms % factors["d"]:
|
||||
unit = "d"
|
||||
case seconds % factors["h"]:
|
||||
case ms % factors["h"]:
|
||||
unit = "h"
|
||||
case seconds % factors["m"]:
|
||||
case ms % factors["m"]:
|
||||
unit = "m"
|
||||
case ms % factors["s"]:
|
||||
unit = "s"
|
||||
}
|
||||
return fmt.Sprintf("%v%v", seconds/factors[unit], unit)
|
||||
return fmt.Sprintf("%v%v", ms/factors[unit], unit)
|
||||
}
|
||||
|
||||
// MarshalYAML implements the yaml.Marshaler interface.
|
||||
|
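The duration changes above switch the internal base unit from seconds to milliseconds and add the y, w, and ms units. A short sketch of the round trip through ParseDuration and String, assuming the vendored model package; the expected output follows from the factors table above.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	d, err := model.ParseDuration("90m")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}

	// Duration converts to time.Duration for use with the standard library.
	fmt.Println(time.Duration(d)) // 1h30m0s

	// String picks the largest unit that divides the value evenly.
	fmt.Println(d.String()) // 90m
}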
34
cmd/vendor/github.com/prometheus/common/model/value.go
generated
vendored
@ -16,11 +16,28 @@ package model
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
|
||||
// non-existing sample pair. It is a SamplePair with timestamp Earliest and
|
||||
// value 0.0. Note that the natural zero value of SamplePair has a timestamp
|
||||
// of 0, which is possible to appear in a real SamplePair and thus not
|
||||
// suitable to signal a non-existing SamplePair.
|
||||
ZeroSamplePair = SamplePair{Timestamp: Earliest}
|
||||
|
||||
// ZeroSample is the pseudo zero-value of Sample used to signal a
|
||||
// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
|
||||
// and metric nil. Note that the natural zero value of Sample has a timestamp
|
||||
// of 0, which is possible to appear in a real Sample and thus not suitable
|
||||
// to signal a non-existing Sample.
|
||||
ZeroSample = Sample{Timestamp: Earliest}
|
||||
)
|
||||
|
||||
// A SampleValue is a representation of a value for a given sample at a given
|
||||
// time.
|
||||
type SampleValue float64
|
||||
@ -43,8 +60,14 @@ func (v *SampleValue) UnmarshalJSON(b []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Equal returns true if the value of v and o is equal or if both are NaN. Note
|
||||
// that v==o is false if both are NaN. If you want the conventional float
|
||||
// behavior, use == to compare two SampleValues.
|
||||
func (v SampleValue) Equal(o SampleValue) bool {
|
||||
return v == o
|
||||
if v == o {
|
||||
return true
|
||||
}
|
||||
return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
|
||||
}
|
||||
|
||||
func (v SampleValue) String() string {
|
||||
@ -77,9 +100,9 @@ func (s *SamplePair) UnmarshalJSON(b []byte) error {
|
||||
}
|
||||
|
||||
// Equal returns true if this SamplePair and o have equal Values and equal
|
||||
// Timestamps.
|
||||
// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
|
||||
func (s *SamplePair) Equal(o *SamplePair) bool {
|
||||
return s == o || (s.Value == o.Value && s.Timestamp.Equal(o.Timestamp))
|
||||
return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
|
||||
}
|
||||
|
||||
func (s SamplePair) String() string {
|
||||
@ -93,7 +116,8 @@ type Sample struct {
|
||||
Timestamp Time `json:"timestamp"`
|
||||
}
|
||||
|
||||
// Equal compares first the metrics, then the timestamp, then the value.
|
||||
// Equal compares first the metrics, then the timestamp, then the value. The
|
||||
// semantics of value equality is defined by SampleValue.Equal.
|
||||
func (s *Sample) Equal(o *Sample) bool {
|
||||
if s == o {
|
||||
return true
|
||||
@ -105,7 +129,7 @@ func (s *Sample) Equal(o *Sample) bool {
|
||||
if !s.Timestamp.Equal(o.Timestamp) {
|
||||
return false
|
||||
}
|
||||
if s.Value != o.Value {
|
||||
if !s.Value.Equal(o.Value) {
|
||||
return false
|
||||
}
|
||||
|
||||
|
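SampleValue.Equal now treats two NaNs as equal, which plain == never does. A quick sketch of the difference, using only the vendored model package.

package main

import (
	"fmt"
	"math"

	"github.com/prometheus/common/model"
)

func main() {
	a := model.SampleValue(math.NaN())
	b := model.SampleValue(math.NaN())

	// IEEE 754 semantics: NaN is never == NaN.
	fmt.Println(a == b) // false

	// Equal treats two NaNs as equal, which the Sample and SamplePair
	// comparisons above rely on.
	fmt.Println(a.Equal(b)) // true
}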
13
cmd/vendor/github.com/prometheus/procfs/fs.go
generated
vendored
@ -27,14 +27,7 @@ func NewFS(mountPoint string) (FS, error) {
|
||||
return FS(mountPoint), nil
|
||||
}
|
||||
|
||||
func (fs FS) stat(p string) (os.FileInfo, error) {
|
||||
return os.Stat(path.Join(string(fs), p))
|
||||
}
|
||||
|
||||
func (fs FS) open(p string) (*os.File, error) {
|
||||
return os.Open(path.Join(string(fs), p))
|
||||
}
|
||||
|
||||
func (fs FS) readlink(p string) (string, error) {
|
||||
return os.Readlink(path.Join(string(fs), p))
|
||||
// Path returns the path of the given subsystem relative to the procfs root.
|
||||
func (fs FS) Path(p ...string) string {
|
||||
return path.Join(append([]string{string(fs)}, p...)...)
|
||||
}
|
||||
|
5
cmd/vendor/github.com/prometheus/procfs/ipvs.go
generated
vendored
@ -8,6 +8,7 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
@ -58,7 +59,7 @@ func NewIPVSStats() (IPVSStats, error) {
|
||||
|
||||
// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
|
||||
func (fs FS) NewIPVSStats() (IPVSStats, error) {
|
||||
file, err := fs.open("net/ip_vs_stats")
|
||||
file, err := os.Open(fs.Path("net/ip_vs_stats"))
|
||||
if err != nil {
|
||||
return IPVSStats{}, err
|
||||
}
|
||||
@ -127,7 +128,7 @@ func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
|
||||
|
||||
// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
|
||||
func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
|
||||
file, err := fs.open("net/ip_vs")
|
||||
file, err := os.Open(fs.Path("net/ip_vs"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
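With FS.Path replacing the unexported open, stat, and readlink helpers, callers still build an FS and use NewIPVSStats as before. A minimal sketch, assuming a standard /proc mount; the error handling is illustrative only.

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	// procfs.DefaultMountPoint is normally "/proc"; an explicit path works too.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		fmt.Println("cannot open procfs:", err)
		return
	}

	stats, err := fs.NewIPVSStats()
	if err != nil {
		fmt.Println("cannot read net/ip_vs_stats:", err)
		return
	}
	fmt.Printf("%+v\n", stats)
}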
88
cmd/vendor/github.com/prometheus/procfs/mdstat.go
generated
vendored
@ -3,7 +3,6 @@ package procfs
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
@ -32,36 +31,22 @@ type MDStat struct {
|
||||
|
||||
// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos.
|
||||
func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
|
||||
mdStatusFilePath := path.Join(string(fs), "mdstat")
|
||||
mdStatusFilePath := fs.Path("mdstat")
|
||||
content, err := ioutil.ReadFile(mdStatusFilePath)
|
||||
if err != nil {
|
||||
return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
|
||||
}
|
||||
|
||||
mdStatusFile := string(content)
|
||||
|
||||
lines := strings.Split(mdStatusFile, "\n")
|
||||
var currentMD string
|
||||
|
||||
// Each md has at least the deviceline, statusline and one empty line afterwards
|
||||
// so we will have probably something of the order len(lines)/3 devices
|
||||
// so we use that for preallocation.
|
||||
estimateMDs := len(lines) / 3
|
||||
mdStates := make([]MDStat, 0, estimateMDs)
|
||||
|
||||
mdStates := []MDStat{}
|
||||
lines := strings.Split(string(content), "\n")
|
||||
for i, l := range lines {
|
||||
if l == "" {
|
||||
// Skip entirely empty lines.
|
||||
continue
|
||||
}
|
||||
|
||||
if l[0] == ' ' {
|
||||
// Those lines are not the beginning of a md-section.
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
|
||||
// We aren't interested in lines with general info.
|
||||
continue
|
||||
}
|
||||
|
||||
@ -69,32 +54,30 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
|
||||
if len(mainLine) < 3 {
|
||||
return mdStates, fmt.Errorf("error parsing mdline: %s", l)
|
||||
}
|
||||
currentMD = mainLine[0] // name of md-device
|
||||
activityState := mainLine[2] // activity status of said md-device
|
||||
mdName := mainLine[0]
|
||||
activityState := mainLine[2]
|
||||
|
||||
if len(lines) <= i+3 {
|
||||
return mdStates, fmt.Errorf("error parsing %s: entry for %s has fewer lines than expected", mdStatusFilePath, currentMD)
|
||||
return mdStates, fmt.Errorf(
|
||||
"error parsing %s: too few lines for md device %s",
|
||||
mdStatusFilePath,
|
||||
mdName,
|
||||
)
|
||||
}
|
||||
|
||||
active, total, size, err := evalStatusline(lines[i+1]) // parse statusline, always present
|
||||
active, total, size, err := evalStatusline(lines[i+1])
|
||||
if err != nil {
|
||||
return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
|
||||
}
|
||||
|
||||
//
|
||||
// Now get the number of synced blocks.
|
||||
//
|
||||
|
||||
// Get the line number of the syncing-line.
|
||||
var j int
|
||||
if strings.Contains(lines[i+2], "bitmap") { // then skip the bitmap line
|
||||
// j is the line number of the syncing-line.
|
||||
j := i + 2
|
||||
if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
|
||||
j = i + 3
|
||||
} else {
|
||||
j = i + 2
|
||||
}
|
||||
|
||||
// If device is syncing at the moment, get the number of currently synced bytes,
|
||||
// otherwise that number equals the size of the device.
|
||||
// If device is syncing at the moment, get the number of currently
|
||||
// synced bytes, otherwise that number equals the size of the device.
|
||||
syncedBlocks := size
|
||||
if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
|
||||
syncedBlocks, err = evalBuildline(lines[j])
|
||||
@ -103,8 +86,14 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
|
||||
}
|
||||
}
|
||||
|
||||
mdStates = append(mdStates, MDStat{currentMD, activityState, active, total, size, syncedBlocks})
|
||||
|
||||
mdStates = append(mdStates, MDStat{
|
||||
Name: mdName,
|
||||
ActivityState: activityState,
|
||||
DisksActive: active,
|
||||
DisksTotal: total,
|
||||
BlocksTotal: size,
|
||||
BlocksSynced: syncedBlocks,
|
||||
})
|
||||
}
|
||||
|
||||
return mdStates, nil
|
||||
@ -112,47 +101,38 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
|
||||
|
||||
func evalStatusline(statusline string) (active, total, size int64, err error) {
|
||||
matches := statuslineRE.FindStringSubmatch(statusline)
|
||||
|
||||
// +1 to make it more obvious that the whole string containing the info is also returned as matches[0].
|
||||
if len(matches) != 3+1 {
|
||||
return 0, 0, 0, fmt.Errorf("unexpected number matches found in statusline: %s", statusline)
|
||||
if len(matches) != 4 {
|
||||
return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
|
||||
}
|
||||
|
||||
size, err = strconv.ParseInt(matches[1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline)
|
||||
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
|
||||
}
|
||||
|
||||
total, err = strconv.ParseInt(matches[2], 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline)
|
||||
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
|
||||
}
|
||||
|
||||
active, err = strconv.ParseInt(matches[3], 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline)
|
||||
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
|
||||
}
|
||||
|
||||
return active, total, size, nil
|
||||
}
|
||||
|
||||
// Gets the size that has already been synced out of the sync-line.
|
||||
func evalBuildline(buildline string) (int64, error) {
|
||||
func evalBuildline(buildline string) (syncedBlocks int64, err error) {
|
||||
matches := buildlineRE.FindStringSubmatch(buildline)
|
||||
|
||||
// +1 to make it more obvious that the whole string containing the info is also returned as matches[0].
|
||||
if len(matches) < 1+1 {
|
||||
return 0, fmt.Errorf("too few matches found in buildline: %s", buildline)
|
||||
if len(matches) != 2 {
|
||||
return 0, fmt.Errorf("unexpected buildline: %s", buildline)
|
||||
}
|
||||
|
||||
if len(matches) > 1+1 {
|
||||
return 0, fmt.Errorf("too many matches found in buildline: %s", buildline)
|
||||
}
|
||||
|
||||
syncedSize, err := strconv.ParseInt(matches[1], 10, 64)
|
||||
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
|
||||
}
|
||||
|
||||
return syncedSize, nil
|
||||
return syncedBlocks, nil
|
||||
}
|
||||
|
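ParseMDStat now fills MDStat with named fields. A short sketch of reading them, assuming a Linux host that actually has /proc/mdstat; the print format is invented for illustration.

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		fmt.Println("cannot open procfs:", err)
		return
	}

	mds, err := fs.ParseMDStat()
	if err != nil {
		fmt.Println("cannot parse mdstat:", err)
		return
	}
	for _, md := range mds {
		fmt.Printf("%s: %s, %d/%d disks active, %d/%d blocks synced\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal,
			md.BlocksSynced, md.BlocksTotal)
	}
}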
552
cmd/vendor/github.com/prometheus/procfs/mountstats.go
generated
vendored
Normal file
@ -0,0 +1,552 @@
|
||||
package procfs
|
||||
|
||||
// While implementing parsing of /proc/[pid]/mountstats, this blog was used
|
||||
// heavily as a reference:
|
||||
// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
|
||||
//
|
||||
// Special thanks to Chris Siebenmann for all of his posts explaining the
|
||||
// various statistics available for NFS.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Constants shared between multiple functions.
|
||||
const (
|
||||
deviceEntryLen = 8
|
||||
|
||||
fieldBytesLen = 8
|
||||
fieldEventsLen = 27
|
||||
|
||||
statVersion10 = "1.0"
|
||||
statVersion11 = "1.1"
|
||||
|
||||
fieldTransport10Len = 10
|
||||
fieldTransport11Len = 13
|
||||
)
|
||||
|
||||
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
|
||||
type Mount struct {
|
||||
// Name of the device.
|
||||
Device string
|
||||
// The mount point of the device.
|
||||
Mount string
|
||||
// The filesystem type used by the device.
|
||||
Type string
|
||||
// If available additional statistics related to this Mount.
|
||||
// Use a type assertion to determine if additional statistics are available.
|
||||
Stats MountStats
|
||||
}
|
||||
|
||||
// A MountStats is a type which contains detailed statistics for a specific
|
||||
// type of Mount.
|
||||
type MountStats interface {
|
||||
mountStats()
|
||||
}
|
||||
|
||||
// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
|
||||
type MountStatsNFS struct {
|
||||
// The version of statistics provided.
|
||||
StatVersion string
|
||||
// The age of the NFS mount.
|
||||
Age time.Duration
|
||||
// Statistics related to byte counters for various operations.
|
||||
Bytes NFSBytesStats
|
||||
// Statistics related to various NFS event occurrences.
|
||||
Events NFSEventsStats
|
||||
// Statistics broken down by filesystem operation.
|
||||
Operations []NFSOperationStats
|
||||
// Statistics about the NFS RPC transport.
|
||||
Transport NFSTransportStats
|
||||
}
|
||||
|
||||
// mountStats implements MountStats.
|
||||
func (m MountStatsNFS) mountStats() {}
|
||||
|
||||
// A NFSBytesStats contains statistics about the number of bytes read and written
|
||||
// by an NFS client to and from an NFS server.
|
||||
type NFSBytesStats struct {
|
||||
// Number of bytes read using the read() syscall.
|
||||
Read int
|
||||
// Number of bytes written using the write() syscall.
|
||||
Write int
|
||||
// Number of bytes read using the read() syscall in O_DIRECT mode.
|
||||
DirectRead int
|
||||
// Number of bytes written using the write() syscall in O_DIRECT mode.
|
||||
DirectWrite int
|
||||
// Number of bytes read from the NFS server, in total.
|
||||
ReadTotal int
|
||||
// Number of bytes written to the NFS server, in total.
|
||||
WriteTotal int
|
||||
// Number of pages read directly via mmap()'d files.
|
||||
ReadPages int
|
||||
// Number of pages written directly via mmap()'d files.
|
||||
WritePages int
|
||||
}
|
||||
|
||||
// A NFSEventsStats contains statistics about NFS event occurrences.
|
||||
type NFSEventsStats struct {
|
||||
// Number of times cached inode attributes are re-validated from the server.
|
||||
InodeRevalidate int
|
||||
// Number of times cached dentry nodes are re-validated from the server.
|
||||
DnodeRevalidate int
|
||||
// Number of times an inode cache is cleared.
|
||||
DataInvalidate int
|
||||
// Number of times cached inode attributes are invalidated.
|
||||
AttributeInvalidate int
|
||||
// Number of times files or directories have been open()'d.
|
||||
VFSOpen int
|
||||
// Number of times a directory lookup has occurred.
|
||||
VFSLookup int
|
||||
// Number of times permissions have been checked.
|
||||
VFSAccess int
|
||||
// Number of updates (and potential writes) to pages.
|
||||
VFSUpdatePage int
|
||||
// Number of pages read directly via mmap()'d files.
|
||||
VFSReadPage int
|
||||
// Number of times a group of pages have been read.
|
||||
VFSReadPages int
|
||||
// Number of pages written directly via mmap()'d files.
|
||||
VFSWritePage int
|
||||
// Number of times a group of pages have been written.
|
||||
VFSWritePages int
|
||||
// Number of times directory entries have been read with getdents().
|
||||
VFSGetdents int
|
||||
// Number of times attributes have been set on inodes.
|
||||
VFSSetattr int
|
||||
// Number of pending writes that have been forcefully flushed to the server.
|
||||
VFSFlush int
|
||||
// Number of times fsync() has been called on directories and files.
|
||||
VFSFsync int
|
||||
// Number of times locking has been attempted on a file.
|
||||
VFSLock int
|
||||
// Number of times files have been closed and released.
|
||||
VFSFileRelease int
|
||||
// Unknown. Possibly unused.
|
||||
CongestionWait int
|
||||
// Number of times files have been truncated.
|
||||
Truncation int
|
||||
// Number of times a file has been grown due to writes beyond its existing end.
|
||||
WriteExtension int
|
||||
// Number of times a file was removed while still open by another process.
|
||||
SillyRename int
|
||||
// Number of times the NFS server gave less data than expected while reading.
|
||||
ShortRead int
|
||||
// Number of times the NFS server wrote less data than expected while writing.
|
||||
ShortWrite int
|
||||
// Number of times the NFS server indicated EJUKEBOX; retrieving data from
|
||||
// offline storage.
|
||||
JukeboxDelay int
|
||||
// Number of NFS v4.1+ pNFS reads.
|
||||
PNFSRead int
|
||||
// Number of NFS v4.1+ pNFS writes.
|
||||
PNFSWrite int
|
||||
}
|
||||
|
||||
// A NFSOperationStats contains statistics for a single operation.
|
||||
type NFSOperationStats struct {
|
||||
// The name of the operation.
|
||||
Operation string
|
||||
// Number of requests performed for this operation.
|
||||
Requests int
|
||||
// Number of times an actual RPC request has been transmitted for this operation.
|
||||
Transmissions int
|
||||
// Number of times a request has had a major timeout.
|
||||
MajorTimeouts int
|
||||
// Number of bytes sent for this operation, including RPC headers and payload.
|
||||
BytesSent int
|
||||
// Number of bytes received for this operation, including RPC headers and payload.
|
||||
BytesReceived int
|
||||
// Duration all requests spent queued for transmission before they were sent.
|
||||
CumulativeQueueTime time.Duration
|
||||
// Duration it took to get a reply back after the request was transmitted.
|
||||
CumulativeTotalResponseTime time.Duration
|
||||
// Duration from when a request was enqueued to when it was completely handled.
|
||||
CumulativeTotalRequestTime time.Duration
|
||||
}
|
||||
|
||||
// A NFSTransportStats contains statistics for the NFS mount RPC requests and
|
||||
// responses.
|
||||
type NFSTransportStats struct {
|
||||
// The local port used for the NFS mount.
|
||||
Port int
|
||||
// Number of times the client has had to establish a connection from scratch
|
||||
// to the NFS server.
|
||||
Bind int
|
||||
// Number of times the client has made a TCP connection to the NFS server.
|
||||
Connect int
|
||||
// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
|
||||
// spent waiting for connections to the server to be established.
|
||||
ConnectIdleTime int
|
||||
// Duration since the NFS mount last saw any RPC traffic.
|
||||
IdleTime time.Duration
|
||||
// Number of RPC requests for this mount sent to the NFS server.
|
||||
Sends int
|
||||
// Number of RPC responses for this mount received from the NFS server.
|
||||
Receives int
|
||||
// Number of times the NFS server sent a response with a transaction ID
|
||||
// unknown to this client.
|
||||
BadTransactionIDs int
|
||||
// A running counter, incremented on each request as the current difference
|
||||
// between sends and receives.
|
||||
CumulativeActiveRequests int
|
||||
// A running counter, incremented on each request by the current backlog
|
||||
// queue size.
|
||||
CumulativeBacklog int
|
||||
|
||||
// Stats below only available with stat version 1.1.
|
||||
|
||||
// Maximum number of simultaneously active RPC requests ever used.
|
||||
MaximumRPCSlotsUsed int
|
||||
// A running counter, incremented on each request as the current size of the
|
||||
// sending queue.
|
||||
CumulativeSendingQueue int
|
||||
// A running counter, incremented on each request as the current size of the
|
||||
// pending queue.
|
||||
CumulativePendingQueue int
|
||||
}
|
||||
|
||||
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
|
||||
// of Mount structures containing detailed information about each mount.
|
||||
// If available, statistics for each mount are parsed as well.
|
||||
func parseMountStats(r io.Reader) ([]*Mount, error) {
|
||||
const (
|
||||
device = "device"
|
||||
statVersionPrefix = "statvers="
|
||||
|
||||
nfs3Type = "nfs"
|
||||
nfs4Type = "nfs4"
|
||||
)
|
||||
|
||||
var mounts []*Mount
|
||||
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
// Only look for device entries in this function
|
||||
ss := strings.Fields(string(s.Bytes()))
|
||||
if len(ss) == 0 || ss[0] != device {
|
||||
continue
|
||||
}
|
||||
|
||||
m, err := parseMount(ss)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Does this mount also possess statistics information?
|
||||
if len(ss) > deviceEntryLen {
|
||||
// Only NFSv3 and v4 are supported for parsing statistics
|
||||
if m.Type != nfs3Type && m.Type != nfs4Type {
|
||||
return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
|
||||
}
|
||||
|
||||
statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
|
||||
|
||||
stats, err := parseMountStatsNFS(s, statVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m.Stats = stats
|
||||
}
|
||||
|
||||
mounts = append(mounts, m)
|
||||
}
|
||||
|
||||
return mounts, s.Err()
|
||||
}
|
||||
|
||||
// parseMount parses an entry in /proc/[pid]/mountstats in the format:
|
||||
// device [device] mounted on [mount] with fstype [type]
|
||||
func parseMount(ss []string) (*Mount, error) {
|
||||
if len(ss) < deviceEntryLen {
|
||||
return nil, fmt.Errorf("invalid device entry: %v", ss)
|
||||
}
|
||||
|
||||
// Check for specific words appearing at specific indices to ensure
|
||||
// the format is consistent with what we expect
|
||||
format := []struct {
|
||||
i int
|
||||
s string
|
||||
}{
|
||||
{i: 0, s: "device"},
|
||||
{i: 2, s: "mounted"},
|
||||
{i: 3, s: "on"},
|
||||
{i: 5, s: "with"},
|
||||
{i: 6, s: "fstype"},
|
||||
}
|
||||
|
||||
for _, f := range format {
|
||||
if ss[f.i] != f.s {
|
||||
return nil, fmt.Errorf("invalid device entry: %v", ss)
|
||||
}
|
||||
}
|
||||
|
||||
return &Mount{
|
||||
Device: ss[1],
|
||||
Mount: ss[4],
|
||||
Type: ss[7],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
|
||||
// related to NFS statistics.
|
||||
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
|
||||
// Field indicators for parsing specific types of data
|
||||
const (
|
||||
fieldAge = "age:"
|
||||
fieldBytes = "bytes:"
|
||||
fieldEvents = "events:"
|
||||
fieldPerOpStats = "per-op"
|
||||
fieldTransport = "xprt:"
|
||||
)
|
||||
|
||||
stats := &MountStatsNFS{
|
||||
StatVersion: statVersion,
|
||||
}
|
||||
|
||||
for s.Scan() {
|
||||
ss := strings.Fields(string(s.Bytes()))
|
||||
if len(ss) == 0 {
|
||||
break
|
||||
}
|
||||
if len(ss) < 2 {
|
||||
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
|
||||
}
|
||||
|
||||
switch ss[0] {
|
||||
case fieldAge:
|
||||
// Age integer is in seconds
|
||||
d, err := time.ParseDuration(ss[1] + "s")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats.Age = d
|
||||
case fieldBytes:
|
||||
bstats, err := parseNFSBytesStats(ss[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats.Bytes = *bstats
|
||||
case fieldEvents:
|
||||
estats, err := parseNFSEventsStats(ss[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats.Events = *estats
|
||||
case fieldTransport:
|
||||
if len(ss) < 3 {
|
||||
return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
|
||||
}
|
||||
|
||||
tstats, err := parseNFSTransportStats(ss[2:], statVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats.Transport = *tstats
|
||||
}
|
||||
|
||||
// When encountering "per-operation statistics", we must break this
|
||||
// loop and parse them separately to ensure we can terminate parsing
|
||||
// before reaching another device entry; hence why this 'if' statement
|
||||
// is not just another switch case
|
||||
if ss[0] == fieldPerOpStats {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// NFS per-operation stats appear last before the next device entry
|
||||
perOpStats, err := parseNFSOperationStats(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats.Operations = perOpStats
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// parseNFSBytesStats parses a NFSBytesStats line using an input set of
|
||||
// integer fields.
|
||||
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
|
||||
if len(ss) != fieldBytesLen {
|
||||
return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
|
||||
}
|
||||
|
||||
ns := make([]int, 0, fieldBytesLen)
|
||||
for _, s := range ss {
|
||||
n, err := strconv.Atoi(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ns = append(ns, n)
|
||||
}
|
||||
|
||||
return &NFSBytesStats{
|
||||
Read: ns[0],
|
||||
Write: ns[1],
|
||||
DirectRead: ns[2],
|
||||
DirectWrite: ns[3],
|
||||
ReadTotal: ns[4],
|
||||
WriteTotal: ns[5],
|
||||
ReadPages: ns[6],
|
||||
WritePages: ns[7],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseNFSEventsStats parses a NFSEventsStats line using an input set of
|
||||
// integer fields.
|
||||
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
|
||||
if len(ss) != fieldEventsLen {
|
||||
return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
|
||||
}
|
||||
|
||||
ns := make([]int, 0, fieldEventsLen)
|
||||
for _, s := range ss {
|
||||
n, err := strconv.Atoi(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ns = append(ns, n)
|
||||
}
|
||||
|
||||
return &NFSEventsStats{
|
||||
InodeRevalidate: ns[0],
|
||||
DnodeRevalidate: ns[1],
|
||||
DataInvalidate: ns[2],
|
||||
AttributeInvalidate: ns[3],
|
||||
VFSOpen: ns[4],
|
||||
VFSLookup: ns[5],
|
||||
VFSAccess: ns[6],
|
||||
VFSUpdatePage: ns[7],
|
||||
VFSReadPage: ns[8],
|
||||
VFSReadPages: ns[9],
|
||||
VFSWritePage: ns[10],
|
||||
VFSWritePages: ns[11],
|
||||
VFSGetdents: ns[12],
|
||||
VFSSetattr: ns[13],
|
||||
VFSFlush: ns[14],
|
||||
VFSFsync: ns[15],
|
||||
VFSLock: ns[16],
|
||||
VFSFileRelease: ns[17],
|
||||
CongestionWait: ns[18],
|
||||
Truncation: ns[19],
|
||||
WriteExtension: ns[20],
|
||||
SillyRename: ns[21],
|
||||
ShortRead: ns[22],
|
||||
ShortWrite: ns[23],
|
||||
JukeboxDelay: ns[24],
|
||||
PNFSRead: ns[25],
|
||||
PNFSWrite: ns[26],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
|
||||
// additional information about per-operation statistics until an empty
|
||||
// line is reached.
|
||||
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
|
||||
const (
|
||||
// Number of expected fields in each per-operation statistics set
|
||||
numFields = 9
|
||||
)
|
||||
|
||||
var ops []NFSOperationStats
|
||||
|
||||
for s.Scan() {
|
||||
ss := strings.Fields(string(s.Bytes()))
|
||||
if len(ss) == 0 {
|
||||
// Must break when reading a blank line after per-operation stats to
|
||||
// enable top-level function to parse the next device entry
|
||||
break
|
||||
}
|
||||
|
||||
if len(ss) != numFields {
|
||||
return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
|
||||
}
|
||||
|
||||
// Skip string operation name for integers
|
||||
ns := make([]int, 0, numFields-1)
|
||||
for _, st := range ss[1:] {
|
||||
n, err := strconv.Atoi(st)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ns = append(ns, n)
|
||||
}
|
||||
|
||||
ops = append(ops, NFSOperationStats{
|
||||
Operation: strings.TrimSuffix(ss[0], ":"),
|
||||
Requests: ns[0],
|
||||
Transmissions: ns[1],
|
||||
MajorTimeouts: ns[2],
|
||||
BytesSent: ns[3],
|
||||
BytesReceived: ns[4],
|
||||
CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond,
|
||||
CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
|
||||
CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond,
|
||||
})
|
||||
}
|
||||
|
||||
return ops, s.Err()
|
||||
}
|
||||
|
||||
// parseNFSTransportStats parses a NFSTransportStats line using an input set of
|
||||
// integer fields matched to a specific stats version.
|
||||
func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
|
||||
switch statVersion {
|
||||
case statVersion10:
|
||||
if len(ss) != fieldTransport10Len {
|
||||
return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
|
||||
}
|
||||
case statVersion11:
|
||||
if len(ss) != fieldTransport11Len {
|
||||
return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
|
||||
}
|
||||
|
||||
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
|
||||
// in a v1.0 response
|
||||
ns := make([]int, 0, fieldTransport11Len)
|
||||
for _, s := range ss {
|
||||
n, err := strconv.Atoi(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ns = append(ns, n)
|
||||
}
|
||||
|
||||
return &NFSTransportStats{
|
||||
Port: ns[0],
|
||||
Bind: ns[1],
|
||||
Connect: ns[2],
|
||||
ConnectIdleTime: ns[3],
|
||||
IdleTime: time.Duration(ns[4]) * time.Second,
|
||||
Sends: ns[5],
|
||||
Receives: ns[6],
|
||||
BadTransactionIDs: ns[7],
|
||||
CumulativeActiveRequests: ns[8],
|
||||
CumulativeBacklog: ns[9],
|
||||
MaximumRPCSlotsUsed: ns[10],
|
||||
CumulativeSendingQueue: ns[11],
|
||||
CumulativePendingQueue: ns[12],
|
||||
}, nil
|
||||
}
|
56
cmd/vendor/github.com/prometheus/procfs/proc.go
generated
vendored
@ -4,7 +4,6 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
@ -42,7 +41,7 @@ func NewProc(pid int) (Proc, error) {
|
||||
return fs.NewProc(pid)
|
||||
}
|
||||
|
||||
// AllProcs returns a list of all currently avaible processes under /proc.
|
||||
// AllProcs returns a list of all currently available processes under /proc.
|
||||
func AllProcs() (Procs, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
@ -53,7 +52,7 @@ func AllProcs() (Procs, error) {
|
||||
|
||||
// Self returns a process for the current process.
|
||||
func (fs FS) Self() (Proc, error) {
|
||||
p, err := fs.readlink("self")
|
||||
p, err := os.Readlink(fs.Path("self"))
|
||||
if err != nil {
|
||||
return Proc{}, err
|
||||
}
|
||||
@ -66,15 +65,15 @@ func (fs FS) Self() (Proc, error) {
|
||||
|
||||
// NewProc returns a process for the given pid.
|
||||
func (fs FS) NewProc(pid int) (Proc, error) {
|
||||
if _, err := fs.stat(strconv.Itoa(pid)); err != nil {
|
||||
if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
|
||||
return Proc{}, err
|
||||
}
|
||||
return Proc{PID: pid, fs: fs}, nil
|
||||
}
|
||||
|
||||
// AllProcs returns a list of all currently avaible processes.
|
||||
// AllProcs returns a list of all currently available processes.
|
||||
func (fs FS) AllProcs() (Procs, error) {
|
||||
d, err := fs.open("")
|
||||
d, err := os.Open(fs.Path())
|
||||
if err != nil {
|
||||
return Procs{}, err
|
||||
}
|
||||
@ -99,7 +98,7 @@ func (fs FS) AllProcs() (Procs, error) {
|
||||
|
||||
// CmdLine returns the command line of a process.
|
||||
func (p Proc) CmdLine() ([]string, error) {
|
||||
f, err := p.open("cmdline")
|
||||
f, err := os.Open(p.path("cmdline"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -117,10 +116,25 @@ func (p Proc) CmdLine() ([]string, error) {
|
||||
return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
|
||||
}
|
||||
|
||||
// Comm returns the command name of a process.
|
||||
func (p Proc) Comm() (string, error) {
|
||||
f, err := os.Open(p.path("comm"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
data, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(data)), nil
|
||||
}
|
||||
|
||||
// Executable returns the absolute path of the executable command of a process.
|
||||
func (p Proc) Executable() (string, error) {
|
||||
exe, err := p.readlink("exe")
|
||||
|
||||
exe, err := os.Readlink(p.path("exe"))
|
||||
if os.IsNotExist(err) {
|
||||
return "", nil
|
||||
}
|
||||
@ -158,7 +172,7 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
|
||||
targets := make([]string, len(names))
|
||||
|
||||
for i, name := range names {
|
||||
target, err := p.readlink("fd/" + name)
|
||||
target, err := os.Readlink(p.path("fd", name))
|
||||
if err == nil {
|
||||
targets[i] = target
|
||||
}
|
||||
@ -178,8 +192,20 @@ func (p Proc) FileDescriptorsLen() (int, error) {
|
||||
return len(fds), nil
|
||||
}
|
||||
|
||||
// MountStats retrieves statistics and configuration for mount points in a
|
||||
// process's namespace.
|
||||
func (p Proc) MountStats() ([]*Mount, error) {
|
||||
f, err := os.Open(p.path("mountstats"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return parseMountStats(f)
|
||||
}
|
||||
|
||||
func (p Proc) fileDescriptors() ([]string, error) {
|
||||
d, err := p.open("fd")
|
||||
d, err := os.Open(p.path("fd"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -193,10 +219,6 @@ func (p Proc) fileDescriptors() ([]string, error) {
|
||||
return names, nil
|
||||
}
|
||||
|
||||
func (p Proc) open(pa string) (*os.File, error) {
|
||||
return p.fs.open(path.Join(strconv.Itoa(p.PID), pa))
|
||||
}
|
||||
|
||||
func (p Proc) readlink(pa string) (string, error) {
|
||||
return p.fs.readlink(path.Join(strconv.Itoa(p.PID), pa))
|
||||
func (p Proc) path(pa ...string) string {
|
||||
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
|
||||
}
|
||||
|
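Proc gains Comm and MountStats in this update. A minimal sketch of using both on the current process, assuming a Linux host with /proc mounted; the Mount field names come from the mountstats.go file added above.

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.NewProc(os.Getpid())
	if err != nil {
		fmt.Println("cannot open proc entry:", err)
		return
	}

	comm, err := p.Comm()
	if err != nil {
		fmt.Println("cannot read comm:", err)
		return
	}
	fmt.Println("command name:", comm)

	// MountStats parses /proc/<pid>/mountstats; NFS mounts additionally
	// carry a *MountStatsNFS in the Stats field.
	mounts, err := p.MountStats()
	if err != nil {
		fmt.Println("cannot read mountstats:", err)
		return
	}
	for _, m := range mounts {
		fmt.Printf("%s on %s (%s)\n", m.Device, m.Mount, m.Type)
	}
}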
3
cmd/vendor/github.com/prometheus/procfs/proc_io.go
generated
vendored
@ -3,6 +3,7 @@ package procfs
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ProcIO models the content of /proc/<pid>/io.
|
||||
@ -29,7 +30,7 @@ type ProcIO struct {
|
||||
func (p Proc) NewIO() (ProcIO, error) {
|
||||
pio := ProcIO{}
|
||||
|
||||
f, err := p.open("io")
|
||||
f, err := os.Open(p.path("io"))
|
||||
if err != nil {
|
||||
return pio, err
|
||||
}
|
||||
|
64
cmd/vendor/github.com/prometheus/procfs/proc_limits.go
generated
vendored
@ -3,29 +3,56 @@ package procfs
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// ProcLimits represents the soft limits for each of the process's resource
|
||||
// limits.
|
||||
// limits. For more information see getrlimit(2):
|
||||
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
|
||||
type ProcLimits struct {
|
||||
CPUTime int
|
||||
FileSize int
|
||||
DataSize int
|
||||
StackSize int
|
||||
CoreFileSize int
|
||||
ResidentSet int
|
||||
Processes int
|
||||
OpenFiles int
|
||||
LockedMemory int
|
||||
AddressSpace int
|
||||
FileLocks int
|
||||
PendingSignals int
|
||||
MsqqueueSize int
|
||||
NicePriority int
|
||||
// CPU time limit in seconds.
|
||||
CPUTime int
|
||||
// Maximum size of files that the process may create.
|
||||
FileSize int
|
||||
// Maximum size of the process's data segment (initialized data,
|
||||
// uninitialized data, and heap).
|
||||
DataSize int
|
||||
// Maximum size of the process stack in bytes.
|
||||
StackSize int
|
||||
// Maximum size of a core file.
|
||||
CoreFileSize int
|
||||
// Limit of the process's resident set in pages.
|
||||
ResidentSet int
|
||||
// Maximum number of processes that can be created for the real user ID of
|
||||
// the calling process.
|
||||
Processes int
|
||||
// Value one greater than the maximum file descriptor number that can be
|
||||
// opened by this process.
|
||||
OpenFiles int
|
||||
// Maximum number of bytes of memory that may be locked into RAM.
|
||||
LockedMemory int
|
||||
// Maximum size of the process's virtual memory address space in bytes.
|
||||
AddressSpace int
|
||||
// Limit on the combined number of flock(2) locks and fcntl(2) leases that
|
||||
// this process may establish.
|
||||
FileLocks int
|
||||
// Limit of signals that may be queued for the real user ID of the calling
|
||||
// process.
|
||||
PendingSignals int
|
||||
// Limit on the number of bytes that can be allocated for POSIX message
|
||||
// queues for the real user ID of the calling process.
|
||||
MsqqueueSize int
|
||||
// Limit of the nice priority set using setpriority(2) or nice(2).
|
||||
NicePriority int
|
||||
// Limit of the real-time priority set using sched_setscheduler(2) or
|
||||
// sched_setparam(2).
|
||||
RealtimePriority int
|
||||
RealtimeTimeout int
|
||||
// Limit (in microseconds) on the amount of CPU time that a process
|
||||
// scheduled under a real-time scheduling policy may consume without making
|
||||
// a blocking system call.
|
||||
RealtimeTimeout int
|
||||
}
|
||||
|
||||
const (
|
||||
@ -39,7 +66,7 @@ var (
|
||||
|
||||
// NewLimits returns the current soft limits of the process.
|
||||
func (p Proc) NewLimits() (ProcLimits, error) {
|
||||
f, err := p.open("limits")
|
||||
f, err := os.Open(p.path("limits"))
|
||||
if err != nil {
|
||||
return ProcLimits{}, err
|
||||
}
|
||||
@ -60,7 +87,7 @@ func (p Proc) NewLimits() (ProcLimits, error) {
|
||||
case "Max cpu time":
|
||||
l.CPUTime, err = parseInt(fields[1])
|
||||
case "Max file size":
|
||||
l.FileLocks, err = parseInt(fields[1])
|
||||
l.FileSize, err = parseInt(fields[1])
|
||||
case "Max data size":
|
||||
l.DataSize, err = parseInt(fields[1])
|
||||
case "Max stack size":
|
||||
@ -90,7 +117,6 @@ func (p Proc) NewLimits() (ProcLimits, error) {
|
||||
case "Max realtime timeout":
|
||||
l.RealtimeTimeout, err = parseInt(fields[1])
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return ProcLimits{}, err
|
||||
}
|
||||
|
18
cmd/vendor/github.com/prometheus/procfs/proc_stat.go
generated
vendored
@ -7,15 +7,15 @@ import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call which
|
||||
// required cgo. However, that caused a lot of problems regarding
|
||||
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
|
||||
// which required cgo. However, that caused a lot of problems regarding
|
||||
// cross-compilation. Alternatives such as running a binary to determine the
|
||||
// value, or trying to derive it in some other way were all problematic.
|
||||
// After much research it was determined that USER_HZ is actually hardcoded to
|
||||
// 100 on all Go-supported platforms as of the time of this writing. This is
|
||||
// why we decided to hardcode it here as well. It is not impossible that there
|
||||
// could be systems with exceptions, but they should be very exotic edge cases,
|
||||
// and in that case, the worst outcome will be two misreported metrics.
|
||||
// value, or trying to derive it in some other way were all problematic. After
|
||||
// much research it was determined that USER_HZ is actually hardcoded to 100 on
|
||||
// all Go-supported platforms as of the time of this writing. This is why we
|
||||
// decided to hardcode it here as well. It is not impossible that there could
|
||||
// be systems with exceptions, but they should be very exotic edge cases, and
|
||||
// in that case, the worst outcome will be two misreported metrics.
|
||||
//
|
||||
// See also the following discussions:
|
||||
//
|
||||
@ -91,7 +91,7 @@ type ProcStat struct {
|
||||
|
||||
// NewStat returns the current status information of the process.
|
||||
func (p Proc) NewStat() (ProcStat, error) {
|
||||
f, err := p.open("stat")
|
||||
f, err := os.Open(p.path("stat"))
|
||||
if err != nil {
|
||||
return ProcStat{}, err
|
||||
}
|
||||
|
3
cmd/vendor/github.com/prometheus/procfs/stat.go
generated
vendored
@ -3,6 +3,7 @@ package procfs
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
@ -25,7 +26,7 @@ func NewStat() (Stat, error) {
|
||||
|
||||
// NewStat returns information about current kernel/system statistics.
|
||||
func (fs FS) NewStat() (Stat, error) {
|
||||
f, err := fs.open("stat")
|
||||
f, err := os.Open(fs.Path("stat"))
|
||||
if err != nil {
|
||||
return Stat{}, err
|
||||
}
|
||||
|
152
cmd/vendor/github.com/russross/blackfriday/block.go
generated
vendored
@ -102,7 +102,7 @@ func (p *parser) block(out *bytes.Buffer, data []byte) {
|
||||
// }
|
||||
// ```
|
||||
if p.flags&EXTENSION_FENCED_CODE != 0 {
|
||||
if i := p.fencedCode(out, data, true); i > 0 {
|
||||
if i := p.fencedCodeBlock(out, data, true); i > 0 {
|
||||
data = data[i:]
|
||||
continue
|
||||
}
|
||||
@ -320,6 +320,11 @@ func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int {
|
||||
return size
|
||||
}
|
||||
|
||||
// check for HTML CDATA
|
||||
if size := p.htmlCDATA(out, data, doRender); size > 0 {
|
||||
return size
|
||||
}
|
||||
|
||||
// no special case recognized
|
||||
return 0
|
||||
}
|
||||
@ -397,12 +402,10 @@ func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int {
|
||||
return i
|
||||
}
|
||||
|
||||
// HTML comment, lax form
|
||||
func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
|
||||
i := p.inlineHtmlComment(out, data)
|
||||
// needs to end with a blank line
|
||||
if j := p.isEmpty(data[i:]); j > 0 {
|
||||
size := i + j
|
||||
func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int {
|
||||
// html block needs to end with a blank line
|
||||
if i := p.isEmpty(data[start:]); i > 0 {
|
||||
size := start + i
|
||||
if doRender {
|
||||
// trim trailing newlines
|
||||
end := size
|
||||
@ -416,6 +419,35 @@ func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int
|
||||
return 0
|
||||
}
|
||||
|
||||
// HTML comment, lax form
|
||||
func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
|
||||
i := p.inlineHTMLComment(out, data)
|
||||
return p.renderHTMLBlock(out, data, i, doRender)
|
||||
}
|
||||
|
||||
// HTML CDATA section
|
||||
func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int {
|
||||
const cdataTag = "<![cdata["
|
||||
const cdataTagLen = len(cdataTag)
|
||||
if len(data) < cdataTagLen+1 {
|
||||
return 0
|
||||
}
|
||||
if !bytes.Equal(bytes.ToLower(data[:cdataTagLen]), []byte(cdataTag)) {
|
||||
return 0
|
||||
}
|
||||
i := cdataTagLen
|
||||
// scan for an end-of-comment marker, across lines if necessary
|
||||
for i < len(data) && !(data[i-2] == ']' && data[i-1] == ']' && data[i] == '>') {
|
||||
i++
|
||||
}
|
||||
i++
|
||||
// no end-of-comment marker
|
||||
if i >= len(data) {
|
||||
return 0
|
||||
}
|
||||
return p.renderHTMLBlock(out, data, i, doRender)
|
||||
}
|
||||
|
||||
// HR, which is the only self-closing block tag considered
|
||||
func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int {
|
||||
if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
|
||||
@ -432,19 +464,7 @@ func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int {
|
||||
}
|
||||
|
||||
if data[i] == '>' {
|
||||
i++
|
||||
if j := p.isEmpty(data[i:]); j > 0 {
|
||||
size := i + j
|
||||
if doRender {
|
||||
// trim newlines
|
||||
end := size
|
||||
for end > 0 && data[end-1] == '\n' {
|
||||
end--
|
||||
}
|
||||
p.r.BlockHtml(out, data[:end])
|
||||
}
|
||||
return size
|
||||
}
|
||||
return p.renderHTMLBlock(out, data, i+1, doRender)
|
||||
}
|
||||
|
||||
return 0
|
||||
@ -495,7 +515,7 @@ func (p *parser) htmlFindEnd(tag string, data []byte) int {
|
||||
return i + skip
|
||||
}
|
||||
|
||||
func (p *parser) isEmpty(data []byte) int {
|
||||
func (*parser) isEmpty(data []byte) int {
|
||||
// it is okay to call isEmpty on an empty buffer
|
||||
if len(data) == 0 {
|
||||
return 0
|
||||
@ -510,7 +530,7 @@ func (p *parser) isEmpty(data []byte) int {
|
||||
return i + 1
|
||||
}
|
||||
|
||||
func (p *parser) isHRule(data []byte) bool {
|
||||
func (*parser) isHRule(data []byte) bool {
|
||||
i := 0
|
||||
|
||||
// skip up to three spaces
|
||||
@ -539,21 +559,24 @@ func (p *parser) isHRule(data []byte) bool {
|
||||
return n >= 3
|
||||
}
|
||||
|
||||
func (p *parser) isFencedCode(data []byte, syntax **string, oldmarker string) (skip int, marker string) {
|
||||
// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
|
||||
// and returns the end index if so, or 0 otherwise. It also returns the marker found.
|
||||
// If syntax is not nil, it gets set to the syntax specified in the fence line.
|
||||
// A final newline is mandatory to recognize the fence line, unless newlineOptional is true.
|
||||
func isFenceLine(data []byte, syntax *string, oldmarker string, newlineOptional bool) (end int, marker string) {
|
||||
i, size := 0, 0
|
||||
skip = 0
|
||||
|
||||
// skip up to three spaces
|
||||
for i < len(data) && i < 3 && data[i] == ' ' {
|
||||
i++
|
||||
}
|
||||
if i >= len(data) {
|
||||
return
|
||||
}
|
||||
|
||||
// check for the marker characters: ~ or `
|
||||
if i >= len(data) {
|
||||
return 0, ""
|
||||
}
|
||||
if data[i] != '~' && data[i] != '`' {
|
||||
return
|
||||
return 0, ""
|
||||
}
|
||||
|
||||
c := data[i]
|
||||
@ -564,27 +587,28 @@ func (p *parser) isFencedCode(data []byte, syntax **string, oldmarker string) (s
|
||||
i++
|
||||
}
|
||||
|
||||
if i >= len(data) {
|
||||
return
|
||||
}
|
||||
|
||||
// the marker char must occur at least 3 times
|
||||
if size < 3 {
|
||||
return
|
||||
return 0, ""
|
||||
}
|
||||
marker = string(data[i-size : i])
|
||||
|
||||
// if this is the end marker, it must match the beginning marker
|
||||
if oldmarker != "" && marker != oldmarker {
|
||||
return
|
||||
return 0, ""
|
||||
}
|
||||
|
||||
// TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
|
||||
// into one, always get the syntax, and discard it if the caller doesn't care.
|
||||
if syntax != nil {
|
||||
syn := 0
|
||||
i = skipChar(data, i, ' ')
|
||||
|
||||
if i >= len(data) {
|
||||
return
|
||||
if newlineOptional && i == len(data) {
|
||||
return i, marker
|
||||
}
|
||||
return 0, ""
|
||||
}
|
||||
|
||||
syntaxStart := i
|
||||
@ -599,7 +623,7 @@ func (p *parser) isFencedCode(data []byte, syntax **string, oldmarker string) (s
|
||||
}
|
||||
|
||||
if i >= len(data) || data[i] != '}' {
|
||||
return
|
||||
return 0, ""
|
||||
}
|
||||
|
||||
// strip all whitespace at the beginning and the end
|
||||
@ -621,22 +645,26 @@ func (p *parser) isFencedCode(data []byte, syntax **string, oldmarker string) (s
|
||||
}
|
||||
}
|
||||
|
||||
language := string(data[syntaxStart : syntaxStart+syn])
|
||||
*syntax = &language
|
||||
*syntax = string(data[syntaxStart : syntaxStart+syn])
|
||||
}
|
||||
|
||||
i = skipChar(data, i, ' ')
|
||||
if i >= len(data) || data[i] != '\n' {
|
||||
return
|
||||
if newlineOptional && i == len(data) {
|
||||
return i, marker
|
||||
}
|
||||
return 0, ""
|
||||
}
|
||||
|
||||
skip = i + 1
|
||||
return
|
||||
return i + 1, marker // Take newline into account.
|
||||
}
|
||||
|
||||
func (p *parser) fencedCode(out *bytes.Buffer, data []byte, doRender bool) int {
|
||||
var lang *string
|
||||
beg, marker := p.isFencedCode(data, &lang, "")
|
||||
// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning,
|
||||
// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
|
||||
// If doRender is true, a final newline is mandatory to recognize the fenced code block.
|
||||
func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int {
|
||||
var syntax string
|
||||
beg, marker := isFenceLine(data, &syntax, "", false)
|
||||
if beg == 0 || beg >= len(data) {
|
||||
return 0
|
||||
}
|
||||
@ -647,7 +675,8 @@ func (p *parser) fencedCode(out *bytes.Buffer, data []byte, doRender bool) int {
|
||||
// safe to assume beg < len(data)
|
||||
|
||||
// check for the end of the code block
|
||||
fenceEnd, _ := p.isFencedCode(data[beg:], nil, marker)
|
||||
newlineOptional := !doRender
|
||||
fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional)
|
||||
if fenceEnd != 0 {
|
||||
beg += fenceEnd
|
||||
break
|
||||
@ -668,11 +697,6 @@ func (p *parser) fencedCode(out *bytes.Buffer, data []byte, doRender bool) int {
|
||||
beg = end
|
||||
}
|
||||
|
||||
syntax := ""
|
||||
if lang != nil {
|
||||
syntax = *lang
|
||||
}
|
||||
|
||||
if doRender {
|
||||
p.r.BlockCode(out, work.Bytes(), syntax)
|
||||
}
|
||||
@ -914,7 +938,7 @@ func (p *parser) quote(out *bytes.Buffer, data []byte) int {
|
||||
// irregardless of any contents inside it
|
||||
for data[end] != '\n' {
|
||||
if p.flags&EXTENSION_FENCED_CODE != 0 {
|
||||
if i := p.fencedCode(out, data[end:], false); i > 0 {
|
||||
if i := p.fencedCodeBlock(out, data[end:], false); i > 0 {
|
||||
// -1 to compensate for the extra end++ after the loop:
|
||||
end += i - 1
|
||||
break
|
||||
@ -1133,6 +1157,7 @@ gatherlines:
|
||||
// and move on to the next line
|
||||
if p.isEmpty(data[line:i]) > 0 {
|
||||
containsBlankLine = true
|
||||
raw.Write(data[line:i])
|
||||
line = i
|
||||
continue
|
||||
}
|
||||
@ -1153,6 +1178,14 @@ gatherlines:
|
||||
p.dliPrefix(chunk) > 0:
|
||||
|
||||
if containsBlankLine {
|
||||
// end the list if the type changed after a blank line
|
||||
if indent <= itemIndent &&
|
||||
((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) ||
|
||||
(*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) {
|
||||
|
||||
*flags |= LIST_ITEM_END_OF_LIST
|
||||
break gatherlines
|
||||
}
|
||||
*flags |= LIST_ITEM_CONTAINS_BLOCK
|
||||
}
|
||||
|
||||
@ -1200,17 +1233,10 @@ gatherlines:
|
||||
|
||||
// a blank line means this should be parsed as a block
|
||||
case containsBlankLine:
|
||||
raw.WriteByte('\n')
|
||||
*flags |= LIST_ITEM_CONTAINS_BLOCK
|
||||
}
|
||||
|
||||
// if this line was preceeded by one or more blanks,
|
||||
// re-introduce the blank into the buffer
|
||||
if containsBlankLine {
|
||||
containsBlankLine = false
|
||||
raw.WriteByte('\n')
|
||||
|
||||
}
|
||||
containsBlankLine = false
|
||||
|
||||
// add the line into the working buffer without prefix
|
||||
raw.Write(data[line+indent : i])
|
||||
@ -1218,6 +1244,12 @@ gatherlines:
|
||||
line = i
|
||||
}
|
||||
|
||||
// If reached end of data, the Renderer.ListItem call we're going to make below
|
||||
// is definitely the last in the list.
|
||||
if line >= len(data) {
|
||||
*flags |= LIST_ITEM_END_OF_LIST
|
||||
}
|
||||
|
||||
rawBytes := raw.Bytes()
|
||||
|
||||
// render the contents of the list item
|
||||
@ -1362,7 +1394,7 @@ func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {
|
||||
|
||||
// if there's a fenced code block, paragraph is over
|
||||
if p.flags&EXTENSION_FENCED_CODE != 0 {
|
||||
if p.fencedCode(out, current, false) > 0 {
|
||||
if p.fencedCodeBlock(out, current, false) > 0 {
|
||||
p.renderParagraph(out, data[:i])
|
||||
return i
|
||||
}
|
||||
|
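The fenced code parsing is refactored into fencedCodeBlock and isFenceLine, but the public entry points are unchanged. A small sketch of rendering markdown with fenced code through the stock v1 API as vendored here; MarkdownCommon enables the common extensions, fenced code among them.

package main

import (
	"fmt"

	"github.com/russross/blackfriday"
)

func main() {
	input := []byte("Example:\n\n```go\nfmt.Println(\"hi\")\n```\n")

	// MarkdownCommon runs the parser with the common extensions, so the
	// fenced block above is rendered as a <pre><code> section.
	html := blackfriday.MarkdownCommon(input)
	fmt.Println(string(html))
}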
27
cmd/vendor/github.com/russross/blackfriday/inline.go
generated
vendored
@ -240,6 +240,8 @@ func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
i++
}

brace := 0

// look for the matching closing bracket
for level := 1; level > 0 && i < len(data); i++ {
switch {
@ -273,8 +275,8 @@ func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
i++
}

// inline style link
switch {
// inline style link
case i < len(data) && data[i] == '(':
// skip initial whitespace
i++
@ -285,14 +287,27 @@ func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {

linkB := i

// look for link end: ' " )
// look for link end: ' " ), check for new opening braces and take this
// into account, this may lead for overshooting and probably will require
// some fine-tuning.
findlinkend:
for i < len(data) {
switch {
case data[i] == '\\':
i += 2

case data[i] == ')' || data[i] == '\'' || data[i] == '"':
case data[i] == '(':
brace++
i++

case data[i] == ')':
if brace <= 0 {
break findlinkend
}
brace--
i++

case data[i] == '\'' || data[i] == '"':
break findlinkend

default:
@ -560,7 +575,7 @@ func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
return i
}

func (p *parser) inlineHtmlComment(out *bytes.Buffer, data []byte) int {
func (p *parser) inlineHTMLComment(out *bytes.Buffer, data []byte) int {
if len(data) < 5 {
return 0
}
@ -584,7 +599,7 @@ func leftAngle(p *parser, out *bytes.Buffer, data []byte, offset int) int {
data = data[offset:]
altype := LINK_TYPE_NOT_AUTOLINK
end := tagLength(data, &altype)
if size := p.inlineHtmlComment(out, data); size > 0 {
if size := p.inlineHTMLComment(out, data); size > 0 {
end = size
}
if end > 2 {
@ -923,7 +938,7 @@ func isMailtoAutoLink(data []byte) int {
nb++

case '-', '.', '_':
break
// Do nothing.

case '>':
if nb == 1 {
164
cmd/vendor/github.com/russross/blackfriday/markdown.go
generated
vendored
@ -25,7 +25,7 @@ import (
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
const VERSION = "1.4"
|
||||
const VERSION = "1.5"
|
||||
|
||||
// These are the supported markdown parsing extensions.
|
||||
// OR these values together to select multiple extensions.
|
||||
@ -105,46 +105,46 @@ const (
|
||||
// blockTags is a set of tags that are recognized as HTML block tags.
|
||||
// Any of these can be included in markdown text without special escaping.
|
||||
var blockTags = map[string]struct{}{
|
||||
"blockquote": struct{}{},
|
||||
"del": struct{}{},
|
||||
"div": struct{}{},
|
||||
"dl": struct{}{},
|
||||
"fieldset": struct{}{},
|
||||
"form": struct{}{},
|
||||
"h1": struct{}{},
|
||||
"h2": struct{}{},
|
||||
"h3": struct{}{},
|
||||
"h4": struct{}{},
|
||||
"h5": struct{}{},
|
||||
"h6": struct{}{},
|
||||
"iframe": struct{}{},
|
||||
"ins": struct{}{},
|
||||
"math": struct{}{},
|
||||
"noscript": struct{}{},
|
||||
"ol": struct{}{},
|
||||
"pre": struct{}{},
|
||||
"p": struct{}{},
|
||||
"script": struct{}{},
|
||||
"style": struct{}{},
|
||||
"table": struct{}{},
|
||||
"ul": struct{}{},
|
||||
"blockquote": {},
|
||||
"del": {},
|
||||
"div": {},
|
||||
"dl": {},
|
||||
"fieldset": {},
|
||||
"form": {},
|
||||
"h1": {},
|
||||
"h2": {},
|
||||
"h3": {},
|
||||
"h4": {},
|
||||
"h5": {},
|
||||
"h6": {},
|
||||
"iframe": {},
|
||||
"ins": {},
|
||||
"math": {},
|
||||
"noscript": {},
|
||||
"ol": {},
|
||||
"pre": {},
|
||||
"p": {},
|
||||
"script": {},
|
||||
"style": {},
|
||||
"table": {},
|
||||
"ul": {},
|
||||
|
||||
// HTML5
|
||||
"address": struct{}{},
|
||||
"article": struct{}{},
|
||||
"aside": struct{}{},
|
||||
"canvas": struct{}{},
|
||||
"figcaption": struct{}{},
|
||||
"figure": struct{}{},
|
||||
"footer": struct{}{},
|
||||
"header": struct{}{},
|
||||
"hgroup": struct{}{},
|
||||
"main": struct{}{},
|
||||
"nav": struct{}{},
|
||||
"output": struct{}{},
|
||||
"progress": struct{}{},
|
||||
"section": struct{}{},
|
||||
"video": struct{}{},
|
||||
"address": {},
|
||||
"article": {},
|
||||
"aside": {},
|
||||
"canvas": {},
|
||||
"figcaption": {},
|
||||
"figure": {},
|
||||
"footer": {},
|
||||
"header": {},
|
||||
"hgroup": {},
|
||||
"main": {},
|
||||
"nav": {},
|
||||
"output": {},
|
||||
"progress": {},
|
||||
"section": {},
|
||||
"video": {},
|
||||
}
|
||||
|
||||
// Renderer is the rendering interface.
|
||||
@ -386,9 +386,9 @@ func MarkdownOptions(input []byte, renderer Renderer, opts Options) []byte {
|
||||
}
|
||||
|
||||
// first pass:
|
||||
// - extract references
|
||||
// - expand tabs
|
||||
// - normalize newlines
|
||||
// - extract references (outside of fenced code blocks)
|
||||
// - expand tabs (outside of fenced code blocks)
|
||||
// - copy everything else
|
||||
func firstPass(p *parser, input []byte) []byte {
|
||||
var out bytes.Buffer
|
||||
@ -396,46 +396,46 @@ func firstPass(p *parser, input []byte) []byte {
|
||||
if p.flags&EXTENSION_TAB_SIZE_EIGHT != 0 {
|
||||
tabSize = TAB_SIZE_EIGHT
|
||||
}
|
||||
beg, end := 0, 0
|
||||
beg := 0
|
||||
lastFencedCodeBlockEnd := 0
|
||||
for beg < len(input) { // iterate over lines
|
||||
if end = isReference(p, input[beg:], tabSize); end > 0 {
|
||||
beg += end
|
||||
} else { // skip to the next line
|
||||
end = beg
|
||||
for end < len(input) && input[end] != '\n' && input[end] != '\r' {
|
||||
end++
|
||||
}
|
||||
|
||||
if p.flags&EXTENSION_FENCED_CODE != 0 {
|
||||
// track fenced code block boundaries to suppress tab expansion
|
||||
// inside them:
|
||||
if beg >= lastFencedCodeBlockEnd {
|
||||
if i := p.fencedCode(&out, input[beg:], false); i > 0 {
|
||||
lastFencedCodeBlockEnd = beg + i
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add the line body if present
|
||||
if end > beg {
|
||||
if end < lastFencedCodeBlockEnd { // Do not expand tabs while inside fenced code blocks.
|
||||
out.Write(input[beg:end])
|
||||
} else {
|
||||
expandTabs(&out, input[beg:end], tabSize)
|
||||
}
|
||||
}
|
||||
out.WriteByte('\n')
|
||||
|
||||
if end < len(input) && input[end] == '\r' {
|
||||
end++
|
||||
}
|
||||
if end < len(input) && input[end] == '\n' {
|
||||
end++
|
||||
}
|
||||
|
||||
beg = end
|
||||
for beg < len(input) {
|
||||
// Find end of this line, then process the line.
|
||||
end := beg
|
||||
for end < len(input) && input[end] != '\n' && input[end] != '\r' {
|
||||
end++
|
||||
}
|
||||
|
||||
if p.flags&EXTENSION_FENCED_CODE != 0 {
|
||||
// track fenced code block boundaries to suppress tab expansion
|
||||
// and reference extraction inside them:
|
||||
if beg >= lastFencedCodeBlockEnd {
|
||||
if i := p.fencedCodeBlock(&out, input[beg:], false); i > 0 {
|
||||
lastFencedCodeBlockEnd = beg + i
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add the line body if present
|
||||
if end > beg {
|
||||
if end < lastFencedCodeBlockEnd { // Do not expand tabs while inside fenced code blocks.
|
||||
out.Write(input[beg:end])
|
||||
} else if refEnd := isReference(p, input[beg:], tabSize); refEnd > 0 {
|
||||
beg += refEnd
|
||||
continue
|
||||
} else {
|
||||
expandTabs(&out, input[beg:end], tabSize)
|
||||
}
|
||||
}
|
||||
|
||||
if end < len(input) && input[end] == '\r' {
|
||||
end++
|
||||
}
|
||||
if end < len(input) && input[end] == '\n' {
|
||||
end++
|
||||
}
|
||||
out.WriteByte('\n')
|
||||
|
||||
beg = end
|
||||
}
|
||||
|
||||
// empty input?
|
||||
@ -635,12 +635,12 @@ func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffse
|
||||
i++
|
||||
}
|
||||
linkOffset = i
|
||||
for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
|
||||
i++
|
||||
}
|
||||
if i == len(data) {
|
||||
return
|
||||
}
|
||||
for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
|
||||
i++
|
||||
}
|
||||
linkEnd = i
|
||||
if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
|
||||
linkOffset++
|
||||
|
28
cmd/vendor/github.com/urfave/cli/LICENSE
generated
vendored
@ -1,21 +1,21 @@
Copyright (C) 2013 Jeremy Saenz
All Rights Reserved.
MIT License

MIT LICENSE
Copyright (c) 2016 Jeremy Saenz & Contributors

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
14
cmd/vendor/github.com/urfave/cli/app.go
generated
vendored
@ -140,13 +140,6 @@ func (a *App) Setup() {
}
a.Commands = newCmds

a.categories = CommandCategories{}
for _, command := range a.Commands {
a.categories = a.categories.AddCommand(command.Category, command)
}
sort.Sort(a.categories)

// append help to commands
if a.Command(helpCommand.Name) == nil && !a.HideHelp {
a.Commands = append(a.Commands, helpCommand)
if (HelpFlag != BoolFlag{}) {
@ -154,7 +147,6 @@ func (a *App) Setup() {
}
}

//append version/help flags
if a.EnableBashCompletion {
a.appendFlag(BashCompletionFlag)
}
@ -162,6 +154,12 @@ func (a *App) Setup() {
if !a.HideVersion {
a.appendFlag(VersionFlag)
}

a.categories = CommandCategories{}
for _, command := range a.Commands {
a.categories = a.categories.AddCommand(command.Category, command)
}
sort.Sort(a.categories)
}

// Run is the entry point to the cli app. Parses the arguments slice and routes
@ -1,29 +1,28 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
// +build mips64 mips64le
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System call support for 386, FreeBSD
|
||||
// System calls for mips64, Linux
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-32
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-56
|
||||
JMP syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-44
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-80
|
||||
JMP syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·Syscall9(SB),NOSPLIT,$0-56
|
||||
JMP syscall·Syscall9(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-32
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
|
||||
JMP syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-44
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
|
||||
JMP syscall·RawSyscall6(SB)
|
31
cmd/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
// +build mips mipsle
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System calls for mips, Linux
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-28
|
||||
JMP syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-40
|
||||
JMP syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·Syscall9(SB),NOSPLIT,$0-52
|
||||
JMP syscall·Syscall9(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
|
||||
JMP syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
|
||||
JMP syscall·RawSyscall6(SB)
|
28
cmd/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build s390x
|
||||
// +build linux
|
||||
// +build !gccgo
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
//
|
||||
// System calls for s390x, Linux
|
||||
//
|
||||
|
||||
// Just jump to package syscall's implementation for all these functions.
|
||||
// The runtime may know about them.
|
||||
|
||||
TEXT ·Syscall(SB),NOSPLIT,$0-56
|
||||
BR syscall·Syscall(SB)
|
||||
|
||||
TEXT ·Syscall6(SB),NOSPLIT,$0-80
|
||||
BR syscall·Syscall6(SB)
|
||||
|
||||
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
|
||||
BR syscall·RawSyscall(SB)
|
||||
|
||||
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
|
||||
BR syscall·RawSyscall6(SB)
|
35
cmd/vendor/golang.org/x/sys/unix/bluetooth_linux.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Bluetooth sockets and messages
|
||||
|
||||
package unix
|
||||
|
||||
// Bluetooth Protocols
|
||||
const (
|
||||
BTPROTO_L2CAP = 0
|
||||
BTPROTO_HCI = 1
|
||||
BTPROTO_SCO = 2
|
||||
BTPROTO_RFCOMM = 3
|
||||
BTPROTO_BNEP = 4
|
||||
BTPROTO_CMTP = 5
|
||||
BTPROTO_HIDP = 6
|
||||
BTPROTO_AVDTP = 7
|
||||
)
|
||||
|
||||
const (
|
||||
HCI_CHANNEL_RAW = 0
|
||||
HCI_CHANNEL_USER = 1
|
||||
HCI_CHANNEL_MONITOR = 2
|
||||
HCI_CHANNEL_CONTROL = 3
|
||||
)
|
||||
|
||||
// Socketoption Level
|
||||
const (
|
||||
SOL_BLUETOOTH = 0x112
|
||||
SOL_HCI = 0x0
|
||||
SOL_L2CAP = 0x6
|
||||
SOL_RFCOMM = 0x12
|
||||
SOL_SCO = 0x11
|
||||
)
|
2
cmd/vendor/golang.org/x/sys/unix/flock_linux_32bit.go
generated
vendored
@ -1,4 +1,4 @@
// +build linux,386 linux,arm
// +build linux,386 linux,arm linux,mips linux,mipsle

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
20
cmd/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build gccgo,linux,sparc64
|
||||
|
||||
package unix
|
||||
|
||||
import "syscall"
|
||||
|
||||
//extern sysconf
|
||||
func realSysconf(name int) int64
|
||||
|
||||
func sysconf(name int) (n int64, err syscall.Errno) {
|
||||
r := realSysconf(name)
|
||||
if r < 0 {
|
||||
return 0, syscall.GetErrno()
|
||||
}
|
||||
return r, 0
|
||||
}
|
62
cmd/vendor/golang.org/x/sys/unix/mkpost.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// mkpost processes the output of cgo -godefs to
|
||||
// modify the generated types. It is used to clean up
|
||||
// the sys API in an architecture specific manner.
|
||||
//
|
||||
// mkpost is run after cgo -godefs by mkall.sh.
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/format"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
func main() {
|
||||
b, err := ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
s := string(b)
|
||||
|
||||
goarch := os.Getenv("GOARCH")
|
||||
goos := os.Getenv("GOOS")
|
||||
if goarch == "s390x" && goos == "linux" {
|
||||
// Export the types of PtraceRegs fields.
|
||||
re := regexp.MustCompile("ptrace(Psw|Fpregs|Per)")
|
||||
s = re.ReplaceAllString(s, "Ptrace$1")
|
||||
|
||||
// Replace padding fields inserted by cgo with blank identifiers.
|
||||
re = regexp.MustCompile("Pad_cgo[A-Za-z0-9_]*")
|
||||
s = re.ReplaceAllString(s, "_")
|
||||
|
||||
// Replace other unwanted fields with blank identifiers.
|
||||
re = regexp.MustCompile("X_[A-Za-z0-9_]*")
|
||||
s = re.ReplaceAllString(s, "_")
|
||||
|
||||
// Replace the control_regs union with a blank identifier for now.
|
||||
re = regexp.MustCompile("(Control_regs)\\s+\\[0\\]uint64")
|
||||
s = re.ReplaceAllString(s, "_ [0]uint64")
|
||||
}
|
||||
|
||||
// gofmt
|
||||
b, err = format.Source([]byte(s))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Append this command to the header to show where the new file
|
||||
// came from.
|
||||
re := regexp.MustCompile("(cgo -godefs [a-zA-Z0-9_]+\\.go.*)")
|
||||
b = re.ReplaceAll(b, []byte("$1 | go run mkpost.go"))
|
||||
|
||||
fmt.Printf("%s", b)
|
||||
}
|
8
cmd/vendor/golang.org/x/sys/unix/sockcmsg_unix.go
generated
vendored
@ -62,7 +62,7 @@ func ParseSocketControlMessage(b []byte) ([]SocketControlMessage, error) {
|
||||
|
||||
func socketControlMessageHeaderAndData(b []byte) (*Cmsghdr, []byte, error) {
|
||||
h := (*Cmsghdr)(unsafe.Pointer(&b[0]))
|
||||
if h.Len < SizeofCmsghdr || int(h.Len) > len(b) {
|
||||
if h.Len < SizeofCmsghdr || uint64(h.Len) > uint64(len(b)) {
|
||||
return nil, nil, EINVAL
|
||||
}
|
||||
return h, b[cmsgAlignOf(SizeofCmsghdr):h.Len], nil
|
||||
@ -77,10 +77,10 @@ func UnixRights(fds ...int) []byte {
|
||||
h.Level = SOL_SOCKET
|
||||
h.Type = SCM_RIGHTS
|
||||
h.SetLen(CmsgLen(datalen))
|
||||
data := uintptr(cmsgData(h))
|
||||
data := cmsgData(h)
|
||||
for _, fd := range fds {
|
||||
*(*int32)(unsafe.Pointer(data)) = int32(fd)
|
||||
data += 4
|
||||
*(*int32)(data) = int32(fd)
|
||||
data = unsafe.Pointer(uintptr(data) + 4)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
2
cmd/vendor/golang.org/x/sys/unix/syscall.go
generated
vendored
@ -68,6 +68,8 @@ func (tv *Timeval) Nano() int64 {
|
||||
return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
|
||||
}
|
||||
|
||||
func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
|
||||
|
||||
// use is a no-op, but the compiler cannot see that it is.
|
||||
// Calling use(p) ensures that p is kept live until that point.
|
||||
//go:noescape
|
||||
|
83
cmd/vendor/golang.org/x/sys/unix/syscall_bsd.go
generated
vendored
@ -450,27 +450,31 @@ func Kevent(kq int, changes, events []Kevent_t, timeout *Timespec) (n int, err e
|
||||
|
||||
//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
|
||||
|
||||
func Sysctl(name string) (value string, err error) {
|
||||
// sysctlmib translates name to mib number and appends any additional args.
|
||||
func sysctlmib(name string, args ...int) ([]_C_int, error) {
|
||||
// Translate name to mib number.
|
||||
mib, err := nametomib(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Find size.
|
||||
n := uintptr(0)
|
||||
if err = sysctl(mib, nil, &n, nil, 0); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if n == 0 {
|
||||
return "", nil
|
||||
for _, a := range args {
|
||||
mib = append(mib, _C_int(a))
|
||||
}
|
||||
|
||||
// Read into buffer of that size.
|
||||
buf := make([]byte, n)
|
||||
if err = sysctl(mib, &buf[0], &n, nil, 0); err != nil {
|
||||
return mib, nil
|
||||
}
|
||||
|
||||
func Sysctl(name string) (string, error) {
|
||||
return SysctlArgs(name)
|
||||
}
|
||||
|
||||
func SysctlArgs(name string, args ...int) (string, error) {
|
||||
buf, err := SysctlRaw(name, args...)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
n := len(buf)
|
||||
|
||||
// Throw away terminating NUL.
|
||||
if n > 0 && buf[n-1] == '\x00' {
|
||||
@ -479,17 +483,19 @@ func Sysctl(name string) (value string, err error) {
|
||||
return string(buf[0:n]), nil
|
||||
}
|
||||
|
||||
func SysctlUint32(name string) (value uint32, err error) {
|
||||
// Translate name to mib number.
|
||||
mib, err := nametomib(name)
|
||||
func SysctlUint32(name string) (uint32, error) {
|
||||
return SysctlUint32Args(name)
|
||||
}
|
||||
|
||||
func SysctlUint32Args(name string, args ...int) (uint32, error) {
|
||||
mib, err := sysctlmib(name, args...)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Read into buffer of that size.
|
||||
n := uintptr(4)
|
||||
buf := make([]byte, 4)
|
||||
if err = sysctl(mib, &buf[0], &n, nil, 0); err != nil {
|
||||
if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if n != 4 {
|
||||
@ -498,6 +504,49 @@ func SysctlUint32(name string) (value uint32, err error) {
|
||||
return *(*uint32)(unsafe.Pointer(&buf[0])), nil
|
||||
}
|
||||
|
||||
func SysctlUint64(name string, args ...int) (uint64, error) {
|
||||
mib, err := sysctlmib(name, args...)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
n := uintptr(8)
|
||||
buf := make([]byte, 8)
|
||||
if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if n != 8 {
|
||||
return 0, EIO
|
||||
}
|
||||
return *(*uint64)(unsafe.Pointer(&buf[0])), nil
|
||||
}
|
||||
|
||||
func SysctlRaw(name string, args ...int) ([]byte, error) {
|
||||
mib, err := sysctlmib(name, args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Find size.
|
||||
n := uintptr(0)
|
||||
if err := sysctl(mib, nil, &n, nil, 0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if n == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Read into buffer of that size.
|
||||
buf := make([]byte, n)
|
||||
if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// The actual call may return less than the original reported required
|
||||
// size so ensure we deal with that.
|
||||
return buf[:n], nil
|
||||
}
|
||||
|
||||
//sys utimes(path string, timeval *[2]Timeval) (err error)
|
||||
|
||||
func Utimes(path string, tv []Timeval) error {
|
||||
|
2
cmd/vendor/golang.org/x/sys/unix/syscall_darwin.go
generated
vendored
@ -144,6 +144,7 @@ func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) (
|
||||
uintptr(options),
|
||||
0,
|
||||
)
|
||||
use(unsafe.Pointer(_p0))
|
||||
if e1 != 0 {
|
||||
return nil, e1
|
||||
}
|
||||
@ -196,6 +197,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
|
||||
bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
|
||||
}
|
||||
r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(_p0), bufsize, uintptr(flags))
|
||||
use(unsafe.Pointer(_p0))
|
||||
n = int(r0)
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
|
2
cmd/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
generated
vendored
@ -21,8 +21,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
|
||||
return
|
||||
}
|
||||
|
||||
func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
|
||||
|
||||
func NsecToTimeval(nsec int64) (tv Timeval) {
|
||||
nsec += 999 // round up to microsecond
|
||||
tv.Usec = int32(nsec % 1e9 / 1e3)
|
||||
|
4
cmd/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
generated
vendored
@ -11,6 +11,8 @@ import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
|
||||
|
||||
func Getpagesize() int { return 4096 }
|
||||
|
||||
func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
|
||||
@ -21,8 +23,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
|
||||
return
|
||||
}
|
||||
|
||||
func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
|
||||
|
||||
func NsecToTimeval(nsec int64) (tv Timeval) {
|
||||
nsec += 999 // round up to microsecond
|
||||
tv.Usec = int32(nsec % 1e9 / 1e3)
|
||||
|
2
cmd/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
generated
vendored
@ -19,8 +19,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
|
||||
return
|
||||
}
|
||||
|
||||
func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
|
||||
|
||||
func NsecToTimeval(nsec int64) (tv Timeval) {
|
||||
nsec += 999 // round up to microsecond
|
||||
tv.Usec = int32(nsec % 1e9 / 1e3)
|
||||
|
2
cmd/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
generated
vendored
@ -21,8 +21,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
|
||||
return
|
||||
}
|
||||
|
||||
func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
|
||||
|
||||
func NsecToTimeval(nsec int64) (tv Timeval) {
|
||||
nsec += 999 // round up to microsecond
|
||||
tv.Usec = int32(nsec % 1e9 / 1e3)
|
||||
|
1
cmd/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
generated
vendored
@ -109,6 +109,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
|
||||
bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
|
||||
}
|
||||
r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
|
||||
use(unsafe.Pointer(_p0))
|
||||
n = int(r0)
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
|
63
cmd/vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go
generated
vendored
@ -1,63 +0,0 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build 386,dragonfly
|
||||
|
||||
package unix
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func Getpagesize() int { return 4096 }
|
||||
|
||||
func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
|
||||
|
||||
func NsecToTimespec(nsec int64) (ts Timespec) {
|
||||
ts.Sec = int32(nsec / 1e9)
|
||||
ts.Nsec = int32(nsec % 1e9)
|
||||
return
|
||||
}
|
||||
|
||||
func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
|
||||
|
||||
func NsecToTimeval(nsec int64) (tv Timeval) {
|
||||
nsec += 999 // round up to microsecond
|
||||
tv.Usec = int32(nsec % 1e9 / 1e3)
|
||||
tv.Sec = int32(nsec / 1e9)
|
||||
return
|
||||
}
|
||||
|
||||
func SetKevent(k *Kevent_t, fd, mode, flags int) {
|
||||
k.Ident = uint32(fd)
|
||||
k.Filter = int16(mode)
|
||||
k.Flags = uint16(flags)
|
||||
}
|
||||
|
||||
func (iov *Iovec) SetLen(length int) {
|
||||
iov.Len = uint32(length)
|
||||
}
|
||||
|
||||
func (msghdr *Msghdr) SetControllen(length int) {
|
||||
msghdr.Controllen = uint32(length)
|
||||
}
|
||||
|
||||
func (cmsg *Cmsghdr) SetLen(length int) {
|
||||
cmsg.Len = uint32(length)
|
||||
}
|
||||
|
||||
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
|
||||
var writtenOut uint64 = 0
|
||||
_, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0)
|
||||
|
||||
written = int(writtenOut)
|
||||
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
|
2
cmd/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
generated
vendored
@ -21,8 +21,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
|
||||
return
|
||||
}
|
||||
|
||||
func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
|
||||
|
||||
func NsecToTimeval(nsec int64) (tv Timeval) {
|
||||
nsec += 999 // round up to microsecond
|
||||
tv.Usec = nsec % 1e9 / 1e3
|
||||
|
1
cmd/vendor/golang.org/x/sys/unix/syscall_freebsd.go
generated
vendored
@ -129,6 +129,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
|
||||
bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
|
||||
}
|
||||
r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
|
||||
use(unsafe.Pointer(_p0))
|
||||
n = int(r0)
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
|
2
cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
generated
vendored
@ -21,8 +21,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
|
||||
return
|
||||
}
|
||||
|
||||
func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
|
||||
|
||||
func NsecToTimeval(nsec int64) (tv Timeval) {
|
||||
nsec += 999 // round up to microsecond
|
||||
tv.Usec = int32(nsec % 1e9 / 1e3)
|
||||
|
2
cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
generated
vendored
@ -21,8 +21,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
|
||||
return
|
||||
}
|
||||
|
||||
func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
|
||||
|
||||
func NsecToTimeval(nsec int64) (tv Timeval) {
|
||||
nsec += 999 // round up to microsecond
|
||||
tv.Usec = nsec % 1e9 / 1e3
|
||||
|
2
cmd/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
generated
vendored
@ -21,8 +21,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
|
||||
return
|
||||
}
|
||||
|
||||
func TimevalToNsec(tv Timeval) int64 { return tv.Sec*1e9 + int64(tv.Usec)*1e3 }
|
||||
|
||||
func NsecToTimeval(nsec int64) (tv Timeval) {
|
||||
nsec += 999 // round up to microsecond
|
||||
tv.Usec = int32(nsec % 1e9 / 1e3)
|
||||
|
112
cmd/vendor/golang.org/x/sys/unix/syscall_linux.go
generated
vendored
@ -36,10 +36,10 @@ func Creat(path string, mode uint32) (fd int, err error) {
|
||||
return Open(path, O_CREAT|O_WRONLY|O_TRUNC, mode)
|
||||
}
|
||||
|
||||
//sys linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error)
|
||||
//sys Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error)
|
||||
|
||||
func Link(oldpath string, newpath string) (err error) {
|
||||
return linkat(AT_FDCWD, oldpath, AT_FDCWD, newpath, 0)
|
||||
return Linkat(AT_FDCWD, oldpath, AT_FDCWD, newpath, 0)
|
||||
}
|
||||
|
||||
func Mkdir(path string, mode uint32) (err error) {
|
||||
@ -60,10 +60,19 @@ func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
|
||||
return openat(dirfd, path, flags|O_LARGEFILE, mode)
|
||||
}
|
||||
|
||||
//sys readlinkat(dirfd int, path string, buf []byte) (n int, err error)
|
||||
//sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error)
|
||||
|
||||
func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
|
||||
if len(fds) == 0 {
|
||||
return ppoll(nil, 0, timeout, sigmask)
|
||||
}
|
||||
return ppoll(&fds[0], len(fds), timeout, sigmask)
|
||||
}
|
||||
|
||||
//sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error)
|
||||
|
||||
func Readlink(path string, buf []byte) (n int, err error) {
|
||||
return readlinkat(AT_FDCWD, path, buf)
|
||||
return Readlinkat(AT_FDCWD, path, buf)
|
||||
}
|
||||
|
||||
func Rename(oldpath string, newpath string) (err error) {
|
||||
@ -71,34 +80,41 @@ func Rename(oldpath string, newpath string) (err error) {
|
||||
}
|
||||
|
||||
func Rmdir(path string) error {
|
||||
return unlinkat(AT_FDCWD, path, AT_REMOVEDIR)
|
||||
return Unlinkat(AT_FDCWD, path, AT_REMOVEDIR)
|
||||
}
|
||||
|
||||
//sys symlinkat(oldpath string, newdirfd int, newpath string) (err error)
|
||||
//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error)
|
||||
|
||||
func Symlink(oldpath string, newpath string) (err error) {
|
||||
return symlinkat(oldpath, AT_FDCWD, newpath)
|
||||
return Symlinkat(oldpath, AT_FDCWD, newpath)
|
||||
}
|
||||
|
||||
func Unlink(path string) error {
|
||||
return unlinkat(AT_FDCWD, path, 0)
|
||||
return Unlinkat(AT_FDCWD, path, 0)
|
||||
}
|
||||
|
||||
//sys unlinkat(dirfd int, path string, flags int) (err error)
|
||||
|
||||
func Unlinkat(dirfd int, path string) error {
|
||||
return unlinkat(dirfd, path, 0)
|
||||
}
|
||||
//sys Unlinkat(dirfd int, path string, flags int) (err error)
|
||||
|
||||
//sys utimes(path string, times *[2]Timeval) (err error)
|
||||
|
||||
func Utimes(path string, tv []Timeval) (err error) {
|
||||
func Utimes(path string, tv []Timeval) error {
|
||||
if tv == nil {
|
||||
err := utimensat(AT_FDCWD, path, nil, 0)
|
||||
if err != ENOSYS {
|
||||
return err
|
||||
}
|
||||
return utimes(path, nil)
|
||||
}
|
||||
if len(tv) != 2 {
|
||||
return EINVAL
|
||||
}
|
||||
var ts [2]Timespec
|
||||
ts[0] = NsecToTimespec(TimevalToNsec(tv[0]))
|
||||
ts[1] = NsecToTimespec(TimevalToNsec(tv[1]))
|
||||
err := utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
|
||||
if err != ENOSYS {
|
||||
return err
|
||||
}
|
||||
return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
|
||||
}
|
||||
|
||||
@ -123,8 +139,7 @@ func UtimesNano(path string, ts []Timespec) error {
|
||||
// in 2.6.22, Released, 8 July 2007) then fall back to utimes
|
||||
var tv [2]Timeval
|
||||
for i := 0; i < 2; i++ {
|
||||
tv[i].Sec = ts[i].Sec
|
||||
tv[i].Usec = ts[i].Nsec / 1000
|
||||
tv[i] = NsecToTimeval(TimespecToNsec(ts[i]))
|
||||
}
|
||||
return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
|
||||
}
|
||||
@ -383,6 +398,60 @@ func (sa *SockaddrNetlink) sockaddr() (unsafe.Pointer, _Socklen, error) {
|
||||
return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil
|
||||
}
|
||||
|
||||
type SockaddrHCI struct {
|
||||
Dev uint16
|
||||
Channel uint16
|
||||
raw RawSockaddrHCI
|
||||
}
|
||||
|
||||
func (sa *SockaddrHCI) sockaddr() (unsafe.Pointer, _Socklen, error) {
|
||||
sa.raw.Family = AF_BLUETOOTH
|
||||
sa.raw.Dev = sa.Dev
|
||||
sa.raw.Channel = sa.Channel
|
||||
return unsafe.Pointer(&sa.raw), SizeofSockaddrHCI, nil
|
||||
}
|
||||
|
||||
// SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets.
|
||||
// The RxID and TxID fields are used for transport protocol addressing in
|
||||
// (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP), they can be left with
|
||||
// zero values for CAN_RAW and CAN_BCM sockets as they have no meaning.
|
||||
//
|
||||
// The SockaddrCAN struct must be bound to the socket file descriptor
|
||||
// using Bind before the CAN socket can be used.
|
||||
//
|
||||
// // Read one raw CAN frame
|
||||
// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW)
|
||||
// addr := &SockaddrCAN{Ifindex: index}
|
||||
// Bind(fd, addr)
|
||||
// frame := make([]byte, 16)
|
||||
// Read(fd, frame)
|
||||
//
|
||||
// The full SocketCAN documentation can be found in the linux kernel
|
||||
// archives at: https://www.kernel.org/doc/Documentation/networking/can.txt
|
||||
type SockaddrCAN struct {
|
||||
Ifindex int
|
||||
RxID uint32
|
||||
TxID uint32
|
||||
raw RawSockaddrCAN
|
||||
}
|
||||
|
||||
func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) {
|
||||
if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff {
|
||||
return nil, 0, EINVAL
|
||||
}
|
||||
sa.raw.Family = AF_CAN
|
||||
sa.raw.Ifindex = int32(sa.Ifindex)
|
||||
rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
|
||||
for i := 0; i < 4; i++ {
|
||||
sa.raw.Addr[i] = rx[i]
|
||||
}
|
||||
tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
|
||||
for i := 0; i < 4; i++ {
|
||||
sa.raw.Addr[i+4] = tx[i]
|
||||
}
|
||||
return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil
|
||||
}
|
||||
|
||||
func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
|
||||
switch rsa.Addr.Family {
|
||||
case AF_NETLINK:
|
||||
@ -848,7 +917,6 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri
|
||||
//sysnb EpollCreate(size int) (fd int, err error)
|
||||
//sysnb EpollCreate1(flag int) (fd int, err error)
|
||||
//sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error)
|
||||
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
|
||||
//sys Exit(code int) = SYS_EXIT_GROUP
|
||||
//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error)
|
||||
//sys Fallocate(fd int, mode uint32, off int64, len int64) (err error)
|
||||
@ -872,6 +940,7 @@ func Getpgrp() (pid int) {
|
||||
//sysnb Getppid() (ppid int)
|
||||
//sys Getpriority(which int, who int) (prio int, err error)
|
||||
//sysnb Getrusage(who int, rusage *Rusage) (err error)
|
||||
//sysnb Getsid(pid int) (sid int, err error)
|
||||
//sysnb Gettid() (tid int)
|
||||
//sys Getxattr(path string, attr string, dest []byte) (sz int, err error)
|
||||
//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error)
|
||||
@ -883,9 +952,9 @@ func Getpgrp() (pid int) {
|
||||
//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
|
||||
//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
|
||||
//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
|
||||
//sys Pause() (err error)
|
||||
//sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
|
||||
//sysnb prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) = SYS_PRLIMIT64
|
||||
//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64
|
||||
//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)
|
||||
//sys read(fd int, p []byte) (n int, err error)
|
||||
//sys Removexattr(path string, attr string) (err error)
|
||||
//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
|
||||
@ -894,6 +963,7 @@ func Getpgrp() (pid int) {
|
||||
//sysnb Setpgid(pid int, pgid int) (err error)
|
||||
//sysnb Setsid() (pid int, err error)
|
||||
//sysnb Settimeofday(tv *Timeval) (err error)
|
||||
//sys Setns(fd int, nstype int) (err error)
|
||||
|
||||
// issue 1435.
|
||||
// On linux Setuid and Setgid only affects the current thread, not the process.
|
||||
@ -920,7 +990,6 @@ func Setgid(uid int) (err error) {
|
||||
//sys Unmount(target string, flags int) (err error) = SYS_UMOUNT2
|
||||
//sys Unshare(flags int) (err error)
|
||||
//sys Ustat(dev int, ubuf *Ustat_t) (err error)
|
||||
//sys Utime(path string, buf *Utimbuf) (err error)
|
||||
//sys write(fd int, p []byte) (n int, err error)
|
||||
//sys exitThread(code int) (err error) = SYS_EXIT
|
||||
//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ
|
||||
@ -1020,9 +1089,6 @@ func Munmap(b []byte) (err error) {
|
||||
// Newfstatat
|
||||
// Nfsservctl
|
||||
// Personality
|
||||
// Poll
|
||||
// Ppoll
|
||||
// Prctl
|
||||
// Pselect6
|
||||
// Ptrace
|
||||
// Putpmsg
|
||||
|
15
cmd/vendor/golang.org/x/sys/unix/syscall_linux_386.go
generated
vendored
@ -24,8 +24,6 @@ func NsecToTimespec(nsec int64) (ts Timespec) {
|
||||
return
|
||||
}
|
||||
|
||||
func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
|
||||
|
||||
func NsecToTimeval(nsec int64) (tv Timeval) {
|
||||
nsec += 999 // round up to microsecond
|
||||
tv.Sec = int32(nsec / 1e9)
|
||||
@ -93,6 +91,8 @@ func Pipe2(p []int, flags int) (err error) {
|
||||
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
|
||||
|
||||
//sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error)
|
||||
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
|
||||
//sys Pause() (err error)
|
||||
|
||||
func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
|
||||
page := uintptr(offset / 4096)
|
||||
@ -181,6 +181,8 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
|
||||
//sysnb Gettimeofday(tv *Timeval) (err error)
|
||||
//sysnb Time(t *Time_t) (tt Time_t, err error)
|
||||
|
||||
//sys Utime(path string, buf *Utimbuf) (err error)
|
||||
|
||||
// On x86 Linux, all the socket calls go through an extra indirection,
|
||||
// I think because the 5-register system call interface can't handle
|
||||
// the 6-argument calls like sendto and recvfrom. Instead the
|
||||
@ -386,3 +388,12 @@ func (msghdr *Msghdr) SetControllen(length int) {
func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint32(length)
}

//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error)

func Poll(fds []PollFd, timeout int) (n int, err error) {
if len(fds) == 0 {
return poll(nil, 0, timeout)
}
return poll(&fds[0], len(fds), timeout)
}
Some files were not shown because too many files have changed in this diff.