*: clean up/fix server structured logs

Signed-off-by: Gyuho Lee <gyuhox@gmail.com>

parent 229a0c9da6
commit d0847f4f25
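Three mechanical changes recur below: log fields describing a remote member are renamed from remote-member-* to remote-peer-*; cluster-scoped events gain cluster-id and local-member-id fields; and RaftCluster's single id field is split into localID and cid so log sites can tell the two apart. Every touched site keeps the existing dual-logger convention — use the structured zap logger when one is configured, otherwise fall back to the legacy plog. A minimal, self-contained sketch of that convention (warnPeer and the log.Printf fallback are illustrative stand-ins, not etcd code):

    package main

    import (
    	"errors"
    	"log"

    	"go.uber.org/zap"
    )

    // warnPeer shows the pattern this commit standardizes: structured
    // fields named remote-peer-* when a zap logger is configured, and a
    // formatted legacy-style fallback otherwise.
    func warnPeer(lg *zap.Logger, msg, peerID string, err error) {
    	if lg != nil {
    		lg.Warn(msg,
    			zap.String("remote-peer-id", peerID),
    			zap.Error(err),
    		)
    	} else {
    		log.Printf("%s: peer %s (%v)", msg, peerID, err)
    	}
    }

    func main() {
    	lg, _ := zap.NewProduction()
    	defer lg.Sync()
    	warnPeer(lg, "failed to get version", "8e9e05c52164694d", errors.New("timeout"))
    }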
@@ -163,7 +163,7 @@ func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt
 		ver, err := getVersion(lg, m, rt)
 		if err != nil {
 			if lg != nil {
-				lg.Warn("failed to get version", zap.String("remote-member-id", m.ID.String()), zap.Error(err))
+				lg.Warn("failed to get version", zap.String("remote-peer-id", m.ID.String()), zap.Error(err))
 			} else {
 				plog.Warningf("cannot get the version of member %s (%v)", m.ID, err)
 			}
@@ -191,8 +191,8 @@ func decideClusterVersion(lg *zap.Logger, vers map[string]*version.Versions) *se
 			if lg != nil {
 				lg.Warn(
 					"failed to parse server version of remote member",
-					zap.String("remote-member-id", mid),
-					zap.String("remote-member-version", ver.Server),
+					zap.String("remote-peer-id", mid),
+					zap.String("remote-peer-version", ver.Server),
 					zap.Error(err),
 				)
 			} else {
@@ -205,8 +205,8 @@ func decideClusterVersion(lg *zap.Logger, vers map[string]*version.Versions) *se
 				lg.Warn(
 					"local etcd version is not up-to-date",
 					zap.String("local-member-version", lv.String()),
-					zap.String("remote-member-id", mid),
-					zap.String("remote-member-version", ver.Server),
+					zap.String("remote-peer-id", mid),
+					zap.String("remote-peer-version", ver.Server),
 				)
 			} else {
 				plog.Warningf("the local etcd version %s is not up-to-date", lv.String())
@@ -254,8 +254,8 @@ func isCompatibleWithVers(lg *zap.Logger, vers map[string]*version.Versions, loc
 			if lg != nil {
 				lg.Warn(
 					"failed to parse cluster version of remote member",
-					zap.String("remote-member-id", id),
-					zap.String("remote-member-cluster-version", v.Cluster),
+					zap.String("remote-peer-id", id),
+					zap.String("remote-peer-cluster-version", v.Cluster),
 					zap.Error(err),
 				)
 			} else {
@@ -267,8 +267,8 @@ func isCompatibleWithVers(lg *zap.Logger, vers map[string]*version.Versions, loc
 			if lg != nil {
 				lg.Warn(
 					"cluster version of remote member is not compatible; too low",
-					zap.String("remote-member-id", id),
-					zap.String("remote-member-cluster-version", clusterv.String()),
+					zap.String("remote-peer-id", id),
+					zap.String("remote-peer-cluster-version", clusterv.String()),
 					zap.String("minimum-cluster-version-supported", minV.String()),
 				)
 			} else {
@@ -280,8 +280,8 @@ func isCompatibleWithVers(lg *zap.Logger, vers map[string]*version.Versions, loc
 			if lg != nil {
 				lg.Warn(
 					"cluster version of remote member is not compatible; too high",
-					zap.String("remote-member-id", id),
-					zap.String("remote-member-cluster-version", clusterv.String()),
+					zap.String("remote-peer-id", id),
+					zap.String("remote-peer-cluster-version", clusterv.String()),
 					zap.String("minimum-cluster-version-supported", minV.String()),
 				)
 			} else {
@@ -313,7 +313,7 @@ func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*ve
 			lg.Warn(
 				"failed to reach the peer URL",
 				zap.String("address", addr),
-				zap.String("remote-member-id", m.ID.String()),
+				zap.String("remote-peer-id", m.ID.String()),
 				zap.Error(err),
 			)
 		} else {
@@ -329,7 +329,7 @@ func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*ve
 			lg.Warn(
 				"failed to read body of response",
 				zap.String("address", addr),
-				zap.String("remote-member-id", m.ID.String()),
+				zap.String("remote-peer-id", m.ID.String()),
 				zap.Error(err),
 			)
 		} else {
@@ -343,7 +343,7 @@ func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*ve
 			lg.Warn(
 				"failed to unmarshal response",
 				zap.String("address", addr),
-				zap.String("remote-member-id", m.ID.String()),
+				zap.String("remote-peer-id", m.ID.String()),
 				zap.Error(err),
 			)
 		} else {
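The hunks above all sit in the version-negotiation helpers (getVersions, decideClusterVersion, isCompatibleWithVers, getVersion). decideClusterVersion settles on the lowest server version any member reports, skipping versions it cannot parse — exactly the case the "failed to parse server version of remote member" warning covers. A hedged re-sketch of that selection, using the same go-semver library but none of the etcd types (minServerVersion is illustrative, not the etcd implementation):

    package main

    import (
    	"fmt"

    	"github.com/coreos/go-semver/semver"
    )

    // minServerVersion mirrors the idea behind decideClusterVersion:
    // the cluster version is the minimum server version across members.
    func minServerVersion(vers map[string]string) *semver.Version {
    	var cv *semver.Version
    	for id, s := range vers {
    		v, err := semver.NewVersion(s)
    		if err != nil {
    			fmt.Printf("skipping member %s: unparsable version %q (%v)\n", id, s, err)
    			continue
    		}
    		if cv == nil || v.LessThan(*cv) {
    			cv = v
    		}
    	}
    	return cv
    }

    func main() {
    	m := map[string]string{"a": "3.3.0", "b": "3.2.5", "c": "3.4.1"}
    	fmt.Println(minServerVersion(m)) // 3.2.5
    }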
@@ -63,11 +63,11 @@ func (s *EtcdServer) CheckInitialHashKV() error {
 				zap.Int64("local-member-revision", rev),
 				zap.Int64("local-member-compact-revision", crev),
 				zap.Uint32("local-member-hash", h),
-				zap.String("remote-member-id", peerID.String()),
-				zap.Strings("remote-member-endpoints", p.eps),
-				zap.Int64("remote-member-revision", p.resp.Header.Revision),
-				zap.Int64("remote-member-compact-revision", p.resp.CompactRevision),
-				zap.Uint32("remote-member-hash", p.resp.Hash),
+				zap.String("remote-peer-id", peerID.String()),
+				zap.Strings("remote-peer-endpoints", p.eps),
+				zap.Int64("remote-peer-revision", p.resp.Header.Revision),
+				zap.Int64("remote-peer-compact-revision", p.resp.CompactRevision),
+				zap.Uint32("remote-peer-hash", p.resp.Hash),
 			}

 			if h != p.resp.Hash {
@@ -100,8 +100,8 @@ func (s *EtcdServer) CheckInitialHashKV() error {
 				zap.Int64("local-member-revision", rev),
 				zap.Int64("local-member-compact-revision", crev),
 				zap.Uint32("local-member-hash", h),
-				zap.String("remote-member-id", p.id.String()),
-				zap.Strings("remote-member-endpoints", p.eps),
+				zap.String("remote-peer-id", p.id.String()),
+				zap.Strings("remote-peer-endpoints", p.eps),
 				zap.Error(err),
 			)
 		} else {
@@ -115,8 +115,8 @@ func (s *EtcdServer) CheckInitialHashKV() error {
 				zap.Int64("local-member-revision", rev),
 				zap.Int64("local-member-compact-revision", crev),
 				zap.Uint32("local-member-hash", h),
-				zap.String("remote-member-id", p.id.String()),
-				zap.Strings("remote-member-endpoints", p.eps),
+				zap.String("remote-peer-id", p.id.String()),
+				zap.Strings("remote-peer-endpoints", p.eps),
 				zap.Error(err),
 			)
 		} else {
@@ -336,8 +336,8 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) {
 			lg.Warn(
 				"failed to create client to peer URL",
 				zap.String("local-member-id", s.ID().String()),
-				zap.String("remote-member-id", p.id.String()),
-				zap.Strings("remote-member-endpoints", p.eps),
+				zap.String("remote-peer-id", p.id.String()),
+				zap.Strings("remote-peer-endpoints", p.eps),
 				zap.Error(cerr),
 			)
 		} else {
@@ -361,7 +361,7 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) {
 				"failed hash kv request",
 				zap.String("local-member-id", s.ID().String()),
 				zap.Int64("requested-revision", rev),
-				zap.String("remote-member-endpoint", c),
+				zap.String("remote-peer-endpoint", c),
 				zap.Error(cerr),
 			)
 		} else {
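These hunks cover the initial corruption check: CheckInitialHashKV compares the local KV hash against each peer's answer at the same revision, and the renamed remote-peer-* fields label the remote side of that comparison. A self-contained sketch of the comparison being logged (hashKV and mismatch are illustrative names, not etcd's):

    package main

    import "fmt"

    // hashKV mirrors the fields these log lines expose for each side
    // of the check.
    type hashKV struct {
    	Revision        int64
    	CompactRevision int64
    	Hash            uint32
    }

    // mismatch reports corruption the way the check treats it: equal
    // compact revisions but different hashes over the same keyspace.
    func mismatch(local, remote hashKV) bool {
    	return local.CompactRevision == remote.CompactRevision && local.Hash != remote.Hash
    }

    func main() {
    	l := hashKV{Revision: 100, CompactRevision: 90, Hash: 0xdeadbeef}
    	r := hashKV{Revision: 100, CompactRevision: 90, Hash: 0xcafebabe}
    	fmt.Println(mismatch(l, r)) // true
    }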
@@ -43,8 +43,9 @@ import (
 type RaftCluster struct {
 	lg *zap.Logger

-	id    types.ID
-	token string
+	localID types.ID
+	cid     types.ID
+	token   string

 	v2store v2store.Store
 	be      backend.Backend
@@ -75,7 +76,7 @@ func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap)

 func NewClusterFromMembers(lg *zap.Logger, token string, id types.ID, membs []*Member) *RaftCluster {
 	c := NewCluster(lg, token)
-	c.id = id
+	c.cid = id
 	for _, m := range membs {
 		c.members[m.ID] = m
 	}
@@ -91,7 +92,7 @@ func NewCluster(lg *zap.Logger, token string) *RaftCluster {
 	}
 }

-func (c *RaftCluster) ID() types.ID { return c.id }
+func (c *RaftCluster) ID() types.ID { return c.cid }

 func (c *RaftCluster) Members() []*Member {
 	c.Lock()
@@ -178,7 +179,7 @@ func (c *RaftCluster) String() string {
 	c.Lock()
 	defer c.Unlock()
 	b := &bytes.Buffer{}
-	fmt.Fprintf(b, "{ClusterID:%s ", c.id)
+	fmt.Fprintf(b, "{ClusterID:%s ", c.cid)
 	var ms []string
 	for _, m := range c.members {
 		ms = append(ms, fmt.Sprintf("%+v", m))
@@ -199,10 +200,13 @@ func (c *RaftCluster) genID() {
 		binary.BigEndian.PutUint64(b[8*i:], uint64(id))
 	}
 	hash := sha1.Sum(b)
-	c.id = types.ID(binary.BigEndian.Uint64(hash[:8]))
+	c.cid = types.ID(binary.BigEndian.Uint64(hash[:8]))
 }

-func (c *RaftCluster) SetID(id types.ID) { c.id = id }
+func (c *RaftCluster) SetID(localID, cid types.ID) {
+	c.localID = localID
+	c.cid = cid
+}

 func (c *RaftCluster) SetStore(st v2store.Store) { c.v2store = st }

@@ -223,13 +227,14 @@ func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) {
 	for _, m := range c.members {
 		if c.lg != nil {
 			c.lg.Info(
-				"added member from store",
-				zap.String("cluster-id", c.id.String()),
-				zap.String("member-id", m.ID.String()),
-				zap.Strings("member-peer-urls", m.PeerURLs),
+				"recovered/added member from store",
+				zap.String("cluster-id", c.cid.String()),
+				zap.String("local-member-id", c.localID.String()),
+				zap.String("recovered-remote-peer-id", m.ID.String()),
+				zap.Strings("recovered-remote-peer-urls", m.PeerURLs),
 			)
 		} else {
-			plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.id)
+			plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.cid)
 		}
 	}
 	if c.version != nil {
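The struct split above is what the rest of the membership hunks build on: RaftCluster previously carried only the cluster ID, so membership events could not name the local member that observed them. Keeping localID next to cid lets Recover and the member-lifecycle methods below tag every line with both cluster-id and local-member-id without threading an extra parameter through each caller.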
@@ -337,12 +342,13 @@ func (c *RaftCluster) AddMember(m *Member) {
 	if c.lg != nil {
 		c.lg.Info(
 			"added member",
-			zap.String("member-id", m.ID.String()),
-			zap.Strings("member-peer-urls", m.PeerURLs),
-			zap.String("cluster-id", c.id.String()),
+			zap.String("cluster-id", c.cid.String()),
+			zap.String("local-member-id", c.localID.String()),
+			zap.String("added-remote-peer-id", m.ID.String()),
+			zap.Strings("added-remote-peer-peer-urls", m.PeerURLs),
 		)
 	} else {
-		plog.Infof("added member %s %v to cluster %s", m.ID, m.PeerURLs, c.id)
+		plog.Infof("added member %s %v to cluster %s", m.ID, m.PeerURLs, c.cid)
 	}
 }
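A practical effect of the added fields: in a multi-member deployment, membership events from different processes can be grouped by cluster-id and attributed to the emitting process by local-member-id, with the added-remote-peer-* fields naming the subject of the change.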
@@ -358,23 +364,36 @@ func (c *RaftCluster) RemoveMember(id types.ID) {
 		mustDeleteMemberFromBackend(c.be, id)
 	}

+	m, ok := c.members[id]
 	delete(c.members, id)
 	c.removed[id] = true

 	if c.lg != nil {
-		c.lg.Info(
-			"removed member",
-			zap.String("member-id", id.String()),
-			zap.String("cluster-id", c.id.String()),
-		)
+		if ok {
+			c.lg.Info(
+				"removed member",
+				zap.String("cluster-id", c.cid.String()),
+				zap.String("local-member-id", c.localID.String()),
+				zap.String("removed-remote-peer-id", id.String()),
+				zap.Strings("removed-remote-peer-urls", m.PeerURLs),
+			)
+		} else {
+			c.lg.Warn(
+				"skipped removing already removed member",
+				zap.String("cluster-id", c.cid.String()),
+				zap.String("local-member-id", c.localID.String()),
+				zap.String("removed-remote-peer-id", id.String()),
+			)
+		}
 	} else {
-		plog.Infof("removed member %s from cluster %s", id, c.id)
+		plog.Infof("removed member %s from cluster %s", id, c.cid)
 	}
 }

 func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) {
 	c.Lock()
 	defer c.Unlock()

 	if m, ok := c.members[id]; ok {
 		m.Attributes = attr
 		if c.v2store != nil {
@@ -385,17 +404,28 @@ func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) {
 		}
 		return
 	}

 	_, ok := c.removed[id]
 	if !ok {
 		if c.lg != nil {
-			c.lg.Panic("failed to update; member unknown", zap.String("member-id", id.String()))
+			c.lg.Panic(
+				"failed to update; member unknown",
+				zap.String("cluster-id", c.cid.String()),
+				zap.String("local-member-id", c.localID.String()),
+				zap.String("unknown-remote-peer-id", id.String()),
+			)
 		} else {
 			plog.Panicf("error updating attributes of unknown member %s", id)
 		}
 	}

 	if c.lg != nil {
-		c.lg.Warn("skipped attributes update of removed member", zap.String("member-id", id.String()))
+		c.lg.Warn(
+			"skipped attributes update of removed member",
+			zap.String("cluster-id", c.cid.String()),
+			zap.String("local-member-id", c.localID.String()),
+			zap.String("removed-remote-peer-id", id.String()),
+		)
 	} else {
 		plog.Warningf("skipped updating attributes of removed member %s", id)
 	}
@@ -416,12 +446,13 @@ func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes)
 	if c.lg != nil {
 		c.lg.Info(
 			"updated member",
-			zap.String("member-id", id.String()),
-			zap.Strings("member-peer-urls", raftAttr.PeerURLs),
-			zap.String("cluster-id", c.id.String()),
+			zap.String("cluster-id", c.cid.String()),
+			zap.String("local-member-id", c.localID.String()),
+			zap.String("updated-remote-peer-id", id.String()),
+			zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs),
 		)
 	} else {
-		plog.Noticef("updated member %s %v in cluster %s", id, raftAttr.PeerURLs, c.id)
+		plog.Noticef("updated member %s %v in cluster %s", id, raftAttr.PeerURLs, c.cid)
 	}
 }
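RemoveMember above now looks the member up before deleting it, both so the departed peer's URLs can still be logged and so a second removal of the same ID is detected and downgraded to a warning. A self-contained sketch of that comma-ok guard (types and names are illustrative):

    package main

    import "fmt"

    type member struct{ peerURLs []string }

    // remove sketches the guard: capture the entry before delete so its
    // URLs can be logged, and report whether the member was actually
    // present (a second removal is a no-op worth warning about).
    func remove(members map[uint64]member, id uint64) bool {
    	m, ok := members[id]
    	delete(members, id)
    	if ok {
    		fmt.Printf("removed member %x, peer URLs %v\n", id, m.peerURLs)
    	} else {
    		fmt.Printf("skipped removing already removed member %x\n", id)
    	}
    	return ok
    }

    func main() {
    	ms := map[uint64]member{1: {peerURLs: []string{"http://10.0.0.1:2380"}}}
    	remove(ms, 1) // removed
    	remove(ms, 1) // skipped
    }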
@@ -441,6 +472,8 @@ func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *s
 		if c.lg != nil {
 			c.lg.Info(
 				"updated cluster version",
+				zap.String("cluster-id", c.cid.String()),
+				zap.String("local-member-id", c.localID.String()),
 				zap.String("from", version.Cluster(c.version.String())),
 				zap.String("to", version.Cluster(ver.String())),
 			)
@@ -451,6 +484,8 @@ func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *s
 		if c.lg != nil {
 			c.lg.Info(
 				"set initial cluster version",
+				zap.String("cluster-id", c.cid.String()),
+				zap.String("local-member-id", c.localID.String()),
 				zap.String("cluster-version", version.Cluster(ver.String())),
 			)
 		} else {
@@ -497,6 +532,8 @@ func (c *RaftCluster) IsReadyToAddNewMember() bool {
 				"rejecting member add; started member will be less than quorum",
 				zap.Int("number-of-started-member", nstarted),
 				zap.Int("quorum", nquorum),
+				zap.String("cluster-id", c.cid.String()),
+				zap.String("local-member-id", c.localID.String()),
 			)
 		} else {
 			plog.Warningf("Reject add member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
@@ -529,6 +566,8 @@ func (c *RaftCluster) IsReadyToRemoveMember(id uint64) bool {
 				"rejecting member remove; started member will be less than quorum",
 				zap.Int("number-of-started-member", nstarted),
 				zap.Int("quorum", nquorum),
+				zap.String("cluster-id", c.cid.String()),
+				zap.String("local-member-id", c.localID.String()),
 			)
 		} else {
 			plog.Warningf("Reject remove member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
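Both readiness checks reject a reconfiguration that would leave fewer started members than the quorum of the resulting cluster. A sketch of the underlying arithmetic, on the usual simple-majority definition (the quorum helper is illustrative, not etcd's):

    package main

    import "fmt"

    // quorum returns the simple-majority threshold for a cluster of n
    // voting members: more than half must be up to make progress.
    func quorum(n int) int { return n/2 + 1 }

    func main() {
    	// Removing one of three members leaves a 2-member cluster whose
    	// quorum is 2; with only one started member, the request is rejected.
    	nstarted, nafter := 1, 2
    	fmt.Println(nstarted < quorum(nafter)) // true -> reject
    }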
@@ -486,15 +486,15 @@ func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *member
 	if cfg.Logger != nil {
 		cfg.Logger.Info(
 			"restarting local member",
-			zap.String("local-member-id", id.String()),
 			zap.String("cluster-id", cid.String()),
+			zap.String("local-member-id", id.String()),
 			zap.Uint64("commit-index", st.Commit),
 		)
 	} else {
 		plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
 	}
 	cl := membership.NewCluster(cfg.Logger, "")
-	cl.SetID(cid)
+	cl.SetID(id, cid)
 	s := raft.NewMemoryStorage()
 	if snapshot != nil {
 		s.ApplySnapshot(*snapshot)
@@ -578,8 +578,8 @@ func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types
 	if cfg.Logger != nil {
 		cfg.Logger.Info(
 			"forcing restart member",
-			zap.String("local-member-id", id.String()),
 			zap.String("cluster-id", cid.String()),
+			zap.String("local-member-id", id.String()),
 			zap.Uint64("commit-index", st.Commit),
 		)
 	} else {
@@ -587,7 +587,7 @@ func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types
 	}

 	cl := membership.NewCluster(cfg.Logger, "")
-	cl.SetID(cid)
+	cl.SetID(id, cid)
 	s := raft.NewMemoryStorage()
 	if snapshot != nil {
 		s.ApplySnapshot(*snapshot)
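Both restart paths already know the local ID (id) and cluster ID (cid) before constructing the cluster, so they can pass both to the widened SetID directly; the two Info calls are also reordered to put cluster-id before local-member-id, matching the membership logs.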
@@ -348,11 +348,12 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
 		}

 		remotes = existingCluster.Members()
-		cl.SetID(existingCluster.ID())
+		cl.SetID(types.ID(0), existingCluster.ID())
 		cl.SetStore(st)
 		cl.SetBackend(be)
 		cfg.Print()
 		id, n, s, w = startNode(cfg, cl, nil)
+		cl.SetID(id, existingCluster.ID())

 	case !haveWAL && cfg.NewCluster:
 		if err = cfg.VerifyBootstrap(); err != nil {
@@ -388,6 +389,7 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
 		cl.SetBackend(be)
 		cfg.PrintWithInitial()
 		id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
+		cl.SetID(id, cl.ID())

 	case haveWAL:
 		if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
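The bootstrap paths in NewServer differ from the restart paths: when joining an existing cluster the local ID is not known until startNode returns, so the cluster is first seeded with a zero placeholder (types.ID(0)) and SetID is called again with the real ID afterwards; the fresh-cluster path likewise back-fills the local ID after its startNode call.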
@@ -14,9 +14,7 @@

 package types

-import (
-	"strconv"
-)
+import "strconv"

 // ID represents a generic identifier which is canonically
 // stored as a uint64 but is typically represented as a
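The final hunk only collapses a single-import block in the types package, where types.ID is defined: a uint64 identifier whose string form is written with strconv. A self-contained sketch of that canonical representation (the local ID type and its base-16 formatting mirror what etcd's types package does, stated here as an assumption rather than quoted from the file):

    package main

    import (
    	"fmt"
    	"strconv"
    )

    // ID is a local stand-in for types.ID: a uint64 identifier whose
    // canonical string form is base-16 (assumed, as in etcd).
    type ID uint64

    func (i ID) String() string { return strconv.FormatUint(uint64(i), 16) }

    func main() {
    	fmt.Println(ID(0x8e9e05c52164694d)) // 8e9e05c52164694d
    }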