diff --git a/etcdserver/raft.go b/etcdserver/raft.go
index e79ecd6ca..b3dced7f4 100644
--- a/etcdserver/raft.go
+++ b/etcdserver/raft.go
@@ -95,6 +95,7 @@ type raftNode struct {
 	term  uint64
 	lead  uint64
 
+	tickMu *sync.Mutex
 	raftNodeConfig
 
 	// a chan to send/receive snapshot
@@ -131,6 +132,7 @@ type raftNodeConfig struct {
 
 func newRaftNode(cfg raftNodeConfig) *raftNode {
 	r := &raftNode{
+		tickMu:         new(sync.Mutex),
 		raftNodeConfig: cfg,
 		// set up contention detectors for raft heartbeat message.
 		// expect to send a heartbeat within 2 heartbeat intervals.
@@ -149,6 +151,13 @@ func newRaftNode(cfg raftNodeConfig) *raftNode {
 	return r
 }
 
+// raft.Node does not have locks in the raft package; tick serializes calls to Tick.
+func (r *raftNode) tick() {
+	r.tickMu.Lock()
+	r.Tick()
+	r.tickMu.Unlock()
+}
+
 // start prepares and starts raftNode in a new goroutine. It is no longer safe
 // to modify the fields after it has been started.
 func (r *raftNode) start(rh *raftReadyHandler) {
@@ -161,7 +170,7 @@ func (r *raftNode) start(rh *raftReadyHandler) {
 		for {
 			select {
 			case <-r.ticker.C:
-				r.Tick()
+				r.tick()
 			case rd := <-r.Ready():
 				if rd.SoftState != nil {
 					newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead
@@ -374,7 +383,7 @@ func (r *raftNode) resumeSending() {
 // speeding up election process.
 func (r *raftNode) advanceTicks(ticks int) {
 	for i := 0; i < ticks; i++ {
-		r.Tick()
+		r.tick()
 	}
 }
 
@@ -416,7 +425,6 @@ func startNode(cfg *ServerConfig, cl *membership.RaftCluster, ids []types.ID) (i
 	raftStatus = n.Status
 	raftStatusMu.Unlock()
 
-	advanceTicksForElection(n, c.ElectionTick)
 	return id, n, s, w
 }
 
diff --git a/etcdserver/server.go b/etcdserver/server.go
index 271c5e773..b5856400a 100644
--- a/etcdserver/server.go
+++ b/etcdserver/server.go
@@ -513,11 +513,51 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
 	return srv, nil
 }
 
-// Start prepares and starts server in a new goroutine. It is no longer safe to
-// modify a server's fields after it has been sent to Start.
-// It also starts a goroutine to publish its server information.
+func (s *EtcdServer) adjustTicks() {
+	clusterN := len(s.cluster.Members())
+
+	// single-node fresh start, or single-node recovering from snapshot
+	if clusterN == 1 {
+		ticks := s.Cfg.ElectionTicks - 1
+		plog.Infof("%s as single-node; fast-forwarding %d ticks (election ticks %d)", s.ID(), ticks, s.Cfg.ElectionTicks)
+		s.r.advanceTicks(ticks)
+		return
+	}
+
+	// retry up to "rafthttp.ConnReadTimeout" (5s) until a peer connection is reported;
+	// otherwise:
+	// 1. all connections failed, or
+	// 2. no active peers, or
+	// 3. restarted single-node with no snapshot,
+	// in which case do nothing, because advancing ticks would have no effect
+	waitTime := rafthttp.ConnReadTimeout
+	itv := 50 * time.Millisecond
+	for i := int64(0); i < int64(waitTime/itv); i++ {
+		select {
+		case <-time.After(itv):
+		case <-s.stopping:
+			return
+		}
+
+		peerN := s.r.transport.ActivePeers()
+		if peerN > 1 {
+			// multi-node cluster has reported peer connections;
+			// adjust ticks in case leader messages arrive slowly
+			ticks := s.Cfg.ElectionTicks - 2
+			plog.Infof("%s initialized peer connections; fast-forwarding %d ticks (election ticks %d) with %d active peer(s)", s.ID(), ticks, s.Cfg.ElectionTicks, peerN)
+			s.r.advanceTicks(ticks)
+			return
+		}
+	}
+}
+
+// Start performs any initialization of the Server necessary for it to
+// begin serving requests. It must be called before Do or Process.
+// Start must be non-blocking; any long-running server functionality
+// should be implemented in goroutines.
 func (s *EtcdServer) Start() {
 	s.start()
+	s.goAttach(func() { s.adjustTicks() })
 	s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) })
 	s.goAttach(s.purgeFile)
 	s.goAttach(func() { monitorFileDescriptor(s.stopping) })
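For context, here is a minimal standalone sketch (not etcd code; `node`, `tickingNode`, and the tick counter are illustrative stand-ins) of why the patch routes every tick through a mutex-guarded `tick()` wrapper: `raft.Node.Tick` is not internally synchronized, and after this change both the ticker loop and the new `adjustTicks` fast-forward path may call it concurrently.

```go
package main

import (
	"fmt"
	"sync"
)

// node stands in for raft.Node in this sketch: its Tick method is not
// safe for concurrent use, so callers must serialize access themselves.
type node struct {
	ticks int
}

func (n *node) Tick() { n.ticks++ }

// tickingNode mirrors the locking pattern of the patched raftNode:
// every tick goes through tickMu, whether it comes from the ticker
// loop or from a fast-forwarding advanceTicks call.
type tickingNode struct {
	tickMu sync.Mutex
	node
}

func (r *tickingNode) tick() {
	r.tickMu.Lock()
	r.Tick()
	r.tickMu.Unlock()
}

func (r *tickingNode) advanceTicks(ticks int) {
	for i := 0; i < ticks; i++ {
		r.tick()
	}
}

func main() {
	r := &tickingNode{}

	var wg sync.WaitGroup
	wg.Add(2)
	// Simulate the ticker goroutine and an adjustTicks-style fast-forward
	// running concurrently; the mutex keeps the tick count consistent.
	go func() { defer wg.Done(); r.advanceTicks(9) }()
	go func() {
		defer wg.Done()
		for i := 0; i < 100; i++ {
			r.tick()
		}
	}()
	wg.Wait()

	fmt.Println("total ticks:", r.ticks) // always 109 when every caller goes through tick()
}
```

Run this sketch under `go run -race` and the race detector stays quiet; replacing `r.tick()` with a direct `r.Tick()` in either goroutine reintroduces a data race on the shared counter, which is the situation the patched `raftNode.tick()` avoids.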