Move config (ServerConfig) out of etcdserver package.

Motivation:
  - ServerConfig is part of the 'embed' public API, while etcdserver is more 'internal'
  - EtcdServer is already too big, and config is a pretty widespread leaf that both
    parts would need if we were to split etcdserver (e.g. into pre- & post-apply parts).
Piotr Tabor
2021-03-11 19:55:20 +01:00
parent 3ead91ca3e
commit fd7fed1511
9 changed files with 49 additions and 42 deletions
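
For orientation, a minimal caller-side sketch of what this move means in practice: ServerConfig now lives in go.etcd.io/etcd/server/v3/config, and helpers that used to be unexported inside etcdserver (backendPath, bootstrapTimeout, checkDuplicateURL, ...) are exported from the new package (BackendPath, BootstrapTimeoutEffective, CheckDuplicateURL, ...), as the diffs below show. This is a sketch only, assuming the moved type keeps the fields and path helpers shown in the diff; the field values are placeholders, not part of the commit.

// Sketch only: illustrates the new import path and a few exported accessors.
package main

import (
	"fmt"

	"go.etcd.io/etcd/server/v3/config" // previously: type ServerConfig in package etcdserver
	"go.uber.org/zap"
)

func main() {
	cfg := config.ServerConfig{
		Name:    "node1",
		DataDir: "/var/lib/etcd",
		Logger:  zap.NewExample(),
	}

	// Formerly unexported methods are now part of the package's public surface.
	fmt.Println(cfg.BackendPath())               // /var/lib/etcd/member/snap/db
	fmt.Println(cfg.WALDir())                    // /var/lib/etcd/member/wal
	fmt.Println(cfg.BootstrapTimeoutEffective()) // 1s when BootstrapTimeout is unset
	fmt.Println(config.CheckDuplicateURL(nil))   // false
}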

View File

@@ -20,6 +20,7 @@ import (
"time"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
"go.etcd.io/etcd/server/v3/etcdserver/cindex"
"go.etcd.io/etcd/server/v3/mvcc/backend"
@@ -27,9 +28,9 @@ import (
"go.uber.org/zap"
)
-func newBackend(cfg ServerConfig) backend.Backend {
+func newBackend(cfg config.ServerConfig) backend.Backend {
bcfg := backend.DefaultBackendConfig()
-bcfg.Path = cfg.backendPath()
+bcfg.Path = cfg.BackendPath()
bcfg.UnsafeNoFsync = cfg.UnsafeNoFsync
if cfg.BackendBatchLimit != 0 {
bcfg.BatchLimit = cfg.BackendBatchLimit
@@ -53,20 +54,20 @@ func newBackend(cfg ServerConfig) backend.Backend {
}
// openSnapshotBackend renames a snapshot db to the current etcd db and opens it.
-func openSnapshotBackend(cfg ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) {
+func openSnapshotBackend(cfg config.ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) {
snapPath, err := ss.DBFilePath(snapshot.Metadata.Index)
if err != nil {
return nil, fmt.Errorf("failed to find database snapshot file (%v)", err)
}
-if err := os.Rename(snapPath, cfg.backendPath()); err != nil {
+if err := os.Rename(snapPath, cfg.BackendPath()); err != nil {
return nil, fmt.Errorf("failed to rename database snapshot file (%v)", err)
}
return openBackend(cfg), nil
}
// openBackend returns a backend using the current etcd db.
-func openBackend(cfg ServerConfig) backend.Backend {
-fn := cfg.backendPath()
+func openBackend(cfg config.ServerConfig) backend.Backend {
+fn := cfg.BackendPath()
now, beOpened := time.Now(), make(chan backend.Backend)
go func() {
@@ -93,7 +94,7 @@ func openBackend(cfg ServerConfig) backend.Backend {
// before updating the backend db after persisting raft snapshot to disk,
// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this
// case, replace the db with the snapshot db sent by the leader.
-func recoverSnapshotBackend(cfg ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot, beExist bool) (backend.Backend, error) {
+func recoverSnapshotBackend(cfg config.ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot, beExist bool) (backend.Backend, error) {
consistentIndex := uint64(0)
if beExist {
ci := cindex.NewConsistentIndex(oldbe.BatchTx())

View File

@@ -1,327 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"context"
"fmt"
"path/filepath"
"sort"
"strings"
"time"
"go.etcd.io/etcd/pkg/v3/netutil"
"go.etcd.io/etcd/pkg/v3/transport"
"go.etcd.io/etcd/pkg/v3/types"
bolt "go.etcd.io/bbolt"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// ServerConfig holds the configuration of etcd as taken from the command line or discovery.
type ServerConfig struct {
Name string
DiscoveryURL string
DiscoveryProxy string
ClientURLs types.URLs
PeerURLs types.URLs
DataDir string
// DedicatedWALDir config will make the etcd to write the WAL to the WALDir
// rather than the dataDir/member/wal.
DedicatedWALDir string
SnapshotCount uint64
// SnapshotCatchUpEntries is the number of entries for a slow follower
// to catch-up after compacting the raft storage entries.
// We expect the follower has a millisecond level latency with the leader.
// The max throughput is around 10K. Keep a 5K entries is enough for helping
// follower to catch up.
// WARNING: only change this for tests. Always use "DefaultSnapshotCatchUpEntries"
SnapshotCatchUpEntries uint64
MaxSnapFiles uint
MaxWALFiles uint
// BackendBatchInterval is the maximum time before commit the backend transaction.
BackendBatchInterval time.Duration
// BackendBatchLimit is the maximum operations before commit the backend transaction.
BackendBatchLimit int
// BackendFreelistType is the type of the backend boltdb freelist.
BackendFreelistType bolt.FreelistType
InitialPeerURLsMap types.URLsMap
InitialClusterToken string
NewCluster bool
PeerTLSInfo transport.TLSInfo
CORS map[string]struct{}
// HostWhitelist lists acceptable hostnames from client requests.
// If server is insecure (no TLS), server only accepts requests
// whose Host header value exists in this white list.
HostWhitelist map[string]struct{}
TickMs uint
ElectionTicks int
// InitialElectionTickAdvance is true, then local member fast-forwards
// election ticks to speed up "initial" leader election trigger. This
// benefits the case of larger election ticks. For instance, cross
// datacenter deployment may require longer election timeout of 10-second.
// If true, local node does not need wait up to 10-second. Instead,
// forwards its election ticks to 8-second, and have only 2-second left
// before leader election.
//
// Major assumptions are that:
// - cluster has no active leader thus advancing ticks enables faster
// leader election, or
// - cluster already has an established leader, and rejoining follower
// is likely to receive heartbeats from the leader after tick advance
// and before election timeout.
//
// However, when network from leader to rejoining follower is congested,
// and the follower does not receive leader heartbeat within left election
// ticks, disruptive election has to happen thus affecting cluster
// availabilities.
//
// Disabling this would slow down initial bootstrap process for cross
// datacenter deployments. Make your own tradeoffs by configuring
// --initial-election-tick-advance at the cost of slow initial bootstrap.
//
// If single-node, it advances ticks regardless.
//
// See https://github.com/etcd-io/etcd/issues/9333 for more detail.
InitialElectionTickAdvance bool
BootstrapTimeout time.Duration
AutoCompactionRetention time.Duration
AutoCompactionMode string
CompactionBatchLimit int
QuotaBackendBytes int64
MaxTxnOps uint
// MaxRequestBytes is the maximum request size to send over raft.
MaxRequestBytes uint
WarningApplyDuration time.Duration
StrictReconfigCheck bool
// ClientCertAuthEnabled is true when cert has been signed by the client CA.
ClientCertAuthEnabled bool
AuthToken string
BcryptCost uint
TokenTTL uint
// InitialCorruptCheck is true to check data corruption on boot
// before serving any peer/client traffic.
InitialCorruptCheck bool
CorruptCheckTime time.Duration
// PreVote is true to enable Raft Pre-Vote.
PreVote bool
// SocketOpts are socket options passed to listener config.
SocketOpts transport.SocketOpts
// Logger logs server-side operations.
// If not nil, it disables "capnslog" and uses the given logger.
Logger *zap.Logger
// LoggerConfig is server logger configuration for Raft logger.
// Must be either: "LoggerConfig != nil" or "LoggerCore != nil && LoggerWriteSyncer != nil".
LoggerConfig *zap.Config
// LoggerCore is "zapcore.Core" for raft logger.
// Must be either: "LoggerConfig != nil" or "LoggerCore != nil && LoggerWriteSyncer != nil".
LoggerCore zapcore.Core
LoggerWriteSyncer zapcore.WriteSyncer
ForceNewCluster bool
// EnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases.
EnableLeaseCheckpoint bool
// LeaseCheckpointInterval time.Duration is the wait duration between lease checkpoints.
LeaseCheckpointInterval time.Duration
EnableGRPCGateway bool
WatchProgressNotifyInterval time.Duration
// UnsafeNoFsync disables all uses of fsync.
// Setting this is unsafe and will cause data loss.
UnsafeNoFsync bool `json:"unsafe-no-fsync"`
DowngradeCheckTime time.Duration
// ExperimentalMemoryMlock enables mlocking of etcd owned memory pages.
// The setting improves etcd tail latency in environments were:
// - memory pressure might lead to swapping pages to disk
// - disk latency might be unstable
// Currently all etcd memory gets mlocked, but in future the flag can
// be refined to mlock in-use area of bbolt only.
ExperimentalMemoryMlock bool `json:"experimental-memory-mlock"`
}
// VerifyBootstrap sanity-checks the initial config for bootstrap case
// and returns an error for things that should never happen.
func (c *ServerConfig) VerifyBootstrap() error {
if err := c.hasLocalMember(); err != nil {
return err
}
if err := c.advertiseMatchesCluster(); err != nil {
return err
}
if checkDuplicateURL(c.InitialPeerURLsMap) {
return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
}
if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" {
return fmt.Errorf("initial cluster unset and no discovery URL found")
}
return nil
}
// VerifyJoinExisting sanity-checks the initial config for join existing cluster
// case and returns an error for things that should never happen.
func (c *ServerConfig) VerifyJoinExisting() error {
// The member has announced its peer urls to the cluster before starting; no need to
// set the configuration again.
if err := c.hasLocalMember(); err != nil {
return err
}
if checkDuplicateURL(c.InitialPeerURLsMap) {
return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
}
if c.DiscoveryURL != "" {
return fmt.Errorf("discovery URL should not be set when joining existing initial cluster")
}
return nil
}
// hasLocalMember checks that the cluster at least contains the local server.
func (c *ServerConfig) hasLocalMember() error {
if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name)
}
return nil
}
// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list.
func (c *ServerConfig) advertiseMatchesCluster() error {
urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice()
urls.Sort()
sort.Strings(apurls)
ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
defer cancel()
ok, err := netutil.URLStringsEqual(ctx, c.Logger, apurls, urls.StringSlice())
if ok {
return nil
}
initMap, apMap := make(map[string]struct{}), make(map[string]struct{})
for _, url := range c.PeerURLs {
apMap[url.String()] = struct{}{}
}
for _, url := range c.InitialPeerURLsMap[c.Name] {
initMap[url.String()] = struct{}{}
}
missing := []string{}
for url := range initMap {
if _, ok := apMap[url]; !ok {
missing = append(missing, url)
}
}
if len(missing) > 0 {
for i := range missing {
missing[i] = c.Name + "=" + missing[i]
}
mstr := strings.Join(missing, ",")
apStr := strings.Join(apurls, ",")
return fmt.Errorf("--initial-cluster has %s but missing from --initial-advertise-peer-urls=%s (%v)", mstr, apStr, err)
}
for url := range apMap {
if _, ok := initMap[url]; !ok {
missing = append(missing, url)
}
}
if len(missing) > 0 {
mstr := strings.Join(missing, ",")
umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
return fmt.Errorf("--initial-advertise-peer-urls has %s but missing from --initial-cluster=%s", mstr, umap.String())
}
// resolved URLs from "--initial-advertise-peer-urls" and "--initial-cluster" did not match or failed
apStr := strings.Join(apurls, ",")
umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
return fmt.Errorf("failed to resolve %s to match --initial-cluster=%s (%v)", apStr, umap.String(), err)
}
func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") }
func (c *ServerConfig) WALDir() string {
if c.DedicatedWALDir != "" {
return c.DedicatedWALDir
}
return filepath.Join(c.MemberDir(), "wal")
}
func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }
func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
// ReqTimeout returns timeout for request to finish.
func (c *ServerConfig) ReqTimeout() time.Duration {
// 5s for queue waiting, computation and disk IO delay
// + 2 * election timeout for possible leader election
return 5*time.Second + 2*time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
}
func (c *ServerConfig) electionTimeout() time.Duration {
return time.Duration(c.ElectionTicks*int(c.TickMs)) * time.Millisecond
}
func (c *ServerConfig) peerDialTimeout() time.Duration {
// 1s for queue wait and election timeout
return time.Second + time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
}
func checkDuplicateURL(urlsmap types.URLsMap) bool {
um := make(map[string]bool)
for _, urls := range urlsmap {
for _, url := range urls {
u := url.String()
if um[u] {
return true
}
um[u] = true
}
}
return false
}
func (c *ServerConfig) bootstrapTimeout() time.Duration {
if c.BootstrapTimeout != 0 {
return c.BootstrapTimeout
}
return time.Second
}
func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") }
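
To make the timeout arithmetic above concrete (ReqTimeout, electionTimeout, peerDialTimeout), here is a small standalone sketch using etcd's usual defaults of TickMs=100 and ElectionTicks=10; the numbers are illustrative and not taken from this commit.

// Recomputes the same formulas outside etcd; purely illustrative.
package main

import (
	"fmt"
	"time"
)

func main() {
	tickMs, electionTicks := 100, 10

	election := time.Duration(electionTicks*tickMs) * time.Millisecond
	fmt.Println(election)                   // 1s: electionTimeout = ElectionTicks * TickMs
	fmt.Println(time.Second + election)     // 2s: peerDialTimeout = 1s queue wait + election timeout
	fmt.Println(5*time.Second + 2*election) // 7s: ReqTimeout = 5s + 2 * election timeout
}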

View File

@@ -1,212 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"net/url"
"testing"
"go.etcd.io/etcd/pkg/v3/types"
"go.uber.org/zap"
)
func mustNewURLs(t *testing.T, urls []string) []url.URL {
if len(urls) == 0 {
return nil
}
u, err := types.NewURLs(urls)
if err != nil {
t.Fatalf("error creating new URLs from %q: %v", urls, err)
}
return u
}
func TestConfigVerifyBootstrapWithoutClusterAndDiscoveryURLFail(t *testing.T) {
c := &ServerConfig{
Name: "node1",
DiscoveryURL: "",
InitialPeerURLsMap: types.URLsMap{},
Logger: zap.NewExample(),
}
if err := c.VerifyBootstrap(); err == nil {
t.Errorf("err = nil, want not nil")
}
}
func TestConfigVerifyExistingWithDiscoveryURLFail(t *testing.T) {
cluster, err := types.NewURLsMap("node1=http://127.0.0.1:2380")
if err != nil {
t.Fatalf("NewCluster error: %v", err)
}
c := &ServerConfig{
Name: "node1",
DiscoveryURL: "http://127.0.0.1:2379/abcdefg",
PeerURLs: mustNewURLs(t, []string{"http://127.0.0.1:2380"}),
InitialPeerURLsMap: cluster,
NewCluster: false,
Logger: zap.NewExample(),
}
if err := c.VerifyJoinExisting(); err == nil {
t.Errorf("err = nil, want not nil")
}
}
func TestConfigVerifyLocalMember(t *testing.T) {
tests := []struct {
clusterSetting string
apurls []string
strict bool
shouldError bool
}{
{
// Node must exist in cluster
"",
nil,
true,
true,
},
{
// Initial cluster set
"node1=http://localhost:7001,node2=http://localhost:7002",
[]string{"http://localhost:7001"},
true,
false,
},
{
// Default initial cluster
"node1=http://localhost:2380,node1=http://localhost:7001",
[]string{"http://localhost:2380", "http://localhost:7001"},
true,
false,
},
{
// Advertised peer URLs must match those in cluster-state
"node1=http://localhost:7001",
[]string{"http://localhost:12345"},
true,
true,
},
{
// Advertised peer URLs must match those in cluster-state
"node1=http://localhost:2380,node1=http://localhost:12345",
[]string{"http://localhost:12345"},
true,
true,
},
{
// Advertised peer URLs must match those in cluster-state
"node1=http://localhost:12345",
[]string{"http://localhost:2380", "http://localhost:12345"},
true,
true,
},
{
// Advertised peer URLs must match those in cluster-state
"node1=http://localhost:2380",
[]string{},
true,
true,
},
{
// do not care about the urls if strict is not set
"node1=http://localhost:2380",
[]string{},
false,
false,
},
}
for i, tt := range tests {
cluster, err := types.NewURLsMap(tt.clusterSetting)
if err != nil {
t.Fatalf("#%d: Got unexpected error: %v", i, err)
}
cfg := ServerConfig{
Name: "node1",
InitialPeerURLsMap: cluster,
Logger: zap.NewExample(),
}
if tt.apurls != nil {
cfg.PeerURLs = mustNewURLs(t, tt.apurls)
}
if err = cfg.hasLocalMember(); err == nil && tt.strict {
err = cfg.advertiseMatchesCluster()
}
if (err == nil) && tt.shouldError {
t.Errorf("#%d: Got no error where one was expected", i)
}
if (err != nil) && !tt.shouldError {
t.Errorf("#%d: Got unexpected error: %v", i, err)
}
}
}
func TestSnapDir(t *testing.T) {
tests := map[string]string{
"/": "/member/snap",
"/var/lib/etc": "/var/lib/etc/member/snap",
}
for dd, w := range tests {
cfg := ServerConfig{
DataDir: dd,
Logger: zap.NewExample(),
}
if g := cfg.SnapDir(); g != w {
t.Errorf("DataDir=%q: SnapDir()=%q, want=%q", dd, g, w)
}
}
}
func TestWALDir(t *testing.T) {
tests := map[string]string{
"/": "/member/wal",
"/var/lib/etc": "/var/lib/etc/member/wal",
}
for dd, w := range tests {
cfg := ServerConfig{
DataDir: dd,
Logger: zap.NewExample(),
}
if g := cfg.WALDir(); g != w {
t.Errorf("DataDir=%q: WALDir()=%q, want=%q", dd, g, w)
}
}
}
func TestShouldDiscover(t *testing.T) {
tests := map[string]bool{
"": false,
"foo": true,
"http://discovery.etcd.io/asdf": true,
}
for durl, w := range tests {
cfg := ServerConfig{
DiscoveryURL: durl,
Logger: zap.NewExample(),
}
if g := cfg.ShouldDiscover(); g != w {
t.Errorf("durl=%q: ShouldDiscover()=%t, want=%t", durl, g, w)
}
}
}

View File

@@ -30,6 +30,7 @@ import (
"go.etcd.io/etcd/pkg/v3/types"
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
"go.etcd.io/etcd/server/v3/wal"
@@ -419,7 +420,7 @@ func (r *raftNode) advanceTicks(ticks int) {
}
}
-func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
+func startNode(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
var err error
member := cl.MemberByName(cfg.Name)
metadata := pbutil.MustMarshal(
@@ -483,7 +484,7 @@ func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id
return id, n, s, w
}
-func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
+func restartNode(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
var walsnap walpb.Snapshot
if snapshot != nil {
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
@@ -534,7 +535,7 @@ func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *member
return id, cl, n, s, w
}
-func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
+func restartAsStandaloneNode(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
var walsnap walpb.Snapshot
if snapshot != nil {
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term

View File

@@ -33,6 +33,7 @@ import (
"github.com/coreos/go-semver/semver"
humanize "github.com/dustin/go-humanize"
"github.com/prometheus/client_golang/prometheus"
"go.etcd.io/etcd/server/v3/config"
"go.uber.org/zap"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
@@ -210,7 +211,7 @@ type EtcdServer struct {
r raftNode // uses 64-bit atomics; keep 64-bit aligned.
readych chan struct{}
-Cfg ServerConfig
+Cfg config.ServerConfig
lgMu *sync.RWMutex
lg *zap.Logger
@@ -295,7 +296,7 @@ type EtcdServer struct {
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
-func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
+func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) {
st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)
var (
@@ -339,7 +340,7 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
}
ss := snap.New(cfg.Logger, cfg.SnapDir())
-bepath := cfg.backendPath()
+bepath := cfg.BackendPath()
beExist := fileutil.Exist(bepath)
be := openBackend(cfg)
@@ -349,7 +350,7 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
}
}()
-prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
+prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout())
if err != nil {
return nil, err
}
@@ -394,7 +395,7 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
return nil, err
}
m := cl.MemberByName(cfg.Name)
-if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
+if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.BootstrapTimeoutEffective()) {
return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
}
if cfg.ShouldDiscover() {
@@ -408,7 +409,7 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
if err != nil {
return nil, err
}
-if checkDuplicateURL(urlsmap) {
+if config.CheckDuplicateURL(urlsmap) {
return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
}
if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil {
@@ -611,7 +612,7 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
tr := &rafthttp.Transport{
Logger: cfg.Logger,
TLSInfo: cfg.PeerTLSInfo,
-DialTimeout: cfg.peerDialTimeout(),
+DialTimeout: cfg.PeerDialTimeout(),
ID: id,
URLs: cfg.PeerURLs,
ClusterID: cl.ID(),
@@ -2073,7 +2074,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
// promote lessor when the local member is leader and finished
// applying all entries from the last term.
if s.isLeader() {
-s.lessor.Promote(s.Cfg.electionTimeout())
+s.lessor.Promote(s.Cfg.ElectionTimeout())
}
return
}

View File

@@ -36,6 +36,7 @@ import (
"go.etcd.io/etcd/pkg/v3/wait"
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
@@ -738,7 +739,7 @@ func TestDoProposal(t *testing.T) {
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
-Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
+Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *r,
v2store: st,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
@@ -770,7 +771,7 @@ func TestDoProposalCancelled(t *testing.T) {
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
-Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
+Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *newRaftNode(raftNodeConfig{Node: newNodeNop()}),
w: wt,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
@@ -794,7 +795,7 @@ func TestDoProposalTimeout(t *testing.T) {
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
-Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
+Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *newRaftNode(raftNodeConfig{Node: newNodeNop()}),
w: mockwait.NewNop(),
reqIDGen: idutil.NewGenerator(0, time.Time{}),
@@ -813,7 +814,7 @@ func TestDoProposalStopped(t *testing.T) {
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
-Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
+Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: newNodeNop()}),
w: mockwait.NewNop(),
reqIDGen: idutil.NewGenerator(0, time.Time{}),
@@ -924,7 +925,7 @@ func TestSyncTrigger(t *testing.T) {
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
-Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
+Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *r,
v2store: mockstore.NewNop(),
SyncTicker: tk,
@@ -1066,7 +1067,7 @@ func TestSnapshotOrdering(t *testing.T) {
s := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
-Cfg: ServerConfig{Logger: zap.NewExample(), DataDir: testdir, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
+Cfg: config.ServerConfig{Logger: zap.NewExample(), DataDir: testdir, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *r,
v2store: st,
snapshotter: snap.New(zap.NewExample(), snapdir),
@@ -1141,7 +1142,7 @@ func TestTriggerSnap(t *testing.T) {
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
-Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCount: uint64(snapc), SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
+Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCount: uint64(snapc), SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *r,
v2store: st,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
@@ -1219,7 +1220,7 @@ func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
s := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
-Cfg: ServerConfig{Logger: zap.NewExample(), DataDir: testdir, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
+Cfg: config.ServerConfig{Logger: zap.NewExample(), DataDir: testdir, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *r,
v2store: st,
snapshotter: snap.New(zap.NewExample(), testdir),
@@ -1430,7 +1431,7 @@ func TestPublish(t *testing.T) {
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
readych: make(chan struct{}),
-Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
+Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
id: 1,
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}},
@@ -1483,7 +1484,7 @@ func TestPublishStopped(t *testing.T) {
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
-Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
+Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *r,
cluster: &membership.RaftCluster{},
w: mockwait.NewNop(),
@@ -1507,7 +1508,7 @@ func TestPublishRetry(t *testing.T) {
srv := &EtcdServer{
lgMu: new(sync.RWMutex),
lg: zap.NewExample(),
-Cfg: ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
+Cfg: config.ServerConfig{Logger: zap.NewExample(), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries},
r: *newRaftNode(raftNodeConfig{lg: zap.NewExample(), Node: n}),
w: mockwait.NewNop(),
stopping: make(chan struct{}),