Merge pull request #9913 from gyuho/log
*: combine all configuration print logs to embed.StartEtcd
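The gist of the change: rather than etcdserver and etcdmain each printing their own slice of the configuration, embed.StartEtcd now logs everything once through a single helper (the new print function added to embed/etcd.go below). The following sketch only illustrates that pattern; it is not etcd code. The config type, its field set, and the printConfig name are invented for the example, and the capnslog fallback that the real helper keeps for a nil *zap.Logger is stubbed out.

package main

import (
	"sort"
	"time"

	"go.uber.org/zap"
)

// config stands in for the combined embed.Config/etcdserver.ServerConfig
// values the real helper receives; only a handful of fields are shown.
type config struct {
	Name          string
	DataDir       string
	TickMs        uint
	ElectionTicks int
	CORS          map[string]struct{}
}

// printConfig emits the whole configuration as one structured log entry.
// The real code falls back to legacy capnslog plog.Infof lines when no
// zap logger is configured; that branch is elided in this sketch.
func printConfig(lg *zap.Logger, c config) {
	if lg == nil {
		return // capnslog fallback omitted in this sketch
	}

	// Sets (such as CORS origins) are sorted so the logged output is stable.
	cors := make([]string, 0, len(c.CORS))
	for v := range c.CORS {
		cors = append(cors, v)
	}
	sort.Strings(cors)

	lg.Info(
		"starting an etcd server",
		zap.String("name", c.Name),
		zap.String("data-dir", c.DataDir),
		zap.Duration("heartbeat-interval", time.Duration(c.TickMs)*time.Millisecond),
		zap.Duration("election-timeout", time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond),
		zap.Strings("cors", cors),
	)
}

func main() {
	lg, _ := zap.NewProduction()
	defer lg.Sync()

	printConfig(lg, config{
		Name:          "default",
		DataDir:       "default.etcd",
		TickMs:        100,
		ElectionTicks: 10,
		CORS:          map[string]struct{}{"*": {}},
	})
}

Logging the configuration as one structured entry, at the one place that sees both the embed-level and server-level config, is what allows the per-package Print/PrintWithInitial calls removed further down in this diff to go away.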
embed/etcd.go
@@ -23,6 +23,7 @@ import (
	"net"
	"net/http"
	"net/url"
	"runtime"
	"sort"
	"strconv"
	"sync"
@@ -39,6 +40,7 @@ import (
	runtimeutil "github.com/coreos/etcd/pkg/runtime"
	"github.com/coreos/etcd/pkg/transport"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/version"

	"github.com/coreos/pkg/capnslog"
	"github.com/grpc-ecosystem/go-grpc-prometheus"
@@ -164,6 +166,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
		DataDir:                cfg.Dir,
		DedicatedWALDir:        cfg.WalDir,
		SnapshotCount:          cfg.SnapshotCount,
		SnapshotCatchUpEntries: cfg.SnapshotCatchUpEntries,
		MaxSnapFiles:           cfg.MaxSnapFiles,
		MaxWALFiles:            cfg.MaxWalFiles,
		InitialPeerURLsMap:     urlsmap,
@@ -196,35 +199,11 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
		Debug:           cfg.Debug,
		ForceNewCluster: cfg.ForceNewCluster,
	}
	print(e.cfg.logger, *cfg, srvcfg, memberInitialized)
	if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
		return e, err
	}

	if len(e.cfg.CORS) > 0 {
		ss := make([]string, 0, len(e.cfg.CORS))
		for v := range e.cfg.CORS {
			ss = append(ss, v)
		}
		sort.Strings(ss)
		if e.cfg.logger != nil {
			e.cfg.logger.Info("configured CORS", zap.Strings("cors", ss))
		} else {
			plog.Infof("%s starting with cors %q", e.Server.ID(), ss)
		}
	}
	if len(e.cfg.HostWhitelist) > 0 {
		ss := make([]string, 0, len(e.cfg.HostWhitelist))
		for v := range e.cfg.HostWhitelist {
			ss = append(ss, v)
		}
		sort.Strings(ss)
		if e.cfg.logger != nil {
			e.cfg.logger.Info("configured host whitelist", zap.Strings("hosts", ss))
		} else {
			plog.Infof("%s starting with host whitelist %q", e.Server.ID(), ss)
		}
	}

	// buffer channel so goroutines on closed connections won't wait forever
	e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs))
@@ -265,6 +244,94 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) {
	return e, nil
}

func print(lg *zap.Logger, ec Config, sc etcdserver.ServerConfig, memberInitialized bool) {
	// TODO: remove this after dropping "capnslog"
	if lg == nil {
		plog.Infof("name = %s", ec.Name)
		if sc.ForceNewCluster {
			plog.Infof("force new cluster")
		}
		plog.Infof("data dir = %s", sc.DataDir)
		plog.Infof("member dir = %s", sc.MemberDir())
		if sc.DedicatedWALDir != "" {
			plog.Infof("dedicated WAL dir = %s", sc.DedicatedWALDir)
		}
		plog.Infof("heartbeat = %dms", sc.TickMs)
		plog.Infof("election = %dms", sc.ElectionTicks*int(sc.TickMs))
		plog.Infof("snapshot count = %d", sc.SnapshotCount)
		if len(sc.DiscoveryURL) != 0 {
			plog.Infof("discovery URL= %s", sc.DiscoveryURL)
			if len(sc.DiscoveryProxy) != 0 {
				plog.Infof("discovery proxy = %s", sc.DiscoveryProxy)
			}
		}
		plog.Infof("advertise client URLs = %s", sc.ClientURLs)
		if memberInitialized {
			plog.Infof("initial advertise peer URLs = %s", sc.PeerURLs)
			plog.Infof("initial cluster = %s", sc.InitialPeerURLsMap)
		}
	} else {
		cors := make([]string, 0, len(ec.CORS))
		for v := range ec.CORS {
			cors = append(cors, v)
		}
		sort.Strings(cors)

		hss := make([]string, 0, len(ec.HostWhitelist))
		for v := range ec.HostWhitelist {
			hss = append(hss, v)
		}
		sort.Strings(hss)

		quota := ec.QuotaBackendBytes
		if quota == 0 {
			quota = etcdserver.DefaultQuotaBytes
		}

		lg.Info(
			"starting an etcd server",
			zap.String("etcd-version", version.Version),
			zap.String("git-sha", version.GitSHA),
			zap.String("go-version", runtime.Version()),
			zap.String("go-os", runtime.GOOS),
			zap.String("go-arch", runtime.GOARCH),
			zap.Int("max-cpu-set", runtime.GOMAXPROCS(0)),
			zap.Int("max-cpu-available", runtime.NumCPU()),
			zap.Bool("member-initialized", memberInitialized),
			zap.String("name", sc.Name),
			zap.String("data-dir", sc.DataDir),
			zap.String("wal-dir", ec.WalDir),
			zap.String("wal-dir-dedicated", sc.DedicatedWALDir),
			zap.String("member-dir", sc.MemberDir()),
			zap.Bool("force-new-cluster", sc.ForceNewCluster),
			zap.String("heartbeat-interval", fmt.Sprintf("%v", time.Duration(sc.TickMs)*time.Millisecond)),
			zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(sc.ElectionTicks*int(sc.TickMs))*time.Millisecond)),
			zap.Bool("initial-election-tick-advance", sc.InitialElectionTickAdvance),
			zap.Uint64("snapshot-count", sc.SnapshotCount),
			zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries),
			zap.Strings("initial-advertise-peer-urls", ec.getAPURLs()),
			zap.Strings("listen-peer-urls", ec.getLPURLs()),
			zap.Strings("advertise-client-urls", ec.getACURLs()),
			zap.Strings("listen-client-urls", ec.getLCURLs()),
			zap.Strings("listen-metrics-urls", ec.getMetricsURLs()),
			zap.Strings("cors", cors),
			zap.Strings("host-whitelist", hss),
			zap.String("initial-cluster", sc.InitialPeerURLsMap.String()),
			zap.String("initial-cluster-state", ec.ClusterState),
			zap.String("initial-cluster-token", sc.InitialClusterToken),
			zap.Int64("quota-size-bytes", quota),
			zap.Bool("pre-vote", sc.PreVote),
			zap.Bool("initial-corrupt-check", sc.InitialCorruptCheck),
			zap.String("corrupt-check-time-interval", sc.CorruptCheckTime.String()),
			zap.String("auto-compaction-mode", sc.AutoCompactionMode),
			zap.Duration("auto-compaction-retention", sc.AutoCompactionRetention),
			zap.String("auto-compaction-interval", sc.AutoCompactionRetention.String()),
			zap.String("discovery-url", sc.DiscoveryURL),
			zap.String("discovery-proxy", sc.DiscoveryProxy),
		)
	}
}

// Config returns the current configuration.
func (e *Etcd) Config() Config {
	return e.cfg

etcdmain/etcd.go
@@ -78,27 +78,17 @@ func startEtcdOrProxyV2() {
		os.Exit(1)
	}

	maxProcs, cpus := runtime.GOMAXPROCS(0), runtime.NumCPU()

	lg := cfg.ec.GetLogger()
	if lg != nil {
		lg.Info(
			"starting etcd",
			zap.String("etcd-version", version.Version),
			zap.String("git-sha", version.GitSHA),
			zap.String("go-version", runtime.Version()),
			zap.String("go-os", runtime.GOOS),
			zap.String("go-arch", runtime.GOARCH),
			zap.Int("max-cpu-set", maxProcs),
			zap.Int("max-cpu-available", cpus),
		)
	} else {
	if lg == nil {
		// TODO: remove in 3.5
		plog.Infof("etcd Version: %s\n", version.Version)
		plog.Infof("Git SHA: %s\n", version.GitSHA)
		plog.Infof("Go Version: %s\n", runtime.Version())
		plog.Infof("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH)
		plog.Infof("setting maximum number of CPUs to %d, total number of available CPUs is %d", maxProcs, cpus)
		plog.Infof("setting maximum number of CPUs to %d, total number of available CPUs is %d", runtime.GOMAXPROCS(0), runtime.NumCPU())
	}

	defer func() {
		logger := cfg.ec.GetLogger()
		if logger != nil {

etcdserver/config.go
@@ -266,71 +266,6 @@ func (c *ServerConfig) peerDialTimeout() time.Duration {
	return time.Second + time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
}

func (c *ServerConfig) PrintWithInitial() { c.print(true) }

func (c *ServerConfig) Print() { c.print(false) }

func (c *ServerConfig) print(initial bool) {
	// TODO: remove this after dropping "capnslog"
	if c.Logger == nil {
		plog.Infof("name = %s", c.Name)
		if c.ForceNewCluster {
			plog.Infof("force new cluster")
		}
		plog.Infof("data dir = %s", c.DataDir)
		plog.Infof("member dir = %s", c.MemberDir())
		if c.DedicatedWALDir != "" {
			plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir)
		}
		plog.Infof("heartbeat = %dms", c.TickMs)
		plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs))
		plog.Infof("snapshot count = %d", c.SnapshotCount)
		if len(c.DiscoveryURL) != 0 {
			plog.Infof("discovery URL= %s", c.DiscoveryURL)
			if len(c.DiscoveryProxy) != 0 {
				plog.Infof("discovery proxy = %s", c.DiscoveryProxy)
			}
		}
		plog.Infof("advertise client URLs = %s", c.ClientURLs)
		if initial {
			plog.Infof("initial advertise peer URLs = %s", c.PeerURLs)
			plog.Infof("initial cluster = %s", c.InitialPeerURLsMap)
		}
	} else {
		state := "new"
		if !c.NewCluster {
			state = "existing"
		}
		c.Logger.Info(
			"server configuration",
			zap.String("name", c.Name),
			zap.String("data-dir", c.DataDir),
			zap.String("member-dir", c.MemberDir()),
			zap.String("dedicated-wal-dir", c.DedicatedWALDir),
			zap.Bool("force-new-cluster", c.ForceNewCluster),
			zap.String("heartbeat-interval", fmt.Sprintf("%v", time.Duration(c.TickMs)*time.Millisecond)),
			zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond)),
			zap.Bool("initial-election-tick-advance", c.InitialElectionTickAdvance),
			zap.Uint64("snapshot-count", c.SnapshotCount),
			zap.Uint64("snapshot-catchup-entries", c.SnapshotCatchUpEntries),
			zap.Strings("advertise-client-urls", c.getACURLs()),
			zap.Strings("initial-advertise-peer-urls", c.getAPURLs()),
			zap.Bool("initial", initial),
			zap.String("initial-cluster", c.InitialPeerURLsMap.String()),
			zap.String("initial-cluster-state", state),
			zap.String("initial-cluster-token", c.InitialClusterToken),
			zap.Bool("pre-vote", c.PreVote),
			zap.Bool("initial-corrupt-check", c.InitialCorruptCheck),
			zap.String("corrupt-check-time-interval", c.CorruptCheckTime.String()),
			zap.String("auto-compaction-mode", c.AutoCompactionMode),
			zap.Duration("auto-compaction-retention", c.AutoCompactionRetention),
			zap.String("auto-compaction-interval", c.AutoCompactionRetention.String()),
			zap.String("discovery-url", c.DiscoveryURL),
			zap.String("discovery-proxy", c.DiscoveryProxy),
		)
	}
}

func checkDuplicateURL(urlsmap types.URLsMap) bool {
	um := make(map[string]bool)
	for _, urls := range urlsmap {
@@ -353,19 +288,3 @@ func (c *ServerConfig) bootstrapTimeout() time.Duration {
}

func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") }

func (c *ServerConfig) getAPURLs() (ss []string) {
	ss = make([]string, len(c.PeerURLs))
	for i := range c.PeerURLs {
		ss[i] = c.PeerURLs[i].String()
	}
	return ss
}

func (c *ServerConfig) getACURLs() (ss []string) {
	ss = make([]string, len(c.ClientURLs))
	for i := range c.ClientURLs {
		ss[i] = c.ClientURLs[i].String()
	}
	return ss
}

etcdserver/quota.go
@@ -66,7 +66,7 @@ var (
	// only log once
	quotaLogOnce sync.Once

	defaultQuotaSize = humanize.Bytes(uint64(DefaultQuotaBytes))
	DefaultQuotaSize = humanize.Bytes(uint64(DefaultQuotaBytes))
	maxQuotaSize     = humanize.Bytes(uint64(MaxQuotaBytes))
)
@@ -99,7 +99,7 @@ func NewBackendQuota(s *EtcdServer, name string) Quota {
				"enabled backend quota with default value",
				zap.String("quota-name", name),
				zap.Int64("quota-size-bytes", DefaultQuotaBytes),
				zap.String("quota-size", defaultQuotaSize),
				zap.String("quota-size", DefaultQuotaSize),
			)
		}
	})

etcdserver/server.go
@@ -357,7 +357,6 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
		cl.SetID(types.ID(0), existingCluster.ID())
		cl.SetStore(st)
		cl.SetBackend(be)
		cfg.Print()
		id, n, s, w = startNode(cfg, cl, nil)
		cl.SetID(id, existingCluster.ID())
@@ -393,7 +392,6 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
		}
		cl.SetStore(st)
		cl.SetBackend(be)
		cfg.PrintWithInitial()
		id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
		cl.SetID(id, cl.ID())
@@ -458,8 +456,6 @@ func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
			}
		}

		cfg.Print()

		if !cfg.ForceNewCluster {
			id, cl, n, s, w = restartNode(cfg, snapshot)
		} else {