all config -> cfg

Author:    Blake Mizerany, 2014-08-13 17:04:29 -07:00
Committer: Yicheng Qin
Parent:    a5eec89113
Commit:    0881021e54
11 changed files with 63 additions and 63 deletions
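The diff below is a purely mechanical rename: every identifier spelled config (a struct field, locals, parameters, and an imported package alias) becomes cfg, with no behavior change. A rename of this shape can be scripted rather than done by hand; the sketch below is illustrative only and not taken from the commit (the gorename -from spec in particular is an assumption):

    # gofmt -r rewrites every occurrence of the literal identifier
    # "config" to "cfg". It is scope-unaware and will touch any
    # declaration or use of that name, so review the resulting diff.
    gofmt -r 'config -> cfg' -w .

    # A scope-aware alternative, one declaration at a time, via the
    # gorename tool (hypothetical -from spec for the Server field):
    gorename -from '"github.com/coreos/etcd/etcd".Server.config' -to cfg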


@@ -36,7 +36,7 @@ const (
 )
 type Server struct {
-    config *conf.Config
+    cfg *conf.Config
     id int64
     pubAddr string
     raftPubAddr string
@@ -79,7 +79,7 @@ func New(c *conf.Config) (*Server, error) {
     client := &http.Client{Transport: tr}
     s := &Server{
-        config:      c,
+        cfg:         c,
         id:          genId(),
         pubAddr:     c.Addr,
         raftPubAddr: c.Peer.Addr,
@@ -99,7 +99,7 @@ func New(c *conf.Config) (*Server, error) {
     s.Handler = m
     log.Printf("id=%x server.new raftPubAddr=%s\n", s.id, s.raftPubAddr)
-    if err = os.MkdirAll(s.config.DataDir, 0700); err != nil {
+    if err = os.MkdirAll(s.cfg.DataDir, 0700); err != nil {
         if !os.IsExist(err) {
             return nil, err
         }
@@ -152,7 +152,7 @@ func (s *Server) Run() error {
     var exit error
     defer func() { s.exited <- exit }()
-    durl := s.config.Discovery
+    durl := s.cfg.Discovery
     if durl != "" {
         u, err := url.Parse(durl)
         if err != nil {
@@ -166,7 +166,7 @@ func (s *Server) Run() error {
         }
         log.Printf("id=%x server.run source=-discovery seeds=\"%v\"\n", s.id, seeds)
     } else {
-        seeds = s.config.Peers
+        seeds = s.cfg.Peers
         log.Printf("id=%x server.run source=-peers seeds=\"%v\"\n", s.id, seeds)
     }
     s.peerHub.setSeeds(seeds)
@@ -175,7 +175,7 @@ func (s *Server) Run() error {
     for {
         switch next {
         case participantMode:
-            p, err := newParticipant(s.id, s.pubAddr, s.raftPubAddr, s.config.DataDir, s.client, s.peerHub, s.tickDuration)
+            p, err := newParticipant(s.id, s.pubAddr, s.raftPubAddr, s.cfg.DataDir, s.client, s.peerHub, s.tickDuration)
             if err != nil {
                 log.Printf("id=%x server.run newParicipanteErr=\"%v\"\n", s.id, err)
                 exit = err


@@ -55,7 +55,7 @@ func TestKillLeader(t *testing.T) {
         fmt.Println("Total time:", totalTime, "; Avg time:", avgTime)
         c := conf.New()
-        c.DataDir = es[lead].config.DataDir
+        c.DataDir = es[lead].cfg.DataDir
         c.Addr = hs[lead].Listener.Addr().String()
         id := es[lead].id
         e, h, err := buildServer(t, c, id)
@@ -96,7 +96,7 @@ func TestKillRandom(t *testing.T) {
     for k := range toKill {
         c := conf.New()
-        c.DataDir = es[k].config.DataDir
+        c.DataDir = es[k].cfg.DataDir
         c.Addr = hs[k].Listener.Addr().String()
         id := es[k].id
         e, h, err := buildServer(t, c, id)
@@ -162,7 +162,7 @@ func TestClusterConfigReload(t *testing.T) {
     for k := range es {
         c := conf.New()
-        c.DataDir = es[k].config.DataDir
+        c.DataDir = es[k].cfg.DataDir
         c.Addr = hs[k].Listener.Addr().String()
         id := es[k].id
         e, h, err := buildServer(t, c, id)
@@ -201,7 +201,7 @@ func TestMultiNodeKillOne(t *testing.T) {
         hs[idx].Close()
         c := conf.New()
-        c.DataDir = es[idx].config.DataDir
+        c.DataDir = es[idx].cfg.DataDir
         c.Addr = hs[idx].Listener.Addr().String()
         id := es[idx].id
         e, h, err := buildServer(t, c, id)
@@ -242,7 +242,7 @@ func TestMultiNodeKillAllAndRecovery(t *testing.T) {
     for k := range es {
         c := conf.New()
-        c.DataDir = es[k].config.DataDir
+        c.DataDir = es[k].cfg.DataDir
         c.Addr = hs[k].Listener.Addr().String()
         id := es[k].id
         e, h, err := buildServer(t, c, id)
@@ -291,8 +291,8 @@ func TestModeSwitch(t *testing.T) {
     es, hs := buildCluster(size, false)
     waitCluster(t, es)
-    config := conf.NewClusterConfig()
-    config.SyncInterval = 0
+    cfg := conf.NewClusterConfig()
+    cfg.SyncInterval = 0
     id := int64(i)
     for j := 0; j < round; j++ {
         lead, _ := waitActiveLeader(es)
@ -301,8 +301,8 @@ func TestModeSwitch(t *testing.T) {
continue
}
config.ActiveSize = size - 1
if err := es[lead].p.setClusterConfig(config); err != nil {
cfg.ActiveSize = size - 1
if err := es[lead].p.setClusterConfig(cfg); err != nil {
t.Fatalf("#%d: setClusterConfig err = %v", i, err)
}
if err := es[lead].p.remove(id); err != nil {
@@ -321,8 +321,8 @@ func TestModeSwitch(t *testing.T) {
             t.Errorf("#%d: lead = %d, want %d", i, g, lead)
         }
-        config.ActiveSize = size
-        if err := es[lead].p.setClusterConfig(config); err != nil {
+        cfg.ActiveSize = size
+        if err := es[lead].p.setClusterConfig(cfg); err != nil {
             t.Fatalf("#%d: setClusterConfig err = %v", i, err)
         }


@@ -144,7 +144,7 @@ func TestRunByAdvisedPeers(t *testing.T) {
 }
 func TestRunByDiscoveryService(t *testing.T) {
-    de, dh, _ := buildServer(t, config.New(), genId())
+    de, dh, _ := buildServer(t, cfg.New(), genId())
     tc := NewTestClient()
     v := url.Values{}
@@ -162,7 +162,7 @@ func TestRunByDiscoveryService(t *testing.T) {
     }
     resp.Body.Close()
-    c := config.New()
+    c := cfg.New()
     c.Discovery = dh.URL + "/v2/keys/_etcd/registry/1"
     e, h, err := buildServer(t, c, bootstrapId)
     if err != nil {
@@ -182,7 +182,7 @@ func TestRunByDataDir(t *testing.T) {
     TestSingleNodeRecovery(t)
 }
-func buildServer(t *testing.T, c *config.Config, id int64) (e *Server, h *httptest.Server, err error) {
+func buildServer(t *testing.T, c *cfg.Config, id int64) (e *Server, h *httptest.Server, err error) {
     e, h = initTestServer(c, id, false)
     go func() { err = e.Run() }()
     for {


@@ -90,7 +90,7 @@ func TestAdd(t *testing.T) {
     es := make([]*Server, tt)
     hs := make([]*httptest.Server, tt)
     for i := 0; i < tt; i++ {
-        c := config.New()
+        c := cfg.New()
         if i > 0 {
             c.Peers = []string{hs[0].URL}
         }
@@ -149,9 +149,9 @@ func TestRemove(t *testing.T) {
     waitCluster(t, es)
     lead, _ := waitLeader(es)
-    config := config.NewClusterConfig()
-    config.ActiveSize = 0
-    if err := es[lead].p.setClusterConfig(config); err != nil {
+    cfg := cfg.NewClusterConfig()
+    cfg.ActiveSize = 0
+    if err := es[lead].p.setClusterConfig(cfg); err != nil {
         t.Fatalf("#%d: setClusterConfig err = %v", k, err)
     }
@@ -216,11 +216,11 @@ func TestBecomeStandby(t *testing.T) {
     }
     id := int64(i)
-    config := config.NewClusterConfig()
-    config.SyncInterval = 1000
+    cfg := cfg.NewClusterConfig()
+    cfg.SyncInterval = 1000
-    config.ActiveSize = size - 1
-    if err := es[lead].p.setClusterConfig(config); err != nil {
+    cfg.ActiveSize = size - 1
+    if err := es[lead].p.setClusterConfig(cfg); err != nil {
         t.Fatalf("#%d: setClusterConfig err = %v", i, err)
     }
     for {
@@ -320,7 +320,7 @@ func TestSingleNodeRecovery(t *testing.T) {
     if err != nil {
         panic(err)
     }
-    c := config.New()
+    c := cfg.New()
    c.DataDir = dataDir
     e, h, _ := buildServer(t, c, id)
     key := "/foo"
@@ -348,7 +348,7 @@ func TestSingleNodeRecovery(t *testing.T) {
     time.Sleep(2 * time.Second)
-    c = config.New()
+    c = cfg.New()
     c.DataDir = dataDir
     e, h, _ = buildServer(t, c, id)
@@ -395,7 +395,7 @@ func TestRestoreSnapshotFromLeader(t *testing.T) {
     }
     // create one to join the cluster
-    c := config.New()
+    c := cfg.New()
     c.Peers = []string{hs[0].URL}
     e, h := initTestServer(c, 1, false)
     go e.Run()
@@ -445,7 +445,7 @@ func buildCluster(number int, tls bool) ([]*Server, []*httptest.Server) {
     var seed string
     for i := range es {
-        c := config.New()
+        c := cfg.New()
         if seed != "" {
             c.Peers = []string{seed}
         }
@@ -468,7 +468,7 @@ func buildCluster(number int, tls bool) ([]*Server, []*httptest.Server) {
     return es, hs
 }
-func initTestServer(c *config.Config, id int64, tls bool) (e *Server, h *httptest.Server) {
+func initTestServer(c *cfg.Config, id int64, tls bool) (e *Server, h *httptest.Server) {
     if c.DataDir == "" {
         n, err := ioutil.TempDir(os.TempDir(), "etcd")
         if err != nil {
@@ -531,7 +531,7 @@ func destoryCluster(t *testing.T, es []*Server, hs []*httptest.Server) {
     for i := range es {
         e := es[len(es)-i-1]
         e.Stop()
-        err := os.RemoveAll(e.config.DataDir)
+        err := os.RemoveAll(e.cfg.DataDir)
         if err != nil {
             panic(err)
             t.Fatal(err)
@@ -545,7 +545,7 @@ func destoryCluster(t *testing.T, es []*Server, hs []*httptest.Server) {
 func destroyServer(t *testing.T, e *Server, h *httptest.Server) {
     e.Stop()
     h.Close()
-    err := os.RemoveAll(e.config.DataDir)
+    err := os.RemoveAll(e.cfg.DataDir)
     if err != nil {
         panic(err)
         t.Fatal(err)


@@ -115,7 +115,7 @@ func (s *standby) syncCluster(nodes map[string]bool) (map[string]bool, error) {
         if err != nil {
             continue
         }
-        config, err := s.client.GetClusterConfig(node)
+        cfg, err := s.client.GetClusterConfig(node)
         if err != nil {
             continue
         }
@@ -130,7 +130,7 @@ func (s *standby) syncCluster(nodes map[string]bool) (map[string]bool, error) {
                 s.setLeaderInfo(id, machine.PeerURL)
             }
         }
-        s.clusterConf = config
+        s.clusterConf = cfg
         return nn, nil
     }
     return nil, fmt.Errorf("unreachable cluster")


@@ -136,11 +136,11 @@ func (c *v2client) GetClusterConfig(url string) (*conf.ClusterConfig, *etcdErr.Error) {
         return nil, c.readErrorBody(resp.Body)
     }
-    config := new(conf.ClusterConfig)
-    if uerr := c.readJSONBody(resp.Body, config); uerr != nil {
+    cfg := new(conf.ClusterConfig)
+    if uerr := c.readJSONBody(resp.Body, cfg); uerr != nil {
         return nil, uerr
     }
-    return config, nil
+    return cfg, nil
 }
 // AddMachine adds machine to the cluster.

main.go

@@ -14,35 +14,35 @@ import (
 )
 func main() {
-    var config = conf.New()
-    if err := config.Load(os.Args[1:]); err != nil {
+    var cfg = conf.New()
+    if err := cfg.Load(os.Args[1:]); err != nil {
         fmt.Println(etcd.Usage() + "\n")
         fmt.Println(err.Error(), "\n")
         os.Exit(1)
-    } else if config.ShowVersion {
+    } else if cfg.ShowVersion {
         fmt.Println("0.5")
         os.Exit(0)
-    } else if config.ShowHelp {
+    } else if cfg.ShowHelp {
         os.Exit(0)
     }
-    e, err := etcd.New(config)
+    e, err := etcd.New(cfg)
     if err != nil {
         log.Fatal("etcd:", err)
     }
     go e.Run()
-    corsInfo, err := newCORSInfo(config.CorsOrigins)
+    corsInfo, err := newCORSInfo(cfg.CorsOrigins)
     if err != nil {
         log.Fatal("cors:", err)
     }
-    readTimeout := time.Duration(config.HTTPReadTimeout) * time.Second
-    writeTimeout := time.Duration(config.HTTPWriteTimeout) * time.Second
+    readTimeout := time.Duration(cfg.HTTPReadTimeout) * time.Second
+    writeTimeout := time.Duration(cfg.HTTPWriteTimeout) * time.Second
     go func() {
-        serve("raft", config.Peer.BindAddr, config.PeerTLSInfo(), corsInfo, e.RaftHandler(), readTimeout, writeTimeout)
+        serve("raft", cfg.Peer.BindAddr, cfg.PeerTLSInfo(), corsInfo, e.RaftHandler(), readTimeout, writeTimeout)
     }()
-    serve("etcd", config.BindAddr, config.EtcdTLSInfo(), corsInfo, e, readTimeout, writeTimeout)
+    serve("etcd", cfg.BindAddr, cfg.EtcdTLSInfo(), corsInfo, e, readTimeout, writeTimeout)
 }
 func serve(who string, addr string, tinfo *conf.TLSInfo, cinfo *CORSInfo, handler http.Handler, readTimeout, writeTimeout time.Duration) {


@@ -85,11 +85,11 @@ func (c *Client) GetClusterConfig(url string) (*ClusterConfig, *etcdErr.Error) {
         return nil, clientError(err)
     }
-    config := new(ClusterConfig)
-    if uerr := c.parseJSONResponse(resp, config); uerr != nil {
+    cfg := new(ClusterConfig)
+    if uerr := c.parseJSONResponse(resp, cfg); uerr != nil {
         return nil, uerr
     }
-    return config, nil
+    return cfg, nil
 }
 // AddMachine adds machine to the cluster.


@@ -204,19 +204,19 @@ func (ps *PeerServer) setClusterConfigHttpHandler(w http.ResponseWriter, req *http.Request) {
     }
     // Copy config and update fields passed in.
-    config := ps.ClusterConfig()
+    cfg := ps.ClusterConfig()
     if activeSize, ok := m["activeSize"].(float64); ok {
-        config.ActiveSize = int(activeSize)
+        cfg.ActiveSize = int(activeSize)
     }
     if removeDelay, ok := m["removeDelay"].(float64); ok {
-        config.RemoveDelay = removeDelay
+        cfg.RemoveDelay = removeDelay
     }
     if syncInterval, ok := m["syncInterval"].(float64); ok {
-        config.SyncInterval = syncInterval
+        cfg.SyncInterval = syncInterval
     }
     // Issue command to update.
-    c := &SetClusterConfigCommand{Config: config}
+    c := &SetClusterConfigCommand{Config: cfg}
     log.Debugf("[recv] Update Cluster Config Request")
     ps.server.Dispatch(c, w, req)


@@ -54,9 +54,9 @@ type StandbyServer struct {
     sync.Mutex
 }
-func NewStandbyServer(config StandbyServerConfig, client *Client) *StandbyServer {
+func NewStandbyServer(cfg StandbyServerConfig, client *Client) *StandbyServer {
     s := &StandbyServer{
-        Config:      config,
+        Config:      cfg,
         client:      client,
         standbyInfo: standbyInfo{SyncInterval: DefaultSyncInterval},
     }
@@ -229,14 +229,14 @@ func (s *StandbyServer) syncCluster(peerURLs []string) error {
             continue
         }
-        config, err := s.client.GetClusterConfig(peerURL)
+        cfg, err := s.client.GetClusterConfig(peerURL)
         if err != nil {
             log.Debugf("fail getting cluster config from %v", peerURL)
             continue
         }
         s.setCluster(machines)
-        s.SetSyncInterval(config.SyncInterval)
+        s.SetSyncInterval(cfg.SyncInterval)
         if err := s.saveInfo(); err != nil {
             log.Warnf("fail saving cluster info into disk: %v", err)
         }


@@ -20,7 +20,7 @@ const (
 // Starts a new server.
 func RunServer(f func(*server.Server)) {
-    c := config.New()
+    c := cfg.New()
     c.Name = testName
     c.Addr = testClientURL