From a2b14494310530f4e2f1efcffaf78f78abbf522b Mon Sep 17 00:00:00 2001 From: Gyuho Lee Date: Thu, 12 Apr 2018 10:26:17 -0700 Subject: [PATCH 1/4] snapshot: enforce single endpoint in client, change interface To enforce single endpoint in client configuration. And pass client object only to "Save" method. Signed-off-by: Gyuho Lee --- snapshot/v3_snapshot.go | 200 +++++++++++++++++++++-------------- snapshot/v3_snapshot_test.go | 46 ++++---- 2 files changed, 137 insertions(+), 109 deletions(-) diff --git a/snapshot/v3_snapshot.go b/snapshot/v3_snapshot.go index a74408a3f..48847e5b7 100644 --- a/snapshot/v3_snapshot.go +++ b/snapshot/v3_snapshot.go @@ -25,6 +25,7 @@ import ( "os" "path/filepath" "reflect" + "time" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/etcdserver" @@ -48,65 +49,34 @@ import ( // Manager defines snapshot methods. type Manager interface { - // Save fetches snapshot from remote etcd server and saves data to target path. - // If the context "ctx" is canceled or timed out, snapshot save stream will error out - // (e.g. context.Canceled, context.DeadlineExceeded). - Save(ctx context.Context, dbPath string) error + // Save fetches snapshot from remote etcd server and saves data + // to target path. If the context "ctx" is canceled or timed out, + // snapshot save stream will error out (e.g. context.Canceled, + // context.DeadlineExceeded). Make sure to specify only one endpoint + // in client configuration. Snapshot API must be requested to a + // selected node, and saved snapshot is the point-in-time state of + // the selected node. + Save(ctx context.Context, cfg clientv3.Config, dbPath string) error // Status returns the snapshot file information. Status(dbPath string) (Status, error) - // Restore restores a new etcd data directory from given snapshot file. - // It returns an error if specified data directory already exists, to - // prevent unintended data directory overwrites. - Restore(dbPath string, cfg RestoreConfig) error -} - -// Status is the snapshot file status. -type Status struct { - Hash uint32 `json:"hash"` - Revision int64 `json:"revision"` - TotalKey int `json:"totalKey"` - TotalSize int64 `json:"totalSize"` -} - -// RestoreConfig configures snapshot restore operation. -type RestoreConfig struct { - // Name is the human-readable name of this member. - Name string - // OutputDataDir is the target data directory to save restored data. - // OutputDataDir should not conflict with existing etcd data directory. - // If OutputDataDir already exists, it will return an error to prevent - // unintended data directory overwrites. - // Defaults to "[Name].etcd" if not given. - OutputDataDir string - // OutputWALDir is the target WAL data directory. - // Defaults to "[OutputDataDir]/member/wal" if not given. - OutputWALDir string - // InitialCluster is the initial cluster configuration for restore bootstrap. - InitialCluster types.URLsMap - // InitialClusterToken is the initial cluster token for etcd cluster during restore bootstrap. - InitialClusterToken string - // PeerURLs is a list of member's peer URLs to advertise to the rest of the cluster. - PeerURLs types.URLs - // SkipHashCheck is "true" to ignore snapshot integrity hash value - // (required if copied from data directory). - SkipHashCheck bool + // Restore restores a new etcd data directory from given snapshot + // file. It returns an error if specified data directory already + // exists, to prevent unintended data directory overwrites. 
+ Restore(cfg RestoreConfig) error } // NewV3 returns a new snapshot Manager for v3.x snapshot. -// "*clientv3.Client" is only used for "Save" method. -// Otherwise, pass "nil". -func NewV3(cli *clientv3.Client, lg *zap.Logger) Manager { +func NewV3(lg *zap.Logger) Manager { if lg == nil { lg = zap.NewExample() } - return &v3Manager{cli: cli, lg: lg} + return &v3Manager{lg: lg} } type v3Manager struct { - cli *clientv3.Client - lg *zap.Logger + lg *zap.Logger name string dbPath string @@ -117,11 +87,23 @@ type v3Manager struct { skipHashCheck bool } -func (s *v3Manager) Save(ctx context.Context, dbPath string) error { - partpath := dbPath + ".part" - f, err := os.Create(partpath) +// Save fetches snapshot from remote etcd server and saves data to target path. +func (s *v3Manager) Save(ctx context.Context, cfg clientv3.Config, dbPath string) error { + if len(cfg.Endpoints) != 1 { + return fmt.Errorf("snapshot must be requested to one selected node, not multiple %v", cfg.Endpoints) + } + cli, err := clientv3.New(cfg) + if err != nil { + return err + } + defer cli.Close() + + partpath := dbPath + ".part" + defer os.RemoveAll(partpath) + + var f *os.File + f, err = os.Create(partpath) if err != nil { - os.RemoveAll(partpath) return fmt.Errorf("could not open %s (%v)", partpath, err) } s.lg.Info( @@ -129,34 +111,47 @@ func (s *v3Manager) Save(ctx context.Context, dbPath string) error { zap.String("path", partpath), ) + now := time.Now() var rd io.ReadCloser - rd, err = s.cli.Snapshot(ctx) + rd, err = cli.Snapshot(ctx) if err != nil { - os.RemoveAll(partpath) return err } - s.lg.Info("copying from snapshot stream") + s.lg.Info( + "fetching snapshot", + zap.String("endpoint", cfg.Endpoints[0]), + ) if _, err = io.Copy(f, rd); err != nil { - os.RemoveAll(partpath) return err } if err = fileutil.Fsync(f); err != nil { - os.RemoveAll(partpath) return err } if err = f.Close(); err != nil { - os.RemoveAll(partpath) return err } + s.lg.Info( + "fetched snapshot", + zap.String("endpoint", cfg.Endpoints[0]), + zap.Duration("took", time.Since(now)), + ) - s.lg.Info("rename", zap.String("from", partpath), zap.String("to", dbPath)) if err = os.Rename(partpath, dbPath); err != nil { - os.RemoveAll(partpath) return fmt.Errorf("could not rename %s to %s (%v)", partpath, dbPath, err) } + s.lg.Info("saved", zap.String("path", dbPath)) return nil } +// Status is the snapshot file status. +type Status struct { + Hash uint32 `json:"hash"` + Revision int64 `json:"revision"` + TotalKey int `json:"totalKey"` + TotalSize int64 `json:"totalSize"` +} + +// Status returns the snapshot file information. func (s *v3Manager) Status(dbPath string) (ds Status, err error) { if _, err = os.Stat(dbPath); err != nil { return ds, err @@ -200,19 +195,60 @@ func (s *v3Manager) Status(dbPath string) (ds Status, err error) { return ds, nil } -func (s *v3Manager) Restore(dbPath string, cfg RestoreConfig) error { - srv := etcdserver.ServerConfig{ - Name: cfg.Name, - InitialClusterToken: cfg.InitialClusterToken, - InitialPeerURLsMap: cfg.InitialCluster, - PeerURLs: cfg.PeerURLs, +// RestoreConfig configures snapshot restore operation. +type RestoreConfig struct { + // SnapshotPath is the path of snapshot file to restore from. + SnapshotPath string + + // Name is the human-readable name of this member. + Name string + + // OutputDataDir is the target data directory to save restored data. + // OutputDataDir should not conflict with existing etcd data directory. 
+ // If OutputDataDir already exists, it will return an error to prevent + // unintended data directory overwrites. + // If empty, defaults to "[Name].etcd" if not given. + OutputDataDir string + // OutputWALDir is the target WAL data directory. + // If empty, defaults to "[OutputDataDir]/member/wal" if not given. + OutputWALDir string + + // PeerURLs is a list of member's peer URLs to advertise to the rest of the cluster. + PeerURLs []string + + // InitialCluster is the initial cluster configuration for restore bootstrap. + InitialCluster string + // InitialClusterToken is the initial cluster token for etcd cluster during restore bootstrap. + InitialClusterToken string + + // SkipHashCheck is "true" to ignore snapshot integrity hash value + // (required if copied from data directory). + SkipHashCheck bool +} + +// Restore restores a new etcd data directory from given snapshot file. +func (s *v3Manager) Restore(cfg RestoreConfig) error { + pURLs, err := types.NewURLs(cfg.PeerURLs) + if err != nil { + return err } - if err := srv.VerifyBootstrap(); err != nil { + var ics types.URLsMap + ics, err = types.NewURLsMap(cfg.InitialCluster) + if err != nil { return err } - var err error - s.cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialCluster) + srv := etcdserver.ServerConfig{ + Name: cfg.Name, + PeerURLs: pURLs, + InitialPeerURLsMap: ics, + InitialClusterToken: cfg.InitialClusterToken, + } + if err = srv.VerifyBootstrap(); err != nil { + return err + } + + s.cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, ics) if err != nil { return err } @@ -221,44 +257,44 @@ func (s *v3Manager) Restore(dbPath string, cfg RestoreConfig) error { if dataDir == "" { dataDir = cfg.Name + ".etcd" } - if _, err = os.Stat(dataDir); err == nil { + if fileutil.Exist(dataDir) { return fmt.Errorf("data-dir %q exists", dataDir) } + walDir := cfg.OutputWALDir if walDir == "" { walDir = filepath.Join(dataDir, "member", "wal") - } else if _, err = os.Stat(walDir); err == nil { + } else if fileutil.Exist(walDir) { return fmt.Errorf("wal-dir %q exists", walDir) } - s.lg.Info( - "restoring snapshot file", - zap.String("path", dbPath), - zap.String("wal-dir", walDir), - zap.String("data-dir", dataDir), - zap.String("snap-dir", s.snapDir), - ) - s.name = cfg.Name - s.dbPath = dbPath + s.dbPath = cfg.SnapshotPath s.walDir = walDir s.snapDir = filepath.Join(dataDir, "member", "snap") s.skipHashCheck = cfg.SkipHashCheck + s.lg.Info( + "restoring snapshot", + zap.String("path", s.dbPath), + zap.String("wal-dir", s.walDir), + zap.String("data-dir", dataDir), + zap.String("snap-dir", s.snapDir), + ) if err = s.saveDB(); err != nil { return err } if err = s.saveWALAndSnap(); err != nil { return err } - s.lg.Info( - "finished restoring snapshot file", - zap.String("path", dbPath), - zap.String("wal-dir", walDir), + "restored snapshot", + zap.String("path", s.dbPath), + zap.String("wal-dir", s.walDir), zap.String("data-dir", dataDir), zap.String("snap-dir", s.snapDir), ) + return nil } diff --git a/snapshot/v3_snapshot_test.go b/snapshot/v3_snapshot_test.go index f1468b429..2aed7b014 100644 --- a/snapshot/v3_snapshot_test.go +++ b/snapshot/v3_snapshot_test.go @@ -27,7 +27,6 @@ import ( "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/embed" "github.com/coreos/etcd/pkg/testutil" - "github.com/coreos/etcd/pkg/types" "go.uber.org/zap" ) @@ -52,29 +51,23 @@ func TestSnapshotV3RestoreSingle(t *testing.T) { cfg.InitialCluster = fmt.Sprintf("%s=%s", cfg.Name, pURLs[0].String()) cfg.Dir 
= filepath.Join(os.TempDir(), fmt.Sprint(time.Now().Nanosecond())) - sp := NewV3(nil, zap.NewExample()) - - err := sp.Restore(dbPath, RestoreConfig{}) - if err.Error() != `couldn't find local name "" in the initial cluster configuration` { - t.Fatalf("expected restore error, got %v", err) + sp := NewV3(zap.NewExample()) + pss := make([]string, 0, len(pURLs)) + for _, p := range pURLs { + pss = append(pss, p.String()) } - var iURLs types.URLsMap - iURLs, err = types.NewURLsMap(cfg.InitialCluster) - if err != nil { - t.Fatal(err) - } - if err = sp.Restore(dbPath, RestoreConfig{ + if err := sp.Restore(RestoreConfig{ + SnapshotPath: dbPath, Name: cfg.Name, OutputDataDir: cfg.Dir, - InitialCluster: iURLs, + InitialCluster: cfg.InitialCluster, InitialClusterToken: cfg.InitialClusterToken, - PeerURLs: pURLs, + PeerURLs: pss, }); err != nil { t.Fatal(err) } - var srv *embed.Etcd - srv, err = embed.StartEtcd(cfg) + srv, err := embed.StartEtcd(cfg) if err != nil { t.Fatal(err) } @@ -176,10 +169,12 @@ func createSnapshotFile(t *testing.T, kvs []kv) string { t.Fatalf("failed to start embed.Etcd for creating snapshots") } - cli, err := clientv3.New(clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}}) + ccfg := clientv3.Config{Endpoints: []string{cfg.ACUrls[0].String()}} + cli, err := clientv3.New(ccfg) if err != nil { t.Fatal(err) } + defer cli.Close() for i := range kvs { ctx, cancel := context.WithTimeout(context.Background(), testutil.RequestTimeout) _, err = cli.Put(ctx, kvs[i].k, kvs[i].v) @@ -189,9 +184,9 @@ func createSnapshotFile(t *testing.T, kvs []kv) string { } } - sp := NewV3(cli, zap.NewExample()) + sp := NewV3(zap.NewExample()) dpPath := filepath.Join(os.TempDir(), fmt.Sprintf("snapshot%d.db", time.Now().Nanosecond())) - if err = sp.Save(context.Background(), dpPath); err != nil { + if err = sp.Save(context.Background(), ccfg, dpPath); err != nil { t.Fatal(err) } @@ -214,10 +209,6 @@ func restoreCluster(t *testing.T, clusterN int, dbPath string) ( ics += fmt.Sprintf(",%d=%s", i, pURLs[i].String()) } ics = ics[1:] - iURLs, err := types.NewURLsMap(ics) - if err != nil { - t.Fatal(err) - } cfgs := make([]*embed.Config, clusterN) for i := 0; i < clusterN; i++ { @@ -230,13 +221,14 @@ func restoreCluster(t *testing.T, clusterN int, dbPath string) ( cfg.InitialCluster = ics cfg.Dir = filepath.Join(os.TempDir(), fmt.Sprint(time.Now().Nanosecond()+i)) - sp := NewV3(nil, zap.NewExample()) - if err := sp.Restore(dbPath, RestoreConfig{ + sp := NewV3(zap.NewExample()) + if err := sp.Restore(RestoreConfig{ + SnapshotPath: dbPath, Name: cfg.Name, OutputDataDir: cfg.Dir, - InitialCluster: iURLs, + PeerURLs: []string{pURLs[i].String()}, + InitialCluster: ics, InitialClusterToken: cfg.InitialClusterToken, - PeerURLs: types.URLs{pURLs[i]}, }); err != nil { t.Fatal(err) } From 90e5af76f366cbdc97600e441222a233d8a4a8fe Mon Sep 17 00:00:00 2001 From: Gyuho Lee Date: Thu, 12 Apr 2018 10:32:32 -0700 Subject: [PATCH 2/4] etcdctl/ctlv3: use new snapshot package Signed-off-by: Gyuho Lee --- etcdctl/ctlv3/command/global.go | 9 +++++++++ etcdctl/ctlv3/command/snapshot_command.go | 23 +++++++++-------------- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/etcdctl/ctlv3/command/global.go b/etcdctl/ctlv3/command/global.go index 618e4aafb..753d0ad8f 100644 --- a/etcdctl/ctlv3/command/global.go +++ b/etcdctl/ctlv3/command/global.go @@ -148,6 +148,15 @@ func clientConfigFromCmd(cmd *cobra.Command) *clientConfig { return cfg } +func mustClientCfgFromCmd(cmd *cobra.Command) *clientv3.Config { + cc := 
clientConfigFromCmd(cmd) + cfg, err := newClientCfg(cc.endpoints, cc.dialTimeout, cc.keepAliveTime, cc.keepAliveTimeout, cc.scfg, cc.acfg) + if err != nil { + ExitWithError(ExitBadArgs, err) + } + return cfg +} + func mustClientFromCmd(cmd *cobra.Command) *clientv3.Client { cfg := clientConfigFromCmd(cmd) return cfg.mustClient() diff --git a/etcdctl/ctlv3/command/snapshot_command.go b/etcdctl/ctlv3/command/snapshot_command.go index c48045d26..22703feef 100644 --- a/etcdctl/ctlv3/command/snapshot_command.go +++ b/etcdctl/ctlv3/command/snapshot_command.go @@ -20,7 +20,6 @@ import ( "path/filepath" "strings" - "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/snapshot" "github.com/spf13/cobra" @@ -104,10 +103,11 @@ func snapshotSaveCommandFunc(cmd *cobra.Command, args []string) { if debug { lg = zap.NewExample() } - sp := snapshot.NewV3(mustClientFromCmd(cmd), lg) + sp := snapshot.NewV3(lg) + cfg := mustClientCfgFromCmd(cmd) path := args[0] - if err := sp.Save(context.TODO(), path); err != nil { + if err := sp.Save(context.TODO(), *cfg, path); err != nil { ExitWithError(ExitInterrupted, err) } fmt.Printf("Snapshot saved at %s\n", path) @@ -128,8 +128,7 @@ func snapshotStatusCommandFunc(cmd *cobra.Command, args []string) { if debug { lg = zap.NewExample() } - sp := snapshot.NewV3(nil, lg) - + sp := snapshot.NewV3(lg) ds, err := sp.Status(args[0]) if err != nil { ExitWithError(ExitError, err) @@ -143,11 +142,6 @@ func snapshotRestoreCommandFunc(cmd *cobra.Command, args []string) { ExitWithError(ExitBadArgs, err) } - urlmap, uerr := types.NewURLsMap(restoreCluster) - if uerr != nil { - ExitWithError(ExitBadArgs, uerr) - } - dataDir := restoreDataDir if dataDir == "" { dataDir = restoreName + ".etcd" @@ -166,15 +160,16 @@ func snapshotRestoreCommandFunc(cmd *cobra.Command, args []string) { if debug { lg = zap.NewExample() } - sp := snapshot.NewV3(nil, lg) + sp := snapshot.NewV3(lg) - if err := sp.Restore(args[0], snapshot.RestoreConfig{ + if err := sp.Restore(snapshot.RestoreConfig{ + SnapshotPath: args[0], Name: restoreName, OutputDataDir: dataDir, OutputWALDir: walDir, - InitialCluster: urlmap, + PeerURLs: strings.Split(restorePeerURLs, ","), + InitialCluster: restoreCluster, InitialClusterToken: restoreClusterToken, - PeerURLs: types.MustNewURLs(strings.Split(restorePeerURLs, ",")), SkipHashCheck: skipHashCheck, }); err != nil { ExitWithError(ExitError, err) From 8ae2bbf3c48256cec439ff04d1f237c4ff393b74 Mon Sep 17 00:00:00 2001 From: Gyuho Lee Date: Thu, 12 Apr 2018 10:39:18 -0700 Subject: [PATCH 3/4] functional/rpcpb: use new snapshot package interface Signed-off-by: Gyuho Lee --- functional/rpcpb/member.go | 57 +++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 29 deletions(-) diff --git a/functional/rpcpb/member.go b/functional/rpcpb/member.go index ce49632be..01f684113 100644 --- a/functional/rpcpb/member.go +++ b/functional/rpcpb/member.go @@ -16,6 +16,7 @@ package rpcpb import ( "context" + "crypto/tls" "fmt" "net/url" "os" @@ -24,7 +25,6 @@ import ( "github.com/coreos/etcd/clientv3" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/pkg/transport" - "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/snapshot" "github.com/dustin/go-humanize" @@ -80,11 +80,12 @@ func (m *Member) DialEtcdGRPCServer(opts ...grpc.DialOption) (*grpc.ClientConn, return grpc.Dial(m.EtcdClientEndpoint, dialOpts...) } -// CreateEtcdClient creates a client from member. 
-func (m *Member) CreateEtcdClient(opts ...grpc.DialOption) (*clientv3.Client, error) { +// CreateEtcdClientConfig creates a client configuration from member. +func (m *Member) CreateEtcdClientConfig(opts ...grpc.DialOption) (cfg *clientv3.Config, err error) { secure := false for _, cu := range m.Etcd.AdvertiseClientURLs { - u, err := url.Parse(cu) + var u *url.URL + u, err = url.Parse(cu) if err != nil { return nil, err } @@ -93,7 +94,7 @@ func (m *Member) CreateEtcdClient(opts ...grpc.DialOption) (*clientv3.Client, er } } - cfg := clientv3.Config{ + cfg = &clientv3.Config{ Endpoints: []string{m.EtcdClientEndpoint}, DialTimeout: 10 * time.Second, DialOptions: opts, @@ -109,13 +110,23 @@ func (m *Member) CreateEtcdClient(opts ...grpc.DialOption) (*clientv3.Client, er // only need it for auto TLS InsecureSkipVerify: true, } - tlsConfig, err := tlsInfo.ClientConfig() + var tlsConfig *tls.Config + tlsConfig, err = tlsInfo.ClientConfig() if err != nil { return nil, err } cfg.TLS = tlsConfig } - return clientv3.New(cfg) + return cfg, err +} + +// CreateEtcdClient creates a client from member. +func (m *Member) CreateEtcdClient(opts ...grpc.DialOption) (*clientv3.Client, error) { + cfg, err := m.CreateEtcdClientConfig(opts...) + if err != nil { + return nil, err + } + return clientv3.New(*cfg) } // CheckCompact ensures that historical data before given revision has been compacted. @@ -247,12 +258,11 @@ func (m *Member) SaveSnapshot(lg *zap.Logger) (err error) { return err } - var cli *clientv3.Client - cli, err = m.CreateEtcdClient() + var ccfg *clientv3.Config + ccfg, err = m.CreateEtcdClientConfig() if err != nil { return fmt.Errorf("%v (%q)", err, m.EtcdClientEndpoint) } - defer cli.Close() lg.Info( "snapshot save START", @@ -261,8 +271,8 @@ func (m *Member) SaveSnapshot(lg *zap.Logger) (err error) { zap.String("snapshot-path", m.SnapshotPath), ) now := time.Now() - mgr := snapshot.NewV3(cli, lg) - if err = mgr.Save(context.Background(), m.SnapshotPath); err != nil { + mgr := snapshot.NewV3(lg) + if err = mgr.Save(context.Background(), *ccfg, m.SnapshotPath); err != nil { return err } took := time.Since(now) @@ -314,17 +324,6 @@ func (m *Member) RestoreSnapshot(lg *zap.Logger) (err error) { return err } - var initialCluster types.URLsMap - initialCluster, err = types.NewURLsMap(m.EtcdOnSnapshotRestore.InitialCluster) - if err != nil { - return err - } - var peerURLs types.URLs - peerURLs, err = types.NewURLs(m.EtcdOnSnapshotRestore.AdvertisePeerURLs) - if err != nil { - return err - } - lg.Info( "snapshot restore START", zap.String("member-name", m.Etcd.Name), @@ -332,17 +331,17 @@ func (m *Member) RestoreSnapshot(lg *zap.Logger) (err error) { zap.String("snapshot-path", m.SnapshotPath), ) now := time.Now() - mgr := snapshot.NewV3(nil, lg) - err = mgr.Restore(m.SnapshotInfo.SnapshotPath, snapshot.RestoreConfig{ + mgr := snapshot.NewV3(lg) + err = mgr.Restore(snapshot.RestoreConfig{ + SnapshotPath: m.SnapshotInfo.SnapshotPath, Name: m.EtcdOnSnapshotRestore.Name, OutputDataDir: m.EtcdOnSnapshotRestore.DataDir, OutputWALDir: m.EtcdOnSnapshotRestore.WALDir, - InitialCluster: initialCluster, + PeerURLs: m.EtcdOnSnapshotRestore.AdvertisePeerURLs, + InitialCluster: m.EtcdOnSnapshotRestore.InitialCluster, InitialClusterToken: m.EtcdOnSnapshotRestore.InitialClusterToken, - PeerURLs: peerURLs, SkipHashCheck: false, - - // TODO: SkipHashCheck == true, for recover from existing db file + // TODO: set SkipHashCheck it true, to recover from existing db file }) took := time.Since(now) lg.Info( From 
5ba4e7d533248eed56c56b466d9b4cb4e3402114 Mon Sep 17 00:00:00 2001 From: Gyuho Lee Date: Thu, 12 Apr 2018 10:39:45 -0700 Subject: [PATCH 4/4] functional/tester: fix unit tests Signed-off-by: Gyuho Lee --- functional/tester/cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functional/tester/cluster_test.go b/functional/tester/cluster_test.go index dccc36307..09b8a4e81 100644 --- a/functional/tester/cluster_test.go +++ b/functional/tester/cluster_test.go @@ -204,7 +204,7 @@ func Test_read(t *testing.T) { "SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT", // "SIGQUIT_AND_REMOVE_LEADER", // "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT", - "SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH", + // "SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH", "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER", "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT", "BLACKHOLE_PEER_PORT_TX_RX_LEADER",
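
For reference, a minimal sketch of how a caller drives the reworked
snapshot.Manager after this series. It is not part of the patches; the
endpoint, file paths, member name, peer URL, and cluster token below are
placeholder values.

    package main

    import (
        "context"
        "log"
        "time"

        "github.com/coreos/etcd/clientv3"
        "github.com/coreos/etcd/snapshot"

        "go.uber.org/zap"
    )

    func main() {
        sp := snapshot.NewV3(zap.NewExample())

        // Save now takes the client configuration instead of a live client.
        // Exactly one endpoint must be given, since the snapshot is the
        // point-in-time state of that selected node.
        ccfg := clientv3.Config{
            Endpoints:   []string{"127.0.0.1:2379"},
            DialTimeout: 10 * time.Second,
        }
        if err := sp.Save(context.Background(), ccfg, "/tmp/backup.db"); err != nil {
            log.Fatal(err)
        }

        // Status inspects the saved snapshot file.
        st, err := sp.Status("/tmp/backup.db")
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("hash: %d, revision: %d, keys: %d, size: %d",
            st.Hash, st.Revision, st.TotalKey, st.TotalSize)

        // Restore now reads the snapshot path from RestoreConfig and takes
        // PeerURLs/InitialCluster as plain strings; the output data
        // directory must not already exist.
        if err := sp.Restore(snapshot.RestoreConfig{
            SnapshotPath:        "/tmp/backup.db",
            Name:                "m1",
            OutputDataDir:       "/tmp/m1.etcd",
            PeerURLs:            []string{"http://127.0.0.1:2380"},
            InitialCluster:      "m1=http://127.0.0.1:2380",
            InitialClusterToken: "etcd-cluster",
        }); err != nil {
            log.Fatal(err)
        }
    }

Passing clientv3.Config into Save, rather than an existing *clientv3.Client,
lets the package enforce the single-endpoint requirement itself and close the
client it creates once the snapshot transfer finishes.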