Mirror of https://github.com/etcd-io/etcd.git

e2e tests: More logging and expect adopted to 3.4.

Commit: 4725567d5e (parent: 7ae3d25f91)
@@ -222,15 +222,15 @@ func TestCtlV2BackupV3Snapshot(t *testing.T) { testCtlV2Backup(t, 1, true) }
 func testCtlV2Backup(t *testing.T, snapCount int, v3 bool) {
 	BeforeTestV2(t)
 
-	backupDir, err := ioutil.TempDir("", "testbackup0.etcd")
+	backupDir, err := ioutil.TempDir(t.TempDir(), "testbackup0.etcd")
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer os.RemoveAll(backupDir)
 
 	etcdCfg := newConfigNoTLS()
 	etcdCfg.snapshotCount = snapCount
 	etcdCfg.enableV2 = true
+	t.Log("Starting etcd-1")
 	epc1 := setupEtcdctlTest(t, etcdCfg, false)
 
 	// v3 put before v2 set so snapshot happens after v3 operations to confirm
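Aside: the hunk above places the scratch backup directory under t.TempDir(), which the testing package removes automatically when the test finishes. A minimal, self-contained sketch of that pattern; the package and test name are hypothetical and not part of this commit:

package sketch

import (
	"io/ioutil"
	"testing"
)

// TestTempBackupDir illustrates creating a scratch directory inside
// t.TempDir(): the framework deletes it after the test, so no explicit
// defer os.RemoveAll is needed.
func TestTempBackupDir(t *testing.T) {
	backupDir, err := ioutil.TempDir(t.TempDir(), "testbackup0.etcd")
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("backup dir: %s", backupDir)
}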
@@ -241,23 +241,30 @@ func testCtlV2Backup(t *testing.T, snapCount int, v3 bool) {
 	}
 	os.Setenv("ETCDCTL_API", "2")
 
+	t.Log("Setting key in etcd-1")
 	if err := etcdctlSet(epc1, "foo1", "bar1"); err != nil {
 		t.Fatal(err)
 	}
 
 	if v3 {
+		t.Log("Stopping etcd-1")
 		// v3 must lock the db to backup, so stop process
 		if err := epc1.Stop(); err != nil {
 			t.Fatal(err)
 		}
 	}
-	if err := etcdctlBackup(epc1, epc1.procs[0].Config().dataDirPath, backupDir, v3); err != nil {
+	t.Log("Triggering etcd backup")
+	if err := etcdctlBackup(t, epc1, epc1.procs[0].Config().dataDirPath, backupDir, v3); err != nil {
 		t.Fatal(err)
 	}
+	t.Log("Closing etcd-1 backup")
 	if err := epc1.Close(); err != nil {
 		t.Fatalf("error closing etcd processes (%v)", err)
 	}
 
+	t.Logf("Backup directory: %s", backupDir)
+
+	t.Log("Starting etcd-2 (post backup)")
 	// restart from the backup directory
 	cfg2 := newConfigNoTLS()
 	cfg2.dataDirPath = backupDir
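Aside: a v3 backup has to lock the db, which is why the member is stopped before the backup runs. A hedged sketch of that sequence using plain os/exec; the helper, interface, and etcdctl path are assumptions, while the flags mirror the etcdctlBackup helper further down in this diff:

package sketch

import (
	"os/exec"
	"testing"
)

// etcdMember is a hypothetical stand-in for the test's process handle;
// only Stop is needed for this sketch.
type etcdMember interface {
	Stop() error
}

// runV3Backup stops the member so the v3 backup can lock the db, then runs
// `etcdctl backup --data-dir ... --backup-dir ... --with-v3`.
func runV3Backup(t *testing.T, m etcdMember, etcdctlPath, dataDir, backupDir string) {
	t.Helper()
	if err := m.Stop(); err != nil {
		t.Fatal(err)
	}
	args := []string{"backup", "--data-dir", dataDir, "--backup-dir", backupDir, "--with-v3"}
	t.Logf("Running: %s %v", etcdctlPath, args)
	if out, err := exec.Command(etcdctlPath, args...).CombinedOutput(); err != nil {
		t.Fatalf("backup failed: %v\n%s", err, out)
	}
}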
@@ -268,6 +275,7 @@ func testCtlV2Backup(t *testing.T, snapCount int, v3 bool) {
 	// Make sure a failing test is not leaking resources (running server).
 	defer epc2.Close()
 
+	t.Log("Getting examplar key")
 	// check if backup went through correctly
 	if err := etcdctlGet(epc2, "foo1", "bar1", false); err != nil {
 		t.Fatal(err)
@@ -276,6 +284,7 @@ func testCtlV2Backup(t *testing.T, snapCount int, v3 bool) {
 	os.Setenv("ETCDCTL_API", "3")
 	ctx2 := ctlCtx{t: t, epc: epc2}
 	if v3 {
+		t.Log("Getting v3 examplar key")
 		if err := ctlV3Get(ctx2, []string{"v3key"}, kv{"v3key", "123"}); err != nil {
 			t.Fatal(err)
 		}
@@ -286,6 +295,7 @@ func testCtlV2Backup(t *testing.T, snapCount int, v3 bool) {
 	}
 	os.Setenv("ETCDCTL_API", "2")
 
+	t.Log("Getting examplar key foo2")
 	// check if it can serve client requests
 	if err := etcdctlSet(epc2, "foo2", "bar2"); err != nil {
 		t.Fatal(err)
@@ -294,6 +304,7 @@ func testCtlV2Backup(t *testing.T, snapCount int, v3 bool) {
 		t.Fatal(err)
 	}
 
+	t.Log("Closing etcd-2")
 	if err := epc2.Close(); err != nil {
 		t.Fatalf("error closing etcd processes (%v)", err)
 	}
@@ -472,11 +483,12 @@ func etcdctlAuthEnable(clus *etcdProcessCluster) error {
 	return spawnWithExpect(cmdArgs, "Authentication Enabled")
 }
 
-func etcdctlBackup(clus *etcdProcessCluster, dataDir, backupDir string, v3 bool) error {
+func etcdctlBackup(t testing.TB, clus *etcdProcessCluster, dataDir, backupDir string, v3 bool) error {
 	cmdArgs := append(etcdctlPrefixArgs(clus), "backup", "--data-dir", dataDir, "--backup-dir", backupDir)
 	if v3 {
 		cmdArgs = append(cmdArgs, "--with-v3")
 	}
+	t.Logf("Running: %v", cmdArgs)
 	proc, err := spawnCmd(cmdArgs)
 	if err != nil {
 		return err
@@ -24,7 +24,7 @@ import (
 )
 
 var (
-	etcdServerReadyLines = []string{"enabled capabilities for version", "published", "ready to serve client requests"}
+	etcdServerReadyLines = []string{"ready to serve client requests"}
 	binPath              string
 	ctlBinPath           string
 )
@@ -49,20 +49,6 @@ func TestReleaseUpgrade(t *testing.T) {
 			t.Fatalf("error closing etcd processes (%v)", errC)
 		}
 	}()
-	// 3.0 boots as 2.3 then negotiates up to 3.0
-	// so there's a window at boot time where it doesn't have V3rpcCapability enabled
-	// poll /version until etcdcluster is >2.3.x before making v3 requests
-	for i := 0; i < 7; i++ {
-		if err = cURLGet(epc, cURLReq{endpoint: "/version", expected: `"etcdcluster":"3.`}); err != nil {
-			t.Logf("#%d: v3 is not ready yet (%v)", i, err)
-			time.Sleep(time.Second)
-			continue
-		}
-		break
-	}
-	if err != nil {
-		t.Fatalf("cannot pull version (%v)", err)
-	}
 
 	os.Setenv("ETCDCTL_API", "3")
 	defer os.Unsetenv("ETCDCTL_API")
@@ -83,24 +69,32 @@ func TestReleaseUpgrade(t *testing.T) {
 		}
 	}
 
+	t.Log("Cluster of etcd in old version running")
+
 	for i := range epc.procs {
+		t.Logf("Stopping node: %v", i)
 		if err := epc.procs[i].Stop(); err != nil {
 			t.Fatalf("#%d: error closing etcd process (%v)", i, err)
 		}
+		t.Logf("Stopped node: %v", i)
 		epc.procs[i].Config().execPath = binDir + "/etcd"
 		epc.procs[i].Config().keepDataDir = true
 
+		t.Logf("Restarting node in the new version: %v", i)
 		if err := epc.procs[i].Restart(); err != nil {
 			t.Fatalf("error restarting etcd process (%v)", err)
 		}
 
+		t.Logf("Testing reads after node restarts: %v", i)
 		for j := range kvs {
			if err := ctlV3Get(cx, []string{kvs[j].key}, []kv{kvs[j]}...); err != nil {
 				cx.t.Fatalf("#%d-%d: ctlV3Get error (%v)", i, j, err)
 			}
 		}
+		t.Logf("Tested reads after node restarts: %v", i)
 	}
 
+	t.Log("Waiting for full upgrade...")
 	// TODO: update after release candidate
 	// expect upgraded cluster version
 	// new cluster version needs more time to upgrade
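Aside: a compact, hypothetical restatement of the rolling-upgrade pattern in the hunk above (the interface and function names are assumptions, not etcd test APIs): take down one member at a time, restart it with the new binary while keeping its data dir, and verify reads before moving on.

package sketch

import "testing"

// member is a hypothetical stand-in for the per-process handle; only the
// calls exercised by the rolling upgrade are included.
type member interface {
	Stop() error
	Restart() error
}

// rollingUpgrade stops, restarts, and verifies one member at a time.
func rollingUpgrade(t *testing.T, members []member, verifyReads func(i int) error) {
	for i, m := range members {
		t.Logf("Stopping node: %v", i)
		if err := m.Stop(); err != nil {
			t.Fatalf("#%d: error stopping etcd process (%v)", i, err)
		}
		t.Logf("Restarting node in the new version: %v", i)
		if err := m.Restart(); err != nil {
			t.Fatalf("error restarting etcd process (%v)", err)
		}
		if err := verifyReads(i); err != nil {
			t.Fatalf("#%d: reads failed after restart (%v)", i, err)
		}
	}
}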
@@ -116,6 +110,7 @@ func TestReleaseUpgrade(t *testing.T) {
 	if err != nil {
 		t.Fatalf("cluster version is not upgraded (%v)", err)
 	}
+	t.Log("TestReleaseUpgrade businessLogic DONE")
 }
 
 func TestReleaseUpgradeWithRestart(t *testing.T) {
@@ -27,15 +27,13 @@ import (
 )
 
 func waitReadyExpectProc(exproc *expect.ExpectProcess, readyStrs []string) error {
-	c := 0
 	matchSet := func(l string) bool {
 		for _, s := range readyStrs {
 			if strings.Contains(l, s) {
-				c++
-				break
+				return true
 			}
 		}
-		return c == len(readyStrs)
+		return false
 	}
 	_, err := exproc.ExpectFunc(matchSet)
 	return err
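Aside: the rewritten matcher above treats a member as ready as soon as any expected substring appears on a log line, instead of counting until every string has been seen. A minimal standalone sketch of that predicate; the helper name is hypothetical:

package sketch

import "strings"

// containsAny mirrors the matchSet logic above: report a match as soon as
// the line contains any one of the expected ready strings.
func containsAny(line string, readyStrs []string) bool {
	for _, s := range readyStrs {
		if strings.Contains(line, s) {
			return true
		}
	}
	return false
}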