server: better testing

This commit is contained in:
Yicheng Qin
2014-08-14 12:14:47 -07:00
parent cce88a8504
commit bb95187bc7
3 changed files with 268 additions and 284 deletions

View File

@@ -18,9 +18,11 @@ package etcd
import (
"math/rand"
"reflect"
"testing"
"time"
"github.com/coreos/etcd/conf"
"github.com/coreos/etcd/third_party/github.com/coreos/go-etcd/etcd"
)
@@ -101,51 +103,31 @@ func TestJoinThroughFollower(t *testing.T) {
}
}
// func TestClusterConfigReload(t *testing.T) {
// defer afterTest(t)
// tests := []int{3, 5, 7}
// TestClusterConfigReload verifies that a cluster-wide configuration
// change survives a full stop/restart of the cluster: after the leader
// applies a new ClusterConfig and all nodes are restarted, the new
// leader must still report the same configuration.
func TestClusterConfigReload(t *testing.T) {
	defer afterTest(t)

	cl := &testCluster{Size: 5}
	cl.Start()
	defer cl.Destroy()

	lead, _ := cl.Leader()
	cc := conf.NewClusterConfig()
	cc.ActiveSize = 15
	cc.RemoveDelay = 60
	if err := cl.Participant(int(lead)).setClusterConfig(cc); err != nil {
		t.Fatalf("setClusterConfig err = %v", err)
	}

	cl.Stop()
	cl.Restart()

	lead, _ = cl.Leader()
	// wait for msgAppResp to commit all entries
	// (tick duration is read from node 0; presumably identical on all nodes)
	time.Sleep(2 * defaultHeartbeat * cl.Participant(0).tickDuration)
	if g := cl.Participant(int(lead)).clusterConfig(); !reflect.DeepEqual(g, cc) {
		t.Errorf("clusterConfig = %+v, want %+v", g, cc)
	}
}
func TestFiveNodeKillOneAndRecover(t *testing.T) {
defer afterTest(t)

View File

@@ -302,7 +302,7 @@ func (c *testCluster) Start() {
c.nodes = nodes
nodes[0] = &testServer{Id: 0, TLS: c.TLS}
nodes[0].Start()
if !nodes[0].WaitMode(participantMode, 5) {
if !nodes[0].WaitMode(participantMode, 10) {
panic("cannot wait until participantMode")
}
@@ -319,7 +319,7 @@ func (c *testCluster) Start() {
// or this configuration request might be dropped.
// Or it could be a slow join because it needs to retry.
// TODO: this might not be true if we add param for retry interval.
if !s.WaitMode(participantMode, 3) {
if !s.WaitMode(participantMode, 20) {
panic("cannot wait until participantMode")
}
w, err := s.Participant().Watch(v2machineKVPrefix, true, false, uint64(i))

View File

@@ -16,276 +16,278 @@ limitations under the License.
package etcd
// func TestMachinesEndPoint(t *testing.T) {
// es, hs := buildCluster(3, false)
// waitCluster(t, es)
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"reflect"
"sort"
"strings"
"testing"
// w := make([]string, len(hs))
// for i := range hs {
// w[i] = hs[i].URL
// }
"github.com/coreos/etcd/conf"
"github.com/coreos/etcd/store"
)
// for i := range hs {
// r, err := http.Get(hs[i].URL + v2machinePrefix)
// if err != nil {
// t.Errorf("%v", err)
// break
// }
// b, err := ioutil.ReadAll(r.Body)
// r.Body.Close()
// if err != nil {
// t.Errorf("%v", err)
// break
// }
// g := strings.Split(string(b), ",")
// sort.Strings(g)
// if !reflect.DeepEqual(w, g) {
// t.Errorf("machines = %v, want %v", g, w)
// }
// }
// TestMachinesEndPoint checks that every node serves the complete,
// identical machine list (the client URLs of all nodes) on the v2
// machines endpoint.
func TestMachinesEndPoint(t *testing.T) {
	cl := &testCluster{Size: 3}
	cl.Start()
	// defer so the cluster is torn down even if the test aborts early.
	defer cl.Destroy()

	w := make([]string, cl.Size)
	for i := 0; i < cl.Size; i++ {
		w[i] = cl.URL(i)
	}

	for i := 0; i < cl.Size; i++ {
		r, err := http.Get(cl.URL(i) + v2machinePrefix)
		if err != nil {
			t.Errorf("%v", err)
			break
		}
		b, err := ioutil.ReadAll(r.Body)
		r.Body.Close()
		if err != nil {
			t.Errorf("%v", err)
			break
		}
		// The endpoint returns a comma-separated list; sort it so the
		// comparison is order-independent.
		g := strings.Split(string(b), ",")
		sort.Strings(g)
		if !reflect.DeepEqual(w, g) {
			t.Errorf("machines = %v, want %v", g, w)
		}
	}
}
// for i := range hs {
// r, err := http.Get(hs[i].URL + v2LeaderPrefix)
// if err != nil {
// t.Errorf("%v", err)
// break
// }
// b, err := ioutil.ReadAll(r.Body)
// r.Body.Close()
// if err != nil {
// t.Errorf("%v", err)
// break
// }
// if string(b) != w {
// t.Errorf("leader = %v, want %v", string(b), w)
// }
// }
// TestLeaderEndPoint checks that every node reports the same leader on
// the v2 leader endpoint: the raft endpoint of node 0, which bootstraps
// the cluster.
func TestLeaderEndPoint(t *testing.T) {
	cl := &testCluster{Size: 3}
	cl.Start()
	// defer so the cluster is torn down even if the test aborts early.
	defer cl.Destroy()

	// todo(xiangli) change this to raft port...
	w := cl.URL(0) + "/raft"

	for i := 0; i < cl.Size; i++ {
		r, err := http.Get(cl.URL(i) + v2LeaderPrefix)
		if err != nil {
			t.Errorf("%v", err)
			break
		}
		b, err := ioutil.ReadAll(r.Body)
		r.Body.Close()
		if err != nil {
			t.Errorf("%v", err)
			break
		}
		if string(b) != w {
			t.Errorf("leader = %v, want %v", string(b), w)
		}
	}
}
// if stats.SetSuccess != 1 {
// t.Errorf("setSuccess = %d, want 1", stats.SetSuccess)
// }
// TestStoreStatsEndPoint checks the store statistics endpoint of a
// single-node cluster: it must decode as store.Stats and report exactly
// one successful set (presumably from cluster bootstrap — confirm).
func TestStoreStatsEndPoint(t *testing.T) {
	cl := &testCluster{Size: 1}
	cl.Start()
	// defer so the cluster is torn down even if the test aborts early.
	defer cl.Destroy()

	resp, err := http.Get(cl.URL(0) + v2StoreStatsPrefix)
	if err != nil {
		// Fatalf, not Errorf: resp is nil on error, so continuing
		// would panic on resp.Body below.
		t.Fatalf("%v", err)
	}
	stats := new(store.Stats)
	err = json.NewDecoder(resp.Body).Decode(stats)
	resp.Body.Close()
	if err != nil {
		t.Errorf("%v", err)
	}

	if stats.SetSuccess != 1 {
		t.Errorf("setSuccess = %d, want 1", stats.SetSuccess)
	}
}
// conf := new(config.ClusterConfig)
// err = json.NewDecoder(r.Body).Decode(conf)
// r.Body.Close()
// if err != nil {
// t.Errorf("%v", err)
// continue
// }
// w := config.NewClusterConfig()
// if !reflect.DeepEqual(conf, w) {
// t.Errorf("#%d: config = %+v, want %+v", i, conf, w)
// }
// }
// TestGetAdminConfigEndPoint checks that the admin config endpoint
// serves the default cluster configuration as JSON with status 200.
func TestGetAdminConfigEndPoint(t *testing.T) {
	cl := &testCluster{Size: 1}
	cl.Start()
	// defer so the cluster is torn down even if the test aborts early.
	defer cl.Destroy()

	r, err := http.Get(cl.URL(0) + v2adminConfigPrefix)
	if err != nil {
		// Fatalf, not Errorf: r is nil on error, so continuing would
		// panic on r.StatusCode below.
		t.Fatalf("%v", err)
	}
	if g := r.StatusCode; g != 200 {
		t.Errorf("status = %d, want %d", g, 200)
	}
	if g := r.Header.Get("Content-Type"); g != "application/json" {
		t.Errorf("ContentType = %s, want application/json", g)
	}

	cfg := new(conf.ClusterConfig)
	err = json.NewDecoder(r.Body).Decode(cfg)
	r.Body.Close()
	if err != nil {
		t.Errorf("%v", err)
	}
	w := conf.NewClusterConfig()
	if !reflect.DeepEqual(cfg, w) {
		t.Errorf("config = %+v, want %+v", cfg, w)
	}
}
// r, err := NewTestClient().Put(hs[0].URL+v2adminConfigPrefix, "application/json", bytes.NewBufferString(tt.c))
// if err != nil {
// t.Fatalf("%v", err)
// }
// b, err := ioutil.ReadAll(r.Body)
// r.Body.Close()
// if err != nil {
// t.Fatalf("%v", err)
// }
// if wbody := append([]byte(tt.wc), '\n'); !reflect.DeepEqual(b, wbody) {
// t.Errorf("#%d: put result = %s, want %s", i, b, wbody)
// }
// TestPutAdminConfigEndPoint checks that PUTting a cluster configuration
// to the admin config endpoint normalizes it, echoes the normalized JSON
// back in the response body, and commits it to the key-value store under
// v2configKVPrefix.
func TestPutAdminConfigEndPoint(t *testing.T) {
	tests := []struct {
		c, wc string // raw request body, expected normalized config
	}{
		{
			`{"activeSize":1,"removeDelay":1,"syncInterval":1}`,
			`{"activeSize":3,"removeDelay":2,"syncInterval":1}`,
		},
		{
			`{"activeSize":5,"removeDelay":20.5,"syncInterval":1.5}`,
			`{"activeSize":5,"removeDelay":20.5,"syncInterval":1.5}`,
		},
		{
			`{"activeSize":5 , "removeDelay":20 , "syncInterval": 2 }`,
			`{"activeSize":5,"removeDelay":20,"syncInterval":2}`,
		},
		{
			`{"activeSize":3, "removeDelay":60}`,
			`{"activeSize":3,"removeDelay":60,"syncInterval":5}`,
		},
	}

	for i, tt := range tests {
		// Run each case in a closure so the deferred Destroy fires even
		// when t.Fatalf aborts the test midway (Fatalf runs deferred
		// calls via runtime.Goexit); previously the cluster leaked.
		func() {
			cl := &testCluster{Size: 1}
			cl.Start()
			defer cl.Destroy()
			index := cl.Participant(0).Index()

			r, err := NewTestClient().Put(cl.URL(0)+v2adminConfigPrefix, "application/json", bytes.NewBufferString(tt.c))
			if err != nil {
				t.Fatalf("%v", err)
			}
			b, err := ioutil.ReadAll(r.Body)
			r.Body.Close()
			if err != nil {
				t.Fatalf("%v", err)
			}
			if wbody := append([]byte(tt.wc), '\n'); !reflect.DeepEqual(b, wbody) {
				t.Errorf("#%d: put result = %s, want %s", i, b, wbody)
			}

			// The config change must also appear in the store.
			w, err := cl.Participant(0).Watch(v2configKVPrefix, false, false, index)
			if err != nil {
				t.Errorf("%v", err)
				return
			}
			e := <-w.EventChan
			if g := *e.Node.Value; g != tt.wc {
				t.Errorf("#%d: %s = %s, want %s", i, v2configKVPrefix, g, tt.wc)
			}
		}()
	}
}
// m := new(machineMessage)
// err = json.NewDecoder(r.Body).Decode(m)
// r.Body.Close()
// if err != nil {
// t.Errorf("%v", err)
// continue
// }
// wm := &machineMessage{
// Name: name,
// State: stateFollower,
// ClientURL: hs[i].URL,
// PeerURL: hs[i].URL,
// }
// if i == 0 {
// wm.State = stateLeader
// }
// if !reflect.DeepEqual(m, wm) {
// t.Errorf("#%d on %d: body = %+v, want %+v", i, j, m, wm)
// }
// }
// }
// TestGetAdminMachineEndPoint checks that each node can be looked up by
// name on the admin machines endpoint of every node, and that the
// returned machineMessage carries that node's URLs and raft state.
func TestGetAdminMachineEndPoint(t *testing.T) {
	cl := &testCluster{Size: 3}
	cl.Start()
	// defer so the cluster is torn down even if the test aborts early.
	defer cl.Destroy()

	for i := 0; i < cl.Size; i++ {
		for j := 0; j < cl.Size; j++ {
			name := fmt.Sprint(cl.Node(i).Id)
			r, err := http.Get(cl.URL(j) + v2adminMachinesPrefix + name)
			if err != nil {
				t.Errorf("%v", err)
				continue
			}
			if g := r.StatusCode; g != 200 {
				t.Errorf("#%d on %d: status = %d, want %d", i, j, g, 200)
			}
			if g := r.Header.Get("Content-Type"); g != "application/json" {
				t.Errorf("#%d on %d: ContentType = %s, want application/json", i, j, g)
			}

			m := new(machineMessage)
			err = json.NewDecoder(r.Body).Decode(m)
			r.Body.Close()
			if err != nil {
				t.Errorf("%v", err)
				continue
			}
			// Node 0 bootstraps the cluster, so it is expected to lead.
			wm := &machineMessage{
				Name:      name,
				State:     stateFollower,
				ClientURL: cl.URL(i),
				PeerURL:   cl.URL(i),
			}
			if i == 0 {
				wm.State = stateLeader
			}
			if !reflect.DeepEqual(m, wm) {
				t.Errorf("#%d on %d: body = %+v, want %+v", i, j, m, wm)
			}
		}
	}
}
// for i := range hs {
// r, err := http.Get(hs[i].URL + v2adminMachinesPrefix)
// if err != nil {
// t.Errorf("%v", err)
// continue
// }
// m := make([]*machineMessage, 0)
// err = json.NewDecoder(r.Body).Decode(&m)
// r.Body.Close()
// if err != nil {
// t.Errorf("%v", err)
// continue
// }
// TestGetAdminMachinesEndPoint checks that every node serves the full
// machine list, including leader/follower state, on the admin machines
// endpoint.
func TestGetAdminMachinesEndPoint(t *testing.T) {
	cl := &testCluster{Size: 3}
	cl.Start()
	// defer so the cluster is torn down even if the test aborts early.
	defer cl.Destroy()

	w := make([]*machineMessage, cl.Size)
	for i := 0; i < cl.Size; i++ {
		w[i] = &machineMessage{
			Name:      fmt.Sprint(cl.Node(i).Id),
			State:     stateFollower,
			ClientURL: cl.URL(i),
			PeerURL:   cl.URL(i),
		}
	}
	// Node 0 bootstraps the cluster, so it is expected to lead.
	w[0].State = stateLeader

	for i := 0; i < cl.Size; i++ {
		r, err := http.Get(cl.URL(i) + v2adminMachinesPrefix)
		if err != nil {
			t.Errorf("%v", err)
			continue
		}
		m := make([]*machineMessage, 0)
		err = json.NewDecoder(r.Body).Decode(&m)
		r.Body.Close()
		if err != nil {
			t.Errorf("%v", err)
			continue
		}

		// Sort both sides by name so the comparison is order-independent.
		sm := machineSlice(m)
		sw := machineSlice(w)
		sort.Sort(sm)
		sort.Sort(sw)
		if !reflect.DeepEqual(sm, sw) {
			t.Errorf("on %d: machines = %+v, want %+v", i, sm, sw)
		}
	}
}
// machineSlice implements sort.Interface over machine messages,
// ordering them by Name. (The previous comment said "int64Slice",
// which was a copy-paste leftover.)
type machineSlice []*machineMessage
func (s machineSlice) Len() int { return len(s) }
func (s machineSlice) Less(i, j int) bool { return s[i].Name < s[j].Name }
func (s machineSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }