functional-tester: move checker logic to cluster

This moves the checker logic from the tester to the cluster so that stressers and checkers can be initialized at the same time, which is useful because some checkers depend on stressers.
fanmin shi
2016-10-18 10:57:32 -07:00
parent 57008f1690
commit 7d86d1050e
4 changed files with 34 additions and 34 deletions
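The net effect is that the cluster owns both the stressers and the checker, so bootstrap can construct them side by side instead of main.go attaching a checker to the tester afterwards. A rough sketch of the new wiring follows (the method name wireStressAndCheck is made up for illustration; the real code lives in cluster.bootstrap and appears in the diffs below):

// Sketch only, not part of the commit: the shape of the new wiring inside
// the cluster -- stressers and the checker are now set up in one place.
func (c *cluster) wireStressAndCheck() {
	for i := range c.Stressers {
		go c.Stressers[i].Stress()
	}
	if c.consistencyCheck && !c.v2Only {
		// *cluster implements getRevisionHash, so it satisfies hashAndRevGetter.
		c.Checker = newHashChecker(hashAndRevGetter(c))
	} else {
		c.Checker = newNoChecker()
	}
}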


@@ -24,14 +24,18 @@ type Checker interface {
 	Check() error
 }
 
-type hashChecker struct {
-	tt *tester
+type hashAndRevGetter interface {
+	getRevisionHash() (revs map[string]int64, hashes map[string]int64, err error)
 }
 
-func newHashChecker(tt *tester) Checker { return &hashChecker{tt} }
+type hashChecker struct {
+	hrg hashAndRevGetter
+}
+
+func newHashChecker(hrg hashAndRevGetter) Checker { return &hashChecker{hrg} }
 
 func (hc *hashChecker) Check() (err error) {
-	plog.Printf("%s fetching current revisions...", hc.tt.logPrefix())
+	plog.Printf("fetching current revisions...")
 	var (
 		revs   map[string]int64
 		hashes map[string]int64
@@ -40,28 +44,28 @@ func (hc *hashChecker) Check() (err error) {
 	for i := 0; i < 7; i++ {
 		time.Sleep(time.Second)
 
-		revs, hashes, err = hc.tt.cluster.getRevisionHash()
+		revs, hashes, err = hc.hrg.getRevisionHash()
 		if err != nil {
-			plog.Printf("%s #%d failed to get current revisions (%v)", hc.tt.logPrefix(), i, err)
+			plog.Printf("#%d failed to get current revisions (%v)", i, err)
 			continue
 		}
 		if _, ok = getSameValue(revs); ok {
 			break
 		}
 
-		plog.Printf("%s #%d inconsistent current revisions %+v", hc.tt.logPrefix(), i, revs)
+		plog.Printf("#%d inconsistent current revisions %+v", i, revs)
 	}
 	if !ok || err != nil {
 		return fmt.Errorf("checking current revisions failed [err: %v, revisions: %v]", err, revs)
 	}
-	plog.Printf("%s all members are consistent with current revisions [revisions: %v]", hc.tt.logPrefix(), revs)
+	plog.Printf("all members are consistent with current revisions [revisions: %v]", revs)
 
-	plog.Printf("%s checking current storage hashes...", hc.tt.logPrefix())
+	plog.Printf("checking current storage hashes...")
 	if _, ok = getSameValue(hashes); !ok {
 		return fmt.Errorf("inconsistent hashes [%v]", hashes)
 	}
-	plog.Printf("%s all members are consistent with storage hashes", hc.tt.logPrefix())
+	plog.Printf("all members are consistent with storage hashes")
 	return nil
 }
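Because hashChecker now depends on the narrow hashAndRevGetter interface rather than *tester, it can also be exercised against a stub. A hypothetical in-package test sketch (fakeGetter and the test are not part of this commit; it assumes the package's existing getSameValue and plog helpers shown above):

package main // hypothetical checker test sketch, same package as the checker

import "testing"

// fakeGetter is a made-up stub that satisfies hashAndRevGetter.
type fakeGetter struct {
	revs   map[string]int64
	hashes map[string]int64
}

func (f fakeGetter) getRevisionHash() (revs map[string]int64, hashes map[string]int64, err error) {
	return f.revs, f.hashes, nil
}

func TestHashCheckerConsistent(t *testing.T) {
	g := fakeGetter{
		revs:   map[string]int64{"m0": 10, "m1": 10, "m2": 10},
		hashes: map[string]int64{"m0": 7, "m1": 7, "m2": 7},
	}
	// All members report the same revision and hash, so Check() should return nil
	// (it sleeps about a second per retry, so the happy path finishes quickly).
	if err := newHashChecker(g).Check(); err != nil {
		t.Fatal(err)
	}
}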


@@ -41,19 +41,15 @@ type agentConfig struct {
 
 type cluster struct {
 	agents []agentConfig
-	v2Only bool // to be deprecated
+	v2Only           bool // to be deprecated
+	consistencyCheck bool
+	Size             int
 
-	stressQPS            int
-	stressKeyLargeSize   int
-	stressKeySize        int
-	stressKeySuffixRange int
-
-	Size      int
-	Stressers []Stresser
+	Stressers     []Stresser
+	stressBuilder stressBuilder
+	Checker       Checker
 
 	Members []*member
-
-	stressBuilder stressBuilder
 }
 
 type ClusterStatus struct {
@@ -109,6 +105,12 @@ func (c *cluster) bootstrap() error {
 		go c.Stressers[i].Stress()
 	}
 
+	if c.consistencyCheck && !c.v2Only {
+		c.Checker = newHashChecker(hashAndRevGetter(c))
+	} else {
+		c.Checker = newNoChecker()
+	}
+
 	c.Size = size
 	c.Members = members
 	return nil


@@ -74,9 +74,10 @@ func main() {
 	}
 
 	c := &cluster{
-		agents:        agents,
-		v2Only:        *isV2Only,
-		stressBuilder: newStressBuilder(*stresserType, sConfig),
+		agents:           agents,
+		v2Only:           *isV2Only,
+		stressBuilder:    newStressBuilder(*stresserType, sConfig),
+		consistencyCheck: *consistencyCheck,
 	}
 	if err := c.bootstrap(); err != nil {
@@ -116,11 +117,6 @@
 		failures: schedule,
 		cluster:  c,
 		limit:    *limit,
-		checker:  newNoChecker(),
 	}
-	if *consistencyCheck && !c.v2Only {
-		t.checker = newHashChecker(t)
-	}
 
 	sh := statusHandler{status: &t.status}


@@ -20,11 +20,9 @@ import (
 )
 
 type tester struct {
-	failures []failure
-	cluster  *cluster
-	limit    int
-	checker  Checker
-
+	failures        []failure
+	cluster         *cluster
+	limit           int
 	status          Status
 	currentRevision int64
 }
@@ -146,7 +144,7 @@ func (tt *tester) checkConsistency() (err error) {
 		}
 		err = tt.startStressers()
 	}()
-	if err = tt.checker.Check(); err != nil {
+	if err = tt.cluster.Checker.Check(); err != nil {
 		plog.Printf("%s %v", tt.logPrefix(), err)
 	}
 	return err