pkg/testutil: ForceGosched -> WaitSchedule

ForceGosched() performs badly when GOMAXPROCS>1. When GOMAXPROCS=1,
it can guarantee that other goroutines have run long enough, because
every call yields the processor to them. But it cannot yield the
processor to goroutines running on other processors, so when
GOMAXPROCS>1 the yield loop may return while a goroutine on another
processor has run for only a short time.

Here is a test to confirm the case:

```go
package main

import (
	"fmt"
	"runtime"
	"testing"
)

func ForceGosched() {
	// 10000 yields should give up to 10 goroutines a chance to run.
	for i := 0; i < 10000; i++ {
		runtime.Gosched()
	}
}

var d int

func loop(c chan struct{}) {
	for {
		select {
		case <-c:
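			// simulate some work before recording progress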
			for i := 0; i < 1000; i++ {
				fmt.Sprintf("come to time %d", i)
			}
			d++
		}
	}
}

func TestLoop(t *testing.T) {
	c := make(chan struct{}, 1)
	go loop(c)
	c <- struct{}{}
	ForceGosched()
	if d != 1 {
		t.Fatal("d is not incremented")
	}
}
```

`go test -v -race` passes, but `GOMAXPROCS=2 go test -v -race` fails.
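
For contrast, a version of the same check that synchronizes explicitly passes under either setting, because the channel receive gives the test a happens-before edge with the increment. This is only a sketch to illustrate the difference, not part of the commit:

```go
var d2 int

// loopAck is like loop, but acknowledges each unit of work on done,
// so the test waits for the work instead of guessing at scheduling.
func loopAck(c, done chan struct{}) {
	for range c {
		d2++
		done <- struct{}{}
	}
}

func TestLoopAck(t *testing.T) {
	c, done := make(chan struct{}, 1), make(chan struct{})
	go loopAck(c, done)
	c <- struct{}{}
	<-done // the send in loopAck happens after d2++, so this read is safe
	if d2 != 1 {
		t.Fatal("d2 is not incremented")
	}
}
```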

Change the functionality to wait for scheduling to happen.
Author: Yicheng Qin
Date:   2015-06-09 19:26:23 -07:00
parent  05b55d9d75
commit  018fb8e6d9

6 changed files with 21 additions and 25 deletions

```diff
@@ -607,7 +607,7 @@ func TestSync(t *testing.T) {
 	})
 	srv.sync(10 * time.Second)
 	timer.Stop()
-	testutil.ForceGosched()
+	testutil.WaitSchedule()
 	action := n.Action()
 	if len(action) != 1 {
@@ -642,7 +642,7 @@ func TestSyncTimeout(t *testing.T) {
 	timer.Stop()
 	// give time for goroutine in sync to cancel
-	testutil.ForceGosched()
+	testutil.WaitSchedule()
 	w := []testutil.Action{{Name: "Propose blocked"}}
 	if g := n.Action(); !reflect.DeepEqual(g, w) {
 		t.Errorf("action = %v, want %v", g, w)
@@ -676,7 +676,7 @@ func TestSyncTrigger(t *testing.T) {
 	}
 	// trigger a sync request
 	st <- time.Time{}
-	testutil.ForceGosched()
+	testutil.WaitSchedule()
 	action := n.Action()
 	if len(action) != 1 {
@@ -710,7 +710,7 @@ func TestSnapshot(t *testing.T) {
 		store: st,
 	}
 	srv.snapshot(1, raftpb.ConfState{Nodes: []uint64{1}})
-	testutil.ForceGosched()
+	testutil.WaitSchedule()
 	gaction := st.Action()
 	if len(gaction) != 2 {
 		t.Fatalf("len(action) = %d, want 1", len(gaction))
@@ -786,7 +786,7 @@ func TestRecvSnapshot(t *testing.T) {
 	s.start()
 	n.readyc <- raft.Ready{Snapshot: raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{Index: 1}}}
 	// make goroutines move forward to receive snapshot
-	testutil.ForceGosched()
+	testutil.WaitSchedule()
 	s.Stop()
 	wactions := []testutil.Action{{Name: "Recovery"}}
@@ -827,7 +827,7 @@ func TestApplySnapshotAndCommittedEntries(t *testing.T) {
 		},
 	}
 	// make goroutines move forward to receive snapshot
-	testutil.ForceGosched()
+	testutil.WaitSchedule()
 	s.Stop()
 	actions := st.Action()
```

```diff
@@ -16,17 +16,13 @@ package testutil
 import (
 	"net/url"
-	"runtime"
 	"testing"
+	"time"
 )
 
-// WARNING: This is a hack.
-// Remove this when we are able to block/check the status of the go-routines.
-func ForceGosched() {
-	// possibility enough to sched up to 10 go routines.
-	for i := 0; i < 10000; i++ {
-		runtime.Gosched()
-	}
+// TODO: improve this when we are able to know the schedule or status of target go-routine.
+func WaitSchedule() {
+	time.Sleep(3 * time.Millisecond)
 }
 
 func MustNewURLs(t *testing.T, urls []string) []url.URL {
```
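
A fixed 3ms sleep is still a guess, as the TODO acknowledges. One direction it points toward is polling an observable condition with a deadline; a minimal sketch of such a helper (hypothetical, not part of this commit) could look like:

```go
package testutil

import "time"

// WaitCondition polls cond until it reports true or timeout elapses.
// Hypothetical helper, not part of this commit.
func WaitCondition(timeout time.Duration, cond func() bool) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(time.Millisecond)
	}
	return cond()
}
```

A caller could then wait on a concrete predicate, for example `func() bool { return len(n.Action()) == 1 }`, rather than on a fixed duration.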

```diff
@@ -199,7 +199,7 @@ func TestBlockProposal(t *testing.T) {
 		errc <- n.Propose(context.TODO(), []byte("somedata"))
 	}()
-	testutil.ForceGosched()
+	testutil.WaitSchedule()
 	select {
 	case err := <-errc:
 		t.Errorf("err = %v, want blocking", err)
@@ -207,7 +207,7 @@ func TestBlockProposal(t *testing.T) {
 	}
 	n.Campaign(context.TODO())
-	testutil.ForceGosched()
+	testutil.WaitSchedule()
 	select {
 	case err := <-errc:
 		if err != nil {
```

```diff
@@ -39,7 +39,7 @@ func TestPipelineSend(t *testing.T) {
 	p := newPipeline(tr, picker, types.ID(2), types.ID(1), types.ID(1), fs, &fakeRaft{}, nil)
 	p.msgc <- raftpb.Message{Type: raftpb.MsgApp}
-	testutil.ForceGosched()
+	testutil.WaitSchedule()
 	p.stop()
 	if tr.Request() == nil {
@@ -60,7 +60,7 @@ func TestPipelineExceedMaximalServing(t *testing.T) {
 	// keep the sender busy and make the buffer full
 	// nothing can go out as we block the sender
-	testutil.ForceGosched()
+	testutil.WaitSchedule()
 	for i := 0; i < connPerPipeline+pipelineBufSize; i++ {
 		select {
 		case p.msgc <- raftpb.Message{}:
@@ -68,7 +68,7 @@ func TestPipelineExceedMaximalServing(t *testing.T) {
 			t.Errorf("failed to send out message")
 		}
 		// force the sender to grab data
-		testutil.ForceGosched()
+		testutil.WaitSchedule()
 	}
 	// try to send a data when we are sure the buffer is full
@@ -80,7 +80,7 @@ func TestPipelineExceedMaximalServing(t *testing.T) {
 	// unblock the senders and force them to send out the data
 	tr.unblock()
-	testutil.ForceGosched()
+	testutil.WaitSchedule()
 	// It could send new data after previous ones succeed
 	select {
@@ -99,7 +99,7 @@ func TestPipelineSendFailed(t *testing.T) {
 	p := newPipeline(newRespRoundTripper(0, errors.New("blah")), picker, types.ID(2), types.ID(1), types.ID(1), fs, &fakeRaft{}, nil)
 	p.msgc <- raftpb.Message{Type: raftpb.MsgApp}
-	testutil.ForceGosched()
+	testutil.WaitSchedule()
 	p.stop()
 	fs.Lock()
```

```diff
@@ -33,7 +33,7 @@ func TestStreamWriterAttachOutgoingConn(t *testing.T) {
 		prevwfc := wfc
 		wfc = &fakeWriteFlushCloser{}
 		sw.attach(&outgoingConn{t: streamTypeMessage, Writer: wfc, Flusher: wfc, Closer: wfc})
-		testutil.ForceGosched()
+		testutil.WaitSchedule()
 		// previous attached connection should be closed
 		if prevwfc != nil && prevwfc.closed != true {
 			t.Errorf("#%d: close of previous connection = %v, want true", i, prevwfc.closed)
@@ -44,7 +44,7 @@ func TestStreamWriterAttachOutgoingConn(t *testing.T) {
 		}
 		sw.msgc <- raftpb.Message{}
-		testutil.ForceGosched()
+		testutil.WaitSchedule()
 		// still working
 		if _, ok := sw.writec(); ok != true {
 			t.Errorf("#%d: working status = %v, want true", i, ok)
@@ -73,7 +73,7 @@ func TestStreamWriterAttachBadOutgoingConn(t *testing.T) {
 	sw.attach(&outgoingConn{t: streamTypeMessage, Writer: wfc, Flusher: wfc, Closer: wfc})
 	sw.msgc <- raftpb.Message{}
-	testutil.ForceGosched()
+	testutil.WaitSchedule()
 	// no longer working
 	if _, ok := sw.writec(); ok != false {
 		t.Errorf("working = %v, want false", ok)
```

```diff
@@ -137,7 +137,7 @@ func TestTransportErrorc(t *testing.T) {
 	}
 	tr.peers[1].Send(raftpb.Message{})
-	testutil.ForceGosched()
+	testutil.WaitSchedule()
 	select {
 	case <-errorc:
 	default:
```