mirror of https://github.com/etcd-io/etcd.git
comments: fix comments as per goword in go test files
Comments fixed as per goword in the go test files that the shell function
go_srcs_in_module lists, as per the changes on #14827.

Helps in #14827

Signed-off-by: Bhargav Ravuri <bhargav.ravuri@infracloud.io>
parent cc77eb1011
commit 2feec4fe68
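For context, goword (the comment checker named in the commit message) is run over the files that go_srcs_in_module lists, and the convention every hunk below applies is the standard Go doc-comment rule: a comment on an exported identifier should begin with that identifier's name. A minimal sketch of the before/after shape, using the TestEmpty case from the first hunk (the package name here is illustrative):

package main

import "testing"

// The old comment read "// Empty test to avoid no-tests warning." and was
// flagged because it does not begin with the identifier it documents.
// Leading with the identifier's name, as below, satisfies the checker:

// TestEmpty is an empty test to avoid no-tests warning.
func TestEmpty(t *testing.T) {}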
@@ -36,7 +36,7 @@ func SplitTestArgs(args []string) (testArgs, appArgs []string) {
 	return
 }
 
-// Empty test to avoid no-tests warning.
+// TestEmpty is an empty test to avoid no-tests warning.
 func TestEmpty(t *testing.T) {}
 
 /**

@@ -36,7 +36,7 @@ func SplitTestArgs(args []string) (testArgs, appArgs []string) {
 	return
 }
 
-// Empty test to avoid no-tests warning.
+// TestEmpty is empty test to avoid no-tests warning.
 func TestEmpty(t *testing.T) {}
 
 /**

@@ -46,7 +46,7 @@ func TestPageWriterRandom(t *testing.T) {
 	t.Logf("total write bytes: %d (of %d)", cw.writeBytes, n)
 }
 
-// TestPageWriterPariallack tests the case where a write overflows the buffer
+// TestPageWriterPartialSlack tests the case where a write overflows the buffer
 // but there is not enough data to complete the slack write.
 func TestPageWriterPartialSlack(t *testing.T) {
 	defaultBufferBytes = 1024

@@ -81,7 +81,7 @@ func TestNodeStep(t *testing.T) {
 	}
 }
 
-// Cancel and Stop should unblock Step()
+// TestNodeStepUnblock should Cancel and Stop should unblock Step()
 func TestNodeStepUnblock(t *testing.T) {
 	// a node without buffer to block step
 	n := &node{

@@ -1236,8 +1236,8 @@ func TestPastElectionTimeout(t *testing.T) {
 	}
 }
 
-// ensure that the Step function ignores the message from old term and does not pass it to the
-// actual stepX function.
+// TestStepIgnoreOldTermMsg to ensure that the Step function ignores the message
+// from old term and does not pass it to the actual stepX function.
 func TestStepIgnoreOldTermMsg(t *testing.T) {
 	called := false
 	fakeStep := func(r *raft, m pb.Message) error {

@@ -2509,7 +2509,7 @@ func TestLeaderAppResp(t *testing.T) {
 	}
 }
 
-// When the leader receives a heartbeat tick, it should
+// TestBcastBeat is when the leader receives a heartbeat tick, it should
 // send a MsgHeartbeat with m.Index = 0, m.LogTerm=0 and empty entries.
 func TestBcastBeat(t *testing.T) {
 	offset := uint64(1000)

@@ -2569,7 +2569,7 @@ func TestBcastBeat(t *testing.T) {
 	}
 }
 
-// tests the output of the state machine when receiving MsgBeat
+// TestRecvMsgBeat tests the output of the state machine when receiving MsgBeat
 func TestRecvMsgBeat(t *testing.T) {
 	tests := []struct {
 		state StateType

@@ -2848,7 +2848,7 @@ func TestRestoreWithLearner(t *testing.T) {
 	}
 }
 
-// / Tests if outgoing voter can receive and apply snapshot correctly.
+// TestRestoreWithVotersOutgoing tests if outgoing voter can receive and apply snapshot correctly.
 func TestRestoreWithVotersOutgoing(t *testing.T) {
 	s := pb.Snapshot{
 		Metadata: pb.SnapshotMetadata{

@@ -4340,12 +4340,12 @@ func testConfChangeCheckBeforeCampaign(t *testing.T, v2 bool) {
 	}
 }
 
-// Tests if unapplied ConfChange is checked before campaign.
+// TestConfChangeCheckBeforeCampaign tests if unapplied ConfChange is checked before campaign.
 func TestConfChangeCheckBeforeCampaign(t *testing.T) {
 	testConfChangeCheckBeforeCampaign(t, false)
 }
 
-// Tests if unapplied ConfChangeV2 is checked before campaign.
+// TestConfChangeV2CheckBeforeCampaign tests if unapplied ConfChangeV2 is checked before campaign.
 func TestConfChangeV2CheckBeforeCampaign(t *testing.T) {
 	testConfChangeCheckBeforeCampaign(t, true)
 }

@@ -36,22 +36,22 @@ type rawNodeAdapter struct {
 
 var _ Node = (*rawNodeAdapter)(nil)
 
-// Node specifies lead, which is pointless, can just be filled in.
+// TransferLeadership is to test when node specifies lead, which is pointless, can just be filled in.
 func (a *rawNodeAdapter) TransferLeadership(ctx context.Context, lead, transferee uint64) {
 	a.RawNode.TransferLeader(transferee)
 }
 
-// Node has a goroutine, RawNode doesn't need this.
+// Stop when node has a goroutine, RawNode doesn't need this.
 func (a *rawNodeAdapter) Stop() {}
 
-// RawNode returns a *Status.
+// Status retirns RawNode's status as *Status.
 func (a *rawNodeAdapter) Status() Status { return a.RawNode.Status() }
 
-// RawNode takes a Ready. It doesn't really have to do that I think? It can hold on
+// Advance is when RawNode takes a Ready. It doesn't really have to do that I think? It can hold on
 // to it internally. But maybe that approach is frail.
 func (a *rawNodeAdapter) Advance() { a.RawNode.Advance(Ready{}) }
 
-// RawNode returns a Ready, not a chan of one.
+// Ready when RawNode returns a Ready, not a chan of one.
 func (a *rawNodeAdapter) Ready() <-chan Ready { return nil }
 
 // Node takes more contexts. Easy enough to fix.

@@ -68,7 +68,7 @@ func TestNewAuthStoreRevision(t *testing.T) {
 	}
 }
 
-// TestNewAuthStoreBryptCost ensures that NewAuthStore uses default when given bcrypt-cost is invalid
+// TestNewAuthStoreBcryptCost ensures that NewAuthStore uses default when given bcrypt-cost is invalid
 func TestNewAuthStoreBcryptCost(t *testing.T) {
 	tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
 	if err != nil {

@@ -779,7 +779,7 @@ func TestIsAuthEnabled(t *testing.T) {
 	}
 }
 
-// TestAuthRevisionRace ensures that access to authStore.revision is thread-safe.
+// TestAuthInfoFromCtxRace ensures that access to authStore.revision is thread-safe.
 func TestAuthInfoFromCtxRace(t *testing.T) {
 	tp, err := NewTokenProvider(zaptest.NewLogger(t), tokenTypeSimple, dummyIndexWaiter, simpleTokenTTLDefault)
 	if err != nil {

@@ -21,7 +21,7 @@ import (
 	"github.com/stretchr/testify/assert"
 )
 
-// Ensure that a successful Get is recorded in the stats.
+// TestStoreStatsGetSuccess ensures that a successful Get is recorded in the stats.
 func TestStoreStatsGetSuccess(t *testing.T) {
 	s := newStore()
 	s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})

@@ -29,7 +29,7 @@ func TestStoreStatsGetSuccess(t *testing.T) {
 	assert.Equal(t, uint64(1), s.Stats.GetSuccess, "")
 }
 
-// Ensure that a failed Get is recorded in the stats.
+// TestStoreStatsGetFail ensures that a failed Get is recorded in the stats.
 func TestStoreStatsGetFail(t *testing.T) {
 	s := newStore()
 	s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})

@@ -37,14 +37,14 @@ func TestStoreStatsGetFail(t *testing.T) {
 	assert.Equal(t, uint64(1), s.Stats.GetFail, "")
 }
 
-// Ensure that a successful Create is recorded in the stats.
+// TestStoreStatsCreateSuccess ensures that a successful Create is recorded in the stats.
 func TestStoreStatsCreateSuccess(t *testing.T) {
 	s := newStore()
 	s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})
 	assert.Equal(t, uint64(1), s.Stats.CreateSuccess, "")
 }
 
-// Ensure that a failed Create is recorded in the stats.
+// TestStoreStatsCreateFail ensures that a failed Create is recorded in the stats.
 func TestStoreStatsCreateFail(t *testing.T) {
 	s := newStore()
 	s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent})

@@ -52,7 +52,7 @@ func TestStoreStatsCreateFail(t *testing.T) {
 	assert.Equal(t, uint64(1), s.Stats.CreateFail, "")
 }
 
-// Ensure that a successful Update is recorded in the stats.
+// TestStoreStatsUpdateSuccess ensures that a successful Update is recorded in the stats.
 func TestStoreStatsUpdateSuccess(t *testing.T) {
 	s := newStore()
 	s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})

@@ -60,14 +60,14 @@ func TestStoreStatsUpdateSuccess(t *testing.T) {
 	assert.Equal(t, uint64(1), s.Stats.UpdateSuccess, "")
 }
 
-// Ensure that a failed Update is recorded in the stats.
+// TestStoreStatsUpdateFail ensures that a failed Update is recorded in the stats.
 func TestStoreStatsUpdateFail(t *testing.T) {
 	s := newStore()
 	s.Update("/foo", "bar", TTLOptionSet{ExpireTime: Permanent})
 	assert.Equal(t, uint64(1), s.Stats.UpdateFail, "")
 }
 
-// Ensure that a successful CAS is recorded in the stats.
+// TestStoreStatsCompareAndSwapSuccess ensures that a successful CAS is recorded in the stats.
 func TestStoreStatsCompareAndSwapSuccess(t *testing.T) {
 	s := newStore()
 	s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})

@@ -75,7 +75,7 @@ func TestStoreStatsCompareAndSwapSuccess(t *testing.T) {
 	assert.Equal(t, uint64(1), s.Stats.CompareAndSwapSuccess, "")
 }
 
-// Ensure that a failed CAS is recorded in the stats.
+// TestStoreStatsCompareAndSwapFail ensures that a failed CAS is recorded in the stats.
 func TestStoreStatsCompareAndSwapFail(t *testing.T) {
 	s := newStore()
 	s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})

@@ -83,7 +83,7 @@ func TestStoreStatsCompareAndSwapFail(t *testing.T) {
 	assert.Equal(t, uint64(1), s.Stats.CompareAndSwapFail, "")
 }
 
-// Ensure that a successful Delete is recorded in the stats.
+// TestStoreStatsDeleteSuccess ensures that a successful Delete is recorded in the stats.
 func TestStoreStatsDeleteSuccess(t *testing.T) {
 	s := newStore()
 	s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent})

@@ -91,14 +91,14 @@ func TestStoreStatsDeleteSuccess(t *testing.T) {
 	assert.Equal(t, uint64(1), s.Stats.DeleteSuccess, "")
 }
 
-// Ensure that a failed Delete is recorded in the stats.
+// TestStoreStatsDeleteFail ensures that a failed Delete is recorded in the stats.
 func TestStoreStatsDeleteFail(t *testing.T) {
 	s := newStore()
 	s.Delete("/foo", false, false)
 	assert.Equal(t, uint64(1), s.Stats.DeleteFail, "")
 }
 
-// Ensure that the number of expirations is recorded in the stats.
+// TestStoreStatsExpireCount ensures that the number of expirations is recorded in the stats.
 func TestStoreStatsExpireCount(t *testing.T) {
 	s := newStore()
 	fc := newFakeClock()

@@ -25,7 +25,7 @@ import (
 	"github.com/jonboulle/clockwork"
 )
 
-// Ensure that any TTL <= minExpireTime becomes Permanent
+// TestMinExpireTime ensures that any TTL <= minExpireTime becomes Permanent
 func TestMinExpireTime(t *testing.T) {
 	s := newStore()
 	fc := clockwork.NewFakeClock()

@@ -45,7 +45,7 @@ func TestMinExpireTime(t *testing.T) {
 	assert.Equal(t, e.Node.TTL, int64(0))
 }
 
-// Ensure that the store can recursively retrieve a directory listing.
+// TestStoreGetDirectory ensures that the store can recursively retrieve a directory listing.
 // Note that hidden files should not be returned.
 func TestStoreGetDirectory(t *testing.T) {
 	s := newStore()

@@ -94,7 +94,7 @@ func TestStoreGetDirectory(t *testing.T) {
 	}
 }
 
-// Ensure that the store can update the TTL on a value.
+// TestStoreUpdateValueTTL ensures that the store can update the TTL on a value.
 func TestStoreUpdateValueTTL(t *testing.T) {
 	s := newStore()
 	fc := newFakeClock()

@@ -114,7 +114,7 @@ func TestStoreUpdateValueTTL(t *testing.T) {
 	assert.Equal(t, err.(*v2error.Error).ErrorCode, v2error.EcodeKeyNotFound)
 }
 
-// Ensure that the store can update the TTL on a directory.
+// TestStoreUpdateDirTTL ensures that the store can update the TTL on a directory.
 func TestStoreUpdateDirTTL(t *testing.T) {
 	s := newStore()
 	fc := newFakeClock()

@@ -140,7 +140,7 @@ func TestStoreUpdateDirTTL(t *testing.T) {
 	assert.Equal(t, err.(*v2error.Error).ErrorCode, v2error.EcodeKeyNotFound)
 }
 
-// Ensure that the store can watch for key expiration.
+// TestStoreWatchExpire ensures that the store can watch for key expiration.
 func TestStoreWatchExpire(t *testing.T) {
 	s := newStore()
 	fc := newFakeClock()

@@ -178,7 +178,7 @@ func TestStoreWatchExpire(t *testing.T) {
 	assert.Equal(t, e.Node.Dir, true)
 }
 
-// Ensure that the store can watch for key expiration when refreshing.
+// TestStoreWatchExpireRefresh ensures that the store can watch for key expiration when refreshing.
 func TestStoreWatchExpireRefresh(t *testing.T) {
 	s := newStore()
 	fc := newFakeClock()

@@ -214,7 +214,7 @@ func TestStoreWatchExpireRefresh(t *testing.T) {
 	assert.Equal(t, e.Node.Key, "/foofoo")
 }
 
-// Ensure that the store can watch for key expiration when refreshing with an empty value.
+// TestStoreWatchExpireEmptyRefresh ensures that the store can watch for key expiration when refreshing with an empty value.
 func TestStoreWatchExpireEmptyRefresh(t *testing.T) {
 	s := newStore()
 	fc := newFakeClock()

@@ -239,7 +239,7 @@ func TestStoreWatchExpireEmptyRefresh(t *testing.T) {
 	assert.Equal(t, *e.PrevNode.Value, "bar")
 }
 
-// Update TTL of a key (set TTLOptionSet.Refresh to false) and send notification
+// TestStoreWatchNoRefresh updates TTL of a key (set TTLOptionSet.Refresh to false) and send notification
 func TestStoreWatchNoRefresh(t *testing.T) {
 	s := newStore()
 	fc := newFakeClock()

@@ -265,7 +265,7 @@ func TestStoreWatchNoRefresh(t *testing.T) {
 	assert.Equal(t, *e.PrevNode.Value, "bar")
 }
 
-// Ensure that the store can update the TTL on a value with refresh.
+// TestStoreRefresh ensures that the store can update the TTL on a value with refresh.
 func TestStoreRefresh(t *testing.T) {
 	s := newStore()
 	fc := newFakeClock()

@@ -287,7 +287,7 @@ func TestStoreRefresh(t *testing.T) {
 	testutil.AssertNil(t, err)
 }
 
-// Ensure that the store can recover from a previously saved state that includes an expiring key.
+// TestStoreRecoverWithExpiration ensures that the store can recover from a previously saved state that includes an expiring key.
 func TestStoreRecoverWithExpiration(t *testing.T) {
 	s := newStore()
 	s.clock = newFakeClock()

@@ -321,7 +321,7 @@ func TestStoreRecoverWithExpiration(t *testing.T) {
 	testutil.AssertNil(t, e)
 }
 
-// Ensure that the store doesn't see expirations of hidden keys.
+// TestStoreWatchExpireWithHiddenKey ensures that the store doesn't see expirations of hidden keys.
 func TestStoreWatchExpireWithHiddenKey(t *testing.T) {
 	s := newStore()
 	fc := newFakeClock()

@@ -35,7 +35,7 @@ type fakeKVForClusterSize struct {
 	clusterSizeStr string
 }
 
-// We only need to overwrite the method `Get`.
+// Get when we only need to overwrite the method `Get`.
 func (fkv *fakeKVForClusterSize) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
 	if fkv.clusterSizeStr == "" {
 		// cluster size isn't configured in this case.

@@ -108,7 +108,7 @@ type fakeKVForClusterMembers struct {
 	members []memberInfo
 }
 
-// We only need to overwrite method `Get`.
+// Get when we only need to overwrite method `Get`.
 func (fkv *fakeKVForClusterMembers) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
 	kvs := memberInfoToKeyValues(fkv.members)
 

@@ -230,7 +230,7 @@ type fakeKVForCheckCluster struct {
 	getMembersRetries int
 }
 
-// We only need to overwrite method `Get`.
+// Get when we only need to overwrite method `Get`.
 func (fkv *fakeKVForCheckCluster) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {
 	clusterSizeKey := fmt.Sprintf("/_etcd/registry/%s/_config/size", fkv.token)
 	clusterMembersKey := fmt.Sprintf("/_etcd/registry/%s/members", fkv.token)

@@ -420,7 +420,7 @@ type fakeKVForRegisterSelf struct {
 	retries int
 }
 
-// We only need to overwrite method `Put`.
+// Put when we only need to overwrite method `Put`.
 func (fkv *fakeKVForRegisterSelf) Put(ctx context.Context, key string, val string, opts ...clientv3.OpOption) (*clientv3.PutResponse, error) {
 	if key != fkv.expectedRegKey {
 		fkv.t.Errorf("unexpected register key, expected: %s, got: %s", fkv.expectedRegKey, key)

@@ -515,7 +515,7 @@ type fakeWatcherForWaitPeers struct {
 	members []memberInfo
 }
 
-// We only need to overwrite method `Watch`.
+// Watch we only need to overwrite method `Watch`.
 func (fw *fakeWatcherForWaitPeers) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
 	expectedWatchKey := fmt.Sprintf("/_etcd/registry/%s/members", fw.token)
 	if key != expectedWatchKey {

@@ -278,9 +278,8 @@ func TestProcessDuplicatedAppRespMessage(t *testing.T) {
 	}
 }
 
-// Test that none of the expvars that get added during init panic.
-// This matters if another package imports etcdserver,
-// doesn't use it, but does use expvars.
+// TestExpvarWithNoRaftStatus to test that none of the expvars that get added during init panic.
+// This matters if another package imports etcdserver, doesn't use it, but does use expvars.
 func TestExpvarWithNoRaftStatus(t *testing.T) {
 	defer func() {
 		if err := recover(); err != nil {

@@ -1014,7 +1014,7 @@ func TestSyncTrigger(t *testing.T) {
 	<-n.Chan()
 }
 
-// snapshot should snapshot the store and cut the persistent
+// TestSnapshot should snapshot the store and cut the persistent
 func TestSnapshot(t *testing.T) {
 	be, _ := betesting.NewDefaultTmpBackend(t)
 

@@ -1170,7 +1170,7 @@ func TestSnapshotOrdering(t *testing.T) {
 	}
 }
 
-// Applied > SnapshotCount should trigger a SaveSnap event
+// TestTriggerSnap for Applied > SnapshotCount should trigger a SaveSnap event
 func TestTriggerSnap(t *testing.T) {
 	be, tmpPath := betesting.NewDefaultTmpBackend(t)
 	defer func() {

@@ -1519,7 +1519,7 @@ func TestPublishV3(t *testing.T) {
 		Name: "node1", ClientUrls: []string{"http://a", "http://b"}}}, r.ClusterMemberAttrSet)
 }
 
-// TestPublishStopped tests that publish will be stopped if server is stopped.
+// TestPublishV3Stopped tests that publish will be stopped if server is stopped.
 func TestPublishV3Stopped(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	r := newRaftNode(raftNodeConfig{

@@ -1547,7 +1547,7 @@ func TestPublishV3Stopped(t *testing.T) {
 	srv.publishV3(time.Hour)
 }
 
-// TestPublishRetry tests that publish will keep retry until success.
+// TestPublishV3Retry tests that publish will keep retry until success.
 func TestPublishV3Retry(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	n := newNodeRecorderStream()

@@ -32,7 +32,7 @@ func BenchmarkLessorRevoke100000(b *testing.B) { benchmarkLessorRevoke(100000, b
 func BenchmarkLessorRenew1000(b *testing.B) { benchmarkLessorRenew(1000, b) }
 func BenchmarkLessorRenew100000(b *testing.B) { benchmarkLessorRenew(100000, b) }
 
-// Use findExpired10000 replace findExpired1000, which takes too long.
+// BenchmarkLessorFindExpired10000 uses findExpired10000 replace findExpired1000, which takes too long.
 func BenchmarkLessorFindExpired10000(b *testing.B) { benchmarkLessorFindExpired(10000, b) }
 func BenchmarkLessorFindExpired100000(b *testing.B) { benchmarkLessorFindExpired(100000, b) }
 

@@ -27,9 +27,9 @@ import (
 	"go.uber.org/zap/zaptest"
 )
 
-// Test HashByRevValue values to ensure we don't change the output which would
-// have catastrophic consequences. Expected output is just hardcoded, so please
-// regenerate it every time you change input parameters.
+// TestHashByRevValue test HashByRevValue values to ensure we don't change the
+// output which would have catastrophic consequences. Expected output is just
+// hardcoded, so please regenerate it every time you change input parameters.
 func TestHashByRevValue(t *testing.T) {
 	b, _ := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})

@@ -127,6 +127,7 @@ func testHashByRev(t *testing.T, s *store, rev int64) KeyValueHash {
 	return hash
 }
 
+// TestCompactionHash tests compaction hash
 // TODO: Change this to fuzz test
 func TestCompactionHash(t *testing.T) {
 	b, _ := betesting.NewDefaultTmpBackend(t)

@@ -530,7 +530,8 @@ func cloneGeneration(g *generation) *generation {
 	return &generation{g.ver, g.created, tmp}
 }
 
-// test that compact on version that higher than last modified version works well
+// TestKeyIndexCompactOnFurtherRev tests that compact on version that
+// higher than last modified version works well
 func TestKeyIndexCompactOnFurtherRev(t *testing.T) {
 	ki := &keyIndex{key: []byte("foo")}
 	ki.put(zaptest.NewLogger(t), 1, 0)

@@ -394,7 +394,8 @@ func testKVPutWithSameLease(t *testing.T, f putFunc) {
 	}
 }
 
-// test that range, put, delete on single key in sequence repeatedly works correctly.
+// TestKVOperationInSequence tests that range, put, delete on single key in
+// sequence repeatedly works correctly.
 func TestKVOperationInSequence(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})

@@ -496,7 +497,8 @@ func TestKVTxnNonBlockRange(t *testing.T) {
 	}
 }
 
-// test that txn range, put, delete on single key in sequence repeatedly works correctly.
+// TestKVTxnOperationInSequence tests that txn range, put, delete on single key
+// in sequence repeatedly works correctly.
 func TestKVTxnOperationInSequence(t *testing.T) {
 	b, tmpPath := betesting.NewDefaultTmpBackend(t)
 	s := NewStore(zaptest.NewLogger(t), b, &lease.FakeLessor{}, StoreConfig{})

@@ -94,7 +94,7 @@ func BenchmarkConsistentIndex(b *testing.B) {
 	}
 }
 
-// BenchmarkStoreTxnPutUpdate is same as above, but instead updates single key
+// BenchmarkStorePutUpdate is same as above, but instead updates single key
 func BenchmarkStorePutUpdate(b *testing.B) {
 	be, tmpPath := betesting.NewDefaultTmpBackend(b)
 	s := NewStore(zaptest.NewLogger(b), be, &lease.FakeLessor{}, StoreConfig{})

@@ -112,11 +112,12 @@ func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
 	}
 }
 
-// Benchmarks on cancel function performance for unsynced watchers
-// in a WatchableStore. It creates k*N watchers to populate unsynced
-// with a reasonably large number of watchers. And measures the time it
-// takes to cancel N watchers out of k*N watchers. The performance is
-// expected to differ depending on the unsynced member implementation.
+// BenchmarkWatchableStoreUnsyncedCancel benchmarks on cancel function
+// performance for unsynced watchers in a WatchableStore. It creates
+// k*N watchers to populate unsynced with a reasonably large number of
+// watchers. And measures the time it takes to cancel N watchers out
+// of k*N watchers. The performance is expected to differ depending on
+// the unsynced member implementation.
 // TODO: k is an arbitrary constant. We need to figure out what factor
 // we should put to simulate the real-world use cases.
 func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {

@@ -252,6 +252,7 @@ func TestVerify(t *testing.T) {
 	}
 }
 
+// TestCut tests cut
 // TODO: split it into smaller tests for better readability
 func TestCut(t *testing.T) {
 	p := t.TempDir()

@@ -30,9 +30,9 @@ func unitClusterTestCases() []testCase {
 	return nil
 }
 
-// When a build tag (e.g. e2e or integration) isn't configured in IDE,
-// then IDE may complain "Unresolved reference 'WithAuth'". So we need
-// to define a default WithAuth to resolve such case.
+// WithAuth is when a build tag (e.g. e2e or integration) isn't configured
+// in IDE, then IDE may complain "Unresolved reference 'WithAuth'".
+// So we need to define a default WithAuth to resolve such case.
 func WithAuth(userName, password string) config.ClientOption {
 	return func(any) {}
 }

@@ -94,7 +94,8 @@ func snapshotCorruptTest(cx ctlCtx) {
 	require.ErrorContains(cx.t, serr, "Error: expected sha256")
 }
 
-// This test ensures that the snapshot status does not modify the snapshot file
+// TestCtlV3SnapshotStatusBeforeRestore ensures that the snapshot
+// status does not modify the snapshot file
 func TestCtlV3SnapshotStatusBeforeRestore(t *testing.T) {
 	testCtl(t, snapshotStatusBeforeRestoreTest)
 }

@@ -278,7 +279,8 @@ func testIssue6361(t *testing.T) {
 	t.Log("Test logic done")
 }
 
-// For storageVersion to be stored, all fields expected 3.6 fields need to be set. This happens after first WAL snapshot.
+// TestCtlV3SnapshotVersion is for storageVersion to be stored, all fields
+// expected 3.6 fields need to be set. This happens after first WAL snapshot.
 // In this test we lower SnapshotCount to 1 to ensure WAL snapshot is triggered.
 func TestCtlV3SnapshotVersion(t *testing.T) {
 	testCtl(t, snapshotVersionTest, withCfg(*e2e.NewConfig(e2e.WithSnapshotCount(1))))

@@ -27,6 +27,7 @@ import (
 	"go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
+// TestMixVersionsSendSnapshot tests the mix version send snapshots
 // TODO(ahrtr): add network partition scenario to trigger snapshots.
 func TestMixVersionsSendSnapshot(t *testing.T) {
 	cases := []struct {

@@ -51,6 +51,7 @@ func TestWarningApplyDuration(t *testing.T) {
 	e2e.AssertProcessLogs(t, epc.Procs[0], "request stats")
 }
 
+// TestExperimentalWarningApplyDuration tests the experimental warning apply duration
 // TODO: this test is a duplicate of TestWarningApplyDuration except it uses --experimental-warning-unary-request-duration
 // Remove this test after --experimental-warning-unary-request-duration flag is removed.
 func TestExperimentalWarningApplyDuration(t *testing.T) {

@@ -32,7 +32,7 @@ import (
 	"go.etcd.io/etcd/tests/v3/framework/testutils"
 )
 
-// NO TLS
+// TestV3Curl_MaxStreams_BelowLimit_NoTLS_Small tests no TLS
 func TestV3Curl_MaxStreams_BelowLimit_NoTLS_Small(t *testing.T) {
 	testV3CurlMaxStream(t, false, withCfg(*e2e.NewConfigNoTLS()), withMaxConcurrentStreams(3))
 }

@@ -58,7 +58,7 @@ func TestV3Curl_MaxStreams_ReachLimit_NoTLS_Medium(t *testing.T) {
 	testV3CurlMaxStream(t, true, withCfg(*e2e.NewConfigNoTLS()), withMaxConcurrentStreams(100), withTestTimeout(20*time.Second))
 }
 
-// TLS
+// TestV3Curl_MaxStreams_BelowLimit_TLS_Small tests with TLS
 func TestV3Curl_MaxStreams_BelowLimit_TLS_Small(t *testing.T) {
 	testV3CurlMaxStream(t, false, withCfg(*e2e.NewConfigTLS()), withMaxConcurrentStreams(3))
 }

@@ -70,6 +70,7 @@ func TestMaintenanceHashKV(t *testing.T) {
 	}
 }
 
+// TestCompactionHash tests compaction hash
 // TODO: Change this to fuzz test
 func TestCompactionHash(t *testing.T) {
 	integration2.BeforeTest(t)

@@ -246,7 +247,7 @@ func TestMaintenanceSnapshotWithVersionErrorInflight(t *testing.T) {
 	})
 }
 
-// TestMaintenanceSnapshotError ensures that ReaderCloser returned by Snapshot function
+// TestMaintenanceSnapshotErrorInflight ensures that ReaderCloser returned by Snapshot function
 // will fail to read with corresponding context errors on inflight context cancel timeout.
 func TestMaintenanceSnapshotErrorInflight(t *testing.T) {
 	testMaintenanceSnapshotErrorInflight(t, func(ctx context.Context, client *clientv3.Client) (io.ReadCloser, error) {

@@ -110,7 +110,8 @@ func authSetupRoot(t *testing.T, auth clientv3.Auth) {
 	}
 }
 
-// Client can connect to etcd even if they supply credentials and the server is in AuthDisable mode.
+// TestGetTokenWithoutAuth is when Client can connect to etcd even if they
+// supply credentials and the server is in AuthDisable mode.
 func TestGetTokenWithoutAuth(t *testing.T) {
 	integration2.BeforeTest(t)
 

@@ -58,8 +58,9 @@ func TestTLSClusterOf3(t *testing.T) {
 	clusterMustProgress(t, c.Members)
 }
 
-// Test that a cluster can progress when using separate client and server certs when peering. This supports certificate
-// authorities that don't issue dual-usage certificates.
+// TestTLSClusterOf3WithSpecificUsage tests that a cluster can progress when
+// using separate client and server certs when peering. This supports
+// certificate authorities that don't issue dual-usage certificates.
 func TestTLSClusterOf3WithSpecificUsage(t *testing.T) {
 	integration.BeforeTest(t)
 	c := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, PeerTLS: &integration.TestTLSInfoWithSpecificUsage})

@@ -200,7 +201,7 @@ func TestAddMemberAfterClusterFullRotation(t *testing.T) {
 	clusterMustProgress(t, c.Members)
 }
 
-// Ensure we can remove a member then add a new one back immediately.
+// TestIssue2681 ensures we can remove a member then add a new one back immediately.
 func TestIssue2681(t *testing.T) {
 	integration.BeforeTest(t)
 	c := integration.NewCluster(t, &integration.ClusterConfig{Size: 5, DisableStrictReconfigCheck: true})

@@ -216,10 +217,10 @@ func TestIssue2681(t *testing.T) {
 	clusterMustProgress(t, c.Members)
 }
 
-// Ensure we can remove a member after a snapshot then add a new one back.
+// TestIssue2746 ensures we can remove a member after a snapshot then add a new one back.
 func TestIssue2746(t *testing.T) { testIssue2746(t, 5) }
 
-// With 3 nodes TestIssue2476 sometimes had a shutdown with an inflight snapshot.
+// TestIssue2746WithThree tests with 3 nodes TestIssue2476 sometimes had a shutdown with an inflight snapshot.
 func TestIssue2746WithThree(t *testing.T) { testIssue2746(t, 3) }
 
 func testIssue2746(t *testing.T, members int) {

@@ -242,7 +243,7 @@ func testIssue2746(t *testing.T, members int) {
 	clusterMustProgress(t, c.Members)
 }
 
-// Ensure etcd will not panic when removing a just started member.
+// TestIssue2904 ensures etcd will not panic when removing a just started member.
 func TestIssue2904(t *testing.T) {
 	integration.BeforeTest(t)
 	// start 1-member Cluster to ensure member 0 is the leader of the Cluster.

@@ -27,6 +27,7 @@ import (
 	integration2 "go.etcd.io/etcd/tests/v3/framework/integration"
 )
 
+// TestCompactionHash tests the compaction hash
 // TODO: Change this to fuzz test
 func TestCompactionHash(t *testing.T) {
 	integration2.BeforeTest(t)

@@ -174,7 +174,7 @@ func TestElectionFailover(t *testing.T) {
 	}
 }
 
-// TestElectionSessionRelock ensures that campaigning twice on the same election
+// TestElectionSessionRecampaign ensures that campaigning twice on the same election
 // with the same lock will Proclaim instead of deadlocking.
 func TestElectionSessionRecampaign(t *testing.T) {
 	integration.BeforeTest(t)

@@ -87,7 +87,7 @@ func TestV3PutOverwrite(t *testing.T) {
 	}
 }
 
-// TestPutRestart checks if a put after an unrelated member restart succeeds
+// TestV3PutRestart checks if a put after an unrelated member restart succeeds
 func TestV3PutRestart(t *testing.T) {
 	integration.BeforeTest(t)
 	clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3, UseBridge: true})

@@ -395,7 +395,7 @@ func TestV3TxnDuplicateKeys(t *testing.T) {
 	}
 }
 
-// Testv3TxnRevision tests that the transaction header revision is set as expected.
+// TestV3TxnRevision tests that the transaction header revision is set as expected.
 func TestV3TxnRevision(t *testing.T) {
 	integration.BeforeTest(t)
 	clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})

@@ -445,7 +445,7 @@ func TestV3TxnRevision(t *testing.T) {
 	}
 }
 
-// Testv3TxnCmpHeaderRev tests that the txn header revision is set as expected
+// TestV3TxnCmpHeaderRev tests that the txn header revision is set as expected
 // when compared to the Succeeded field in the txn response.
 func TestV3TxnCmpHeaderRev(t *testing.T) {
 	integration.BeforeTest(t)

@@ -613,7 +613,7 @@ func TestV3TxnRangeCompare(t *testing.T) {
 	}
 }
 
-// TestV3TxnNested tests nested txns follow paths as expected.
+// TestV3TxnNestedPath tests nested txns follow paths as expected.
 func TestV3TxnNestedPath(t *testing.T) {
 	integration.BeforeTest(t)
 	clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})

@@ -34,7 +34,7 @@ import (
 	"google.golang.org/grpc/status"
 )
 
-// TestV3LeasePrmote ensures the newly elected leader can promote itself
+// TestV3LeasePromote ensures the newly elected leader can promote itself
 // to the primary lessor, refresh the leases and start to manage leases.
 // TODO: use customized clock to make this test go faster?
 func TestV3LeasePromote(t *testing.T) {

@@ -108,7 +108,7 @@ func TestV3LeaseRevoke(t *testing.T) {
 	})
 }
 
-// TestV3LeaseGrantById ensures leases may be created by a given id.
+// TestV3LeaseGrantByID ensures leases may be created by a given id.
 func TestV3LeaseGrantByID(t *testing.T) {
 	integration.BeforeTest(t)
 	clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})

@@ -1033,7 +1033,7 @@ func TestWatchWithProgressNotify(t *testing.T) {
 	}
 }
 
-// TestV3WatcMultiOpenhClose opens many watchers concurrently on multiple streams.
+// TestV3WatchClose opens many watchers concurrently on multiple streams.
 func TestV3WatchClose(t *testing.T) {
 	integration.BeforeTest(t)
 	clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, UseBridge: true})