Formatted source code for go 1.19.6.

Signed-off-by: James Blair <mail@jamesblair.net>
James Blair 2023-02-20 12:44:14 +13:00
parent 7318f5dd0c
commit a91bacf567
41 changed files with 563 additions and 554 deletions

View File

@ -2,16 +2,18 @@
// source: auth.proto
/*
Package authpb is a generated protocol buffer package.

It is generated from these files:
    auth.proto

It has these top-level messages:
    UserAddOptions
    User
    Permission
    Role
*/
package authpb

View File

@ -68,6 +68,5 @@ Use a custom context to set timeouts on your operations:
// handle error
}
}
*/
package client

View File

@ -57,9 +57,9 @@ func (rc *recorder) GetCurrentState() (state connectivity.State) {
// RecordTransition records state change happening in subConn and based on that
// it evaluates what aggregated state should be.
//
// - If at least one SubConn in Ready, the aggregated state is Ready;
// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
// - Else the aggregated state is TransientFailure.
//
// Idle and Shutdown are not considered.
//
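The three rules above reduce to a simple fold over sub-connection states. A minimal sketch, assuming a plain slice of states instead of etcd's internal counters (connectivity is gRPC's standard package):

    package balancer

    import "google.golang.org/grpc/connectivity"

    // aggregate folds sub-connection states into one client-visible state,
    // with precedence Ready > Connecting > TransientFailure.
    func aggregate(states []connectivity.State) connectivity.State {
        connecting := false
        for _, s := range states {
            switch s {
            case connectivity.Ready:
                return connectivity.Ready // one Ready sub-conn is enough
            case connectivity.Connecting:
                connecting = true
            }
        }
        if connecting {
            return connectivity.Connecting
        }
        return connectivity.TransientFailure
    }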

View File

@ -102,5 +102,4 @@
// The grpc load balancer is registered statically and is shared across etcd clients.
// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
// variable. E.g. "ETCD_CLIENT_DEBUG=1".
package clientv3

View File

@ -1016,12 +1016,12 @@ func TestWatchCancelOnServer(t *testing.T) {
// TestWatchOverlapContextCancel stresses the watcher stream teardown path by
// creating/canceling watchers to ensure that new watchers are not taken down
// by a torn down watch stream. The sort of race that's being detected:
// 1. create w1 using a cancelable ctx with %v as "ctx"
// 2. cancel ctx
// 3. watcher client begins tearing down watcher grpc stream since no more watchers
// 4. start creating watcher w2 using a new "ctx" (not canceled), attaches to old grpc stream
// 5. watcher client finishes tearing down stream on "ctx"
// 6. w2 comes back canceled
func TestWatchOverlapContextCancel(t *testing.T) {
f := func(clus *integration.ClusterV3) {}
testWatchOverlapContextCancel(t, f)

View File

@ -19,28 +19,27 @@
//
// First, create a leasing KV from a clientv3.Client 'cli':
//
// lkv, err := leasing.NewKV(cli, "leasing-prefix")
// if err != nil {
// // handle error
// }
//
// A range request for a key "abc" tries to acquire a leasing key so it can cache the range's
// key locally. On the server, the leasing key is stored to "leasing-prefix/abc":
//
// resp, err := lkv.Get(context.TODO(), "abc")
//
// Future linearized read requests using 'lkv' will be served locally for the lease's lifetime:
//
// resp, err = lkv.Get(context.TODO(), "abc")
//
// If another leasing client writes to a leased key, then the owner relinquishes its exclusive
// access, permitting the writer to modify the key:
//
// lkv2, err := leasing.NewKV(cli, "leasing-prefix")
// if err != nil {
// // handle error
// }
// lkv2.Put(context.TODO(), "abc", "456")
// resp, err = lkv.Get("abc")
package leasing

View File

@ -39,5 +39,4 @@
// resp, _ = cli.Get(context.TODO(), "abc")
// fmt.Printf("%s\n", resp.Kvs[0].Value)
// // Output: 456
package namespace

View File

@ -52,5 +52,4 @@
// r := &etcdnaming.GRPCResolver{Client: c}
// return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}, clientv3.WithLease(lid))
// }
package naming

View File

@ -38,5 +38,4 @@
// cli.KV = ordering.NewKV(cli.KV, vf)
//
// Now calls using 'cli' will reject order violations with an error.
package ordering

View File

@ -25,15 +25,14 @@ import (
// Txn is the interface that wraps mini-transactions.
//
// Txn(context.TODO()).If(
// Compare(Value(k1), ">", v1),
// Compare(Version(k1), "=", 2)
// ).Then(
// OpPut(k2,v2), OpPut(k3,v3)
// ).Else(
// OpPut(k4,v4), OpPut(k5,v5)
// ).Commit()
type Txn interface {
// If takes a list of comparisons. If all comparisons passed in succeed,
// the operations passed into Then() will be executed. Or the operations
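A concrete version of the mini-transaction above, written against the public clientv3 API (the endpoint address is illustrative):

    package main

    import (
        "context"
        "log"

        "go.etcd.io/etcd/clientv3"
    )

    func main() {
        cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
        if err != nil {
            log.Fatal(err)
        }
        defer cli.Close()

        // Put k2/k3 only if k1's value exceeds v1 and k1 has exactly
        // two versions; otherwise put k4/k5.
        resp, err := cli.Txn(context.TODO()).
            If(
                clientv3.Compare(clientv3.Value("k1"), ">", "v1"),
                clientv3.Compare(clientv3.Version("k1"), "=", 2),
            ).
            Then(clientv3.OpPut("k2", "v2"), clientv3.OpPut("k3", "v3")).
            Else(clientv3.OpPut("k4", "v4"), clientv3.OpPut("k5", "v5")).
            Commit()
        if err != nil {
            log.Fatal(err)
        }
        log.Println("txn succeeded:", resp.Succeeded)
    }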

View File

@ -2,13 +2,15 @@
// source: snap.proto
/*
Package snappb is a generated protocol buffer package.

It is generated from these files:
    snap.proto

It has these top-level messages:
    Snapshot
*/
package snappb

View File

@ -98,7 +98,7 @@ func TestStoreStatsDeleteFail(t *testing.T) {
testutil.AssertEqual(t, uint64(1), s.Stats.DeleteFail, "")
}
// Ensure that the number of expirations is recorded in the stats.
func TestStoreStatsExpireCount(t *testing.T) {
s := newStore()
fc := newFakeClock()

View File

@ -41,5 +41,4 @@
// if err != nil {
// // handle error!
// }
package v3client

View File

@ -2,21 +2,23 @@
// source: v3election.proto
/*
Package v3electionpb is a generated protocol buffer package.

It is generated from these files:
    v3election.proto

It has these top-level messages:
    CampaignRequest
    CampaignResponse
    LeaderKey
    LeaderRequest
    LeaderResponse
    ResignRequest
    ResignResponse
    ProclaimRequest
    ProclaimResponse
*/
package v3electionpb

View File

@ -2,16 +2,18 @@
// source: v3lock.proto
/*
Package v3lockpb is a generated protocol buffer package.

It is generated from these files:
    v3lock.proto

It has these top-level messages:
    LockRequest
    LockResponse
    UnlockRequest
    UnlockResponse
*/
package v3lockpb

View File

@ -2,111 +2,113 @@
// source: etcdserver.proto
/*
Package etcdserverpb is a generated protocol buffer package.

It is generated from these files:
    etcdserver.proto
    raft_internal.proto
    rpc.proto

It has these top-level messages:
    Request
    Metadata
    RequestHeader
    InternalRaftRequest
    EmptyResponse
    InternalAuthenticateRequest
    ResponseHeader
    RangeRequest
    RangeResponse
    PutRequest
    PutResponse
    DeleteRangeRequest
    DeleteRangeResponse
    RequestOp
    ResponseOp
    Compare
    TxnRequest
    TxnResponse
    CompactionRequest
    CompactionResponse
    HashRequest
    HashKVRequest
    HashKVResponse
    HashResponse
    SnapshotRequest
    SnapshotResponse
    WatchRequest
    WatchCreateRequest
    WatchCancelRequest
    WatchProgressRequest
    WatchResponse
    LeaseGrantRequest
    LeaseGrantResponse
    LeaseRevokeRequest
    LeaseRevokeResponse
    LeaseCheckpoint
    LeaseCheckpointRequest
    LeaseCheckpointResponse
    LeaseKeepAliveRequest
    LeaseKeepAliveResponse
    LeaseTimeToLiveRequest
    LeaseTimeToLiveResponse
    LeaseLeasesRequest
    LeaseStatus
    LeaseLeasesResponse
    Member
    MemberAddRequest
    MemberAddResponse
    MemberRemoveRequest
    MemberRemoveResponse
    MemberUpdateRequest
    MemberUpdateResponse
    MemberListRequest
    MemberListResponse
    MemberPromoteRequest
    MemberPromoteResponse
    DefragmentRequest
    DefragmentResponse
    MoveLeaderRequest
    MoveLeaderResponse
    AlarmRequest
    AlarmMember
    AlarmResponse
    StatusRequest
    StatusResponse
    AuthEnableRequest
    AuthDisableRequest
    AuthenticateRequest
    AuthUserAddRequest
    AuthUserGetRequest
    AuthUserDeleteRequest
    AuthUserChangePasswordRequest
    AuthUserGrantRoleRequest
    AuthUserRevokeRoleRequest
    AuthRoleAddRequest
    AuthRoleGetRequest
    AuthUserListRequest
    AuthRoleListRequest
    AuthRoleDeleteRequest
    AuthRoleGrantPermissionRequest
    AuthRoleRevokePermissionRequest
    AuthEnableResponse
    AuthDisableResponse
    AuthenticateResponse
    AuthUserAddResponse
    AuthUserGetResponse
    AuthUserDeleteResponse
    AuthUserChangePasswordResponse
    AuthUserGrantRoleResponse
    AuthUserRevokeRoleResponse
    AuthRoleAddResponse
    AuthRoleGetResponse
    AuthRoleListResponse
    AuthUserListResponse
    AuthRoleDeleteResponse
    AuthRoleGrantPermissionResponse
    AuthRoleRevokePermissionResponse
*/
package etcdserverpb

View File

@ -364,8 +364,9 @@ func (r *raftNode) start(rh *raftReadyHandler) {
// the applying workflow. But when the client receives the response,
// it doesn't mean etcd has already successfully saved the data,
// including BoltDB and WAL, because:
// 1. etcd commits the boltDB transaction periodically instead of on each request;
// 2. etcd saves WAL entries in parallel with applying the committed entries.
//
// Accordingly, it might run into a situation of data loss when etcd crashes
// immediately after responding to the client and before the boltDB and WAL
// successfully save the data to disk.

View File

@ -41,7 +41,8 @@ x and y of GCD 1 are coprime to each other
x1 = ( coprime of n * idx1 + offset ) % n
x2 = ( coprime of n * idx2 + offset ) % n
(x2 - x1) = coprime of n * (idx2 - idx1) % n
= (idx2 - idx1) = 1
Consecutive x's are guaranteed to be distinct
*/
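A runnable sketch of this property; the modulus 7, stride 3, and offset 2 are arbitrary illustrative values:

    package main

    import "fmt"

    // walk visits 0..n-1 exactly once by stepping with a stride coprime to n.
    func walk(n, stride, offset int) []int {
        out := make([]int, 0, n)
        for idx := 0; idx < n; idx++ {
            out = append(out, (stride*idx+offset)%n)
        }
        return out
    }

    func main() {
        // stride 3 is coprime to 7: all seven values appear, none repeat.
        fmt.Println(walk(7, 3, 2)) // [2 5 1 4 0 3 6]
    }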

View File

@ -202,7 +202,6 @@ func TestElectionSessionRecampaign(t *testing.T) {
// candidate can be elected on a new key that is a prefix
// of an existing key. To wit, check for regression
// of bug #6278. https://github.com/etcd-io/etcd/issues/6278
func TestElectionOnPrefixOfExistingKey(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)

View File

@ -2,15 +2,17 @@
// source: lease.proto
/*
Package leasepb is a generated protocol buffer package.

It is generated from these files:
    lease.proto

It has these top-level messages:
    Lease
    LeaseInternalRequest
    LeaseInternalResponse
*/
package leasepb

View File

@ -19,7 +19,6 @@
//
// This package should NOT be extended or modified in any way; to modify the
// etcd binary, work in the `go.etcd.io/etcd/etcdmain` package.
package main
import "go.etcd.io/etcd/etcdmain"

View File

@ -39,9 +39,10 @@ var (
// key: "foo"
// rev: 5
// generations:
// {empty}
// {4.0, 5.0(t)}
// {1.0, 2.0, 3.0(t)}
//
// Compacting a keyIndex removes the versions with revision smaller than or
// equal to rev, except the largest one. If the generation becomes empty
@ -51,22 +52,26 @@ var (
// For example:
// compact(2) on the previous example
// generations:
// {empty}
// {4.0, 5.0(t)}
// {2.0, 3.0(t)}
//
// compact(4)
// generations:
// {empty}
// {4.0, 5.0(t)}
//
// compact(5):
// generations:
// {empty} -> key SHOULD be removed.
//
// compact(6):
// generations:
// {empty} -> key SHOULD be removed.
type keyIndex struct {
key []byte
modified revision // the main rev of the last modification
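The compaction rule can be stated compactly over one generation's revision list. A hypothetical sketch using plain int64 main revisions in place of etcd's revision struct:

    // compactGeneration keeps the largest revision <= rev and everything
    // after it; once the tombstone itself is <= rev, the generation is
    // spent and nil is returned (the "{empty}" cases above).
    func compactGeneration(revs []int64, rev int64) []int64 {
        if len(revs) == 0 || revs[len(revs)-1] <= rev {
            return nil // tombstone compacted away
        }
        i := 0
        for i+1 < len(revs) && revs[i+1] <= rev {
            i++
        }
        return revs[i:]
    }

On the example above, compactGeneration([]int64{1, 2, 3}, 2) returns [2 3] and compactGeneration([]int64{1, 2, 3}, 4) returns nil, mirroring compact(2) and compact(4).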

View File

@ -2,14 +2,16 @@
// source: kv.proto
/*
Package mvccpb is a generated protocol buffer package.

It is generated from these files:
    kv.proto

It has these top-level messages:
    KeyValue
    Event
*/
package mvccpb

View File

@ -325,10 +325,10 @@ func (s *watchableStore) moveVictims() (moved int) {
}
// syncWatchers syncs unsynced watchers by:
// 1. choose a set of watchers from the unsynced watcher group
// 2. iterate over the set to get the minimum revision and remove compacted watchers
// 3. use minimum revision to get all key-value pairs and send those events to watchers
// 4. remove synced watchers in set from unsynced group and move to synced group
func (s *watchableStore) syncWatchers() int {
s.mu.Lock()
defer s.mu.Unlock()
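A compilable distillation of those four steps over toy types; the names and shapes are illustrative, not etcd's actual internals:

    package mvcc

    // Toy stand-ins for etcd's internal types.
    type event struct{ rev int64 }
    type watcher struct{ minRev int64 }
    type watcherGroup map[*watcher]struct{}

    // deliver is a stub; the real code sends events on each watcher's channel.
    func deliver(batch watcherGroup, ev event) {}

    func syncOnce(unsynced, synced watcherGroup, compactRev int64, history []event) {
        // 1. choose a set of watchers from the unsynced group (here: all).
        batch := watcherGroup{}
        for w := range unsynced {
            batch[w] = struct{}{}
        }

        // 2. find the minimum revision, dropping watchers that fell behind
        // the compaction point and can never catch up.
        minRev := int64(1<<63 - 1)
        for w := range batch {
            if w.minRev < compactRev {
                delete(unsynced, w)
                delete(batch, w)
                continue
            }
            if w.minRev < minRev {
                minRev = w.minRev
            }
        }

        // 3. replay every key-value change at or after minRev as events.
        for _, ev := range history {
            if ev.rev >= minRev {
                deliver(batch, ev)
            }
        }

        // 4. move the caught-up watchers from unsynced to synced.
        for w := range batch {
            delete(unsynced, w)
            synced[w] = struct{}{}
        }
    }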

View File

@ -341,11 +341,11 @@ func TestWatchRestore(t *testing.T) {
}
// TestWatchRestoreSyncedWatcher tests such a case that:
// 1. watcher is created with a future revision "math.MaxInt64 - 2"
// 2. watcher with a future revision is added to "synced" watcher group
// 3. restore/overwrite storage with snapshot of a higher last revision
// 4. restore operation moves "synced" to "unsynced" watcher group
// 5. choose the watcher from step 1, without panic
func TestWatchRestoreSyncedWatcher(t *testing.T) {
b1, b1Path := backend.NewDefaultTmpBackend()
s1 := newWatchableStore(zap.NewExample(), b1, &lease.FakeLessor{}, nil, nil, StoreConfig{})

View File

@ -241,34 +241,34 @@ type intervalTree struct {
//
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p324
//
// RB-DELETE(T, z)
//
// y = z
// y-original-color = y.color
//
// if z.left == T.nil
// x = z.right
// RB-TRANSPLANT(T, z, z.right)
// else if z.right == T.nil
// x = z.left
// RB-TRANSPLANT(T, z, z.left)
// else
// y = TREE-MINIMUM(z.right)
// y-original-color = y.color
// x = y.right
// if y.p == z
// x.p = y
// else
// RB-TRANSPLANT(T, y, y.right)
// y.right = z.right
// y.right.p = y
// RB-TRANSPLANT(T, z, y)
// y.left = z.left
// y.left.p = y
// y.color = z.color
//
// if y-original-color == BLACK
// RB-DELETE-FIXUP(T, x)
// Delete removes the node with the given interval from the tree, returning
// true if a node is in fact removed.
@ -317,48 +317,47 @@ func (ivt *intervalTree) Delete(ivl Interval) bool {
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p326
//
// RB-DELETE-FIXUP(T, z)
//
// while x ≠ T.root and x.color == BLACK
// if x == x.p.left
// w = x.p.right
// if w.color == RED
// w.color = BLACK
// x.p.color = RED
// LEFT-ROTATE(T, x, p)
// if w.left.color == BLACK and w.right.color == BLACK
// w.color = RED
// x = x.p
// else if w.right.color == BLACK
// w.left.color = BLACK
// w.color = RED
// RIGHT-ROTATE(T, w)
// w = w.p.right
// w.color = x.p.color
// x.p.color = BLACK
// LEFT-ROTATE(T, w.p)
// x = T.root
// else
// w = x.p.left
// if w.color == RED
// w.color = BLACK
// x.p.color = RED
// RIGHT-ROTATE(T, x, p)
// if w.right.color == BLACK and w.left.color == BLACK
// w.color = RED
// x = x.p
// else if w.left.color == BLACK
// w.right.color = BLACK
// w.color = RED
// LEFT-ROTATE(T, w)
// w = w.p.left
// w.color = x.p.color
// x.p.color = BLACK
// RIGHT-ROTATE(T, w.p)
// x = T.root
//
// x.color = BLACK
func (ivt *intervalTree) deleteFixup(x *intervalNode) {
for x != ivt.root && x.color(ivt.sentinel) == black {
if x == x.parent.left { // line 3-20
@ -439,32 +438,32 @@ func (ivt *intervalTree) createIntervalNode(ivl Interval, val interface{}) *inte
//
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p315
//
// RB-INSERT(T, z)
//
// y = T.nil
// x = T.root
//
// while x ≠ T.nil
// y = x
// if z.key < x.key
// x = x.left
// else
// x = x.right
//
// z.p = y
//
// if y == T.nil
// T.root = z
// else if z.key < y.key
// y.left = z
// else
// y.right = z
//
// z.left = T.nil
// z.right = T.nil
// z.color = RED
//
// RB-INSERT-FIXUP(T, z)
// Insert adds a node with the given interval into the tree.
func (ivt *intervalTree) Insert(ivl Interval, val interface{}) {
@ -499,38 +498,37 @@ func (ivt *intervalTree) Insert(ivl Interval, val interface{}) {
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p316
//
// RB-INSERT-FIXUP(T, z)
//
// while z.p.color == RED
// if z.p == z.p.p.left
// y = z.p.p.right
// if y.color == RED
// z.p.color = BLACK
// y.color = BLACK
// z.p.p.color = RED
// z = z.p.p
// else if z == z.p.right
// z = z.p
// LEFT-ROTATE(T, z)
// z.p.color = BLACK
// z.p.p.color = RED
// RIGHT-ROTATE(T, z.p.p)
// else
// y = z.p.p.left
// if y.color == RED
// z.p.color = BLACK
// y.color = BLACK
// z.p.p.color = RED
// z = z.p.p
// else if z == z.p.right
// z = z.p
// RIGHT-ROTATE(T, z)
// z.p.color = BLACK
// z.p.p.color = RED
// LEFT-ROTATE(T, z.p.p)
//
// T.root.color = BLACK
func (ivt *intervalTree) insertFixup(z *intervalNode) {
for z.parent.color(ivt.sentinel) == red {
if z.parent == z.parent.parent.left { // line 3-15
@ -578,26 +576,25 @@ func (ivt *intervalTree) insertFixup(z *intervalNode) {
//
// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.2, p313
//
// LEFT-ROTATE(T, x)
//
// y = x.right
// x.right = y.left
//
// if y.left ≠ T.nil
// y.left.p = x
//
// y.p = x.p
//
// if x.p == T.nil
// T.root = y
// else if x == x.p.left
// x.p.left = y
// else
// x.p.right = y
//
// y.left = x
// x.p = y
func (ivt *intervalTree) rotateLeft(x *intervalNode) {
// rotateLeft x must have right child
if x.right == ivt.sentinel {
@ -624,26 +621,25 @@ func (ivt *intervalTree) rotateLeft(x *intervalNode) {
// rotateRight moves x so it is right of its left child
//
// RIGHT-ROTATE(T, x)
//
// y = x.left
// x.left = y.right
//
// if y.right ≠ T.nil
// y.right.p = x
//
// y.p = x.p
//
// if x.p == T.nil
// T.root = y
// else if x == x.p.right
// x.p.right = y
// else
// x.p.left = y
//
// y.right = x
// x.p = y
func (ivt *intervalTree) rotateRight(x *intervalNode) {
// rotateRight x must have left child
if x.left == ivt.sentinel {

View File

@ -63,27 +63,28 @@ func TestIntervalTreeInsert(t *testing.T) {
// Use https://www.cs.usfca.edu/~galles/visualization/RedBlack.html for test case creation.
//
// Regular Binary Search Tree
//
// [0,1]
// \
// [1,2]
// \
// [3,4]
// \
// [5,6]
// \
// [7,8]
// \
// [8,9]
//
// Self-Balancing Binary Search Tree
//
// [1,2]
// / \
// [0,1] [5,6]
// / \
// [3,4] [7,8]
// \
// [8,9]
func TestIntervalTreeSelfBalanced(t *testing.T) {
ivt := NewIntervalTree()
ivt.Insert(NewInt64Interval(0, 1), 0)
@ -120,58 +121,56 @@ func TestIntervalTreeSelfBalanced(t *testing.T) {
// Use https://www.cs.usfca.edu/~galles/visualization/RedBlack.html for test case creation.
// See https://github.com/etcd-io/etcd/issues/10877 for more detail.
//
// After insertion:
//
// [510,511]
// / \
// ---------- -----------------------
// / \
// [82,83] [830,831]
// / \ / \
// / \ / \
// [11,12] [383,384](red) [647,648] [899,900](red)
// / \ / \ / \
// / \ / \ / \
// [261,262] [410,411] [514,515](red) [815,816](red) [888,889] [972,973]
// / \ /
// / \ /
// [238,239](red) [292,293](red) [953,954](red)
//
// After deleting 514 (no rebalance):
//
// [510,511]
// / \
// ---------- -----------------------
// / \
// [82,83] [830,831]
// / \ / \
// / \ / \
// [11,12] [383,384](red) [647,648] [899,900](red)
// / \ \ / \
// / \ \ / \
// [261,262] [410,411] [815,816](red) [888,889] [972,973]
// / \ /
// / \ /
// [238,239](red) [292,293](red) [953,954](red)
//
// After deleting 11 (requires rebalancing):
//
// [510,511]
// / \
// ---------- --------------------------
// / \
// [383,384] [830,831]
// / \ / \
// / \ / \
// [261,262](red) [410,411] [647,648] [899,900](red)
// / \ \ / \
// / \ \ / \
// [82,83] [292,293] [815,816](red) [888,889] [972,973]
// \ /
// \ /
// [238,239](red) [953,954](red)
func TestIntervalTreeDelete(t *testing.T) {
ivt := NewIntervalTree()
ivt.Insert(NewInt64Interval(510, 511), 0)

View File

@ -33,10 +33,9 @@ var _ Logger = &defaultLogger{}
//
// For example:
//
// var defaultLogger Logger
// g := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4)
// defaultLogger = NewLogger(g)
func NewLogger(g grpclog.LoggerV2) Logger { return &defaultLogger{g: g} }
type defaultLogger struct {

View File

@ -26,9 +26,8 @@ var _ Logger = &packageLogger{}
//
// For example:
//
// var defaultLogger Logger
// defaultLogger = NewPackageLogger("go.etcd.io/etcd", "snapshot")
func NewPackageLogger(repo, pkg string) Logger {
return &packageLogger{p: capnslog.NewPackageLogger(repo, pkg)}
}

View File

@ -35,7 +35,6 @@ running(leaking) after all tests.
defer testutil.AfterTest(t)
...
}
*/
func CheckLeakedGoroutine() bool {
if testing.Short() {

View File

@ -235,8 +235,8 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, additionalUsages .
// Previously,
// 1. Server has non-empty (*tls.Config).Certificates on client hello
// 2. Server calls (*tls.Config).GetCertificate iff:
// - Server's (*tls.Config).Certificates is not empty, or
// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName
//
// When (*tls.Config).Certificates is always populated on initial handshake,
// client is expected to provide a valid matching SNI to pass the TLS

View File

@ -37,9 +37,11 @@ type Changer struct {
// config is empty and initializes it with a copy of the incoming (=left)
// majority config. That is, it transitions from
//
// (1 2 3)&&()
//
// to
// (1 2 3)&&(1 2 3).
//
// The supplied changes are then applied to the incoming majority config,
// resulting in a joint configuration that in terms of the Raft thesis[1]

View File

@ -25,46 +25,46 @@ A simple example application, _raftexample_, is also available to help illustrat
how to use this package in practice:
https://github.com/etcd-io/etcd/tree/master/contrib/raftexample
# Usage
The primary object in raft is a Node. You either start a Node from scratch
using raft.StartNode or start a Node from some initial state using raft.RestartNode.
To start a node from scratch:
storage := raft.NewMemoryStorage()
c := &Config{
ID: 0x01,
ElectionTick: 10,
HeartbeatTick: 1,
Storage: storage,
MaxSizePerMsg: 4096,
MaxInflightMsgs: 256,
}
n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
To restart a node from previous state:
storage := raft.NewMemoryStorage()
// recover the in-memory storage from persistent
// snapshot, state and entries.
storage.ApplySnapshot(snapshot)
storage.SetHardState(state)
storage.Append(entries)
c := &Config{
ID: 0x01,
ElectionTick: 10,
HeartbeatTick: 1,
Storage: storage,
MaxSizePerMsg: 4096,
MaxInflightMsgs: 256,
}
// restart raft without peer information.
// peer information is already included in the storage.
n := raft.RestartNode(c)
Now that you are holding onto a Node you have a few responsibilities:
@ -120,29 +120,29 @@ represented by an abstract "tick".
The total state machine handling loop will look something like this:
for {
select {
case <-s.Ticker:
n.Tick()
case rd := <-s.Node.Ready():
saveToStorage(rd.State, rd.Entries, rd.Snapshot)
send(rd.Messages)
if !raft.IsEmptySnap(rd.Snapshot) {
processSnapshot(rd.Snapshot)
}
for _, entry := range rd.CommittedEntries {
process(entry)
if entry.Type == raftpb.EntryConfChange {
var cc raftpb.ConfChange
cc.Unmarshal(entry.Data)
s.Node.ApplyConfChange(cc)
}
}
s.Node.Advance()
case <-s.done:
return
}
}
To propose changes to the state machine from your node take your application
data, serialize it into a byte slice and call:
@ -169,7 +169,7 @@ given ID MUST be used only once even if the old node has been removed.
This means that for example IP addresses make poor node IDs since they
may be reused. Node IDs must be non-zero.
# Implementation notes
This implementation is up to date with the final Raft thesis
(https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although our
@ -194,7 +194,7 @@ cannot be removed any more since the cluster cannot make progress.
For this reason it is highly recommended to use three or more nodes in
every cluster.
# MessageType
Package raft sends and receives messages in Protocol Buffer format (defined
in raftpb package). Each state (follower, candidate, leader) implements its
@ -295,6 +295,5 @@ stale log entries:
that the follower that sent this 'MsgUnreachable' is not reachable, often
indicating 'MsgApp' is lost. When follower's progress state is replicate,
the leader sets it back to probe.
*/
package raft

View File

@ -146,12 +146,14 @@ func TestAppend(t *testing.T) {
// TestLogMaybeAppend ensures:
// If the given (index, term) matches with the existing log:
// 1. If an existing entry conflicts with a new one (same index
// but different terms), delete the existing entry and all that
// follow it
// 2. Append any new entries not already in the log
//
// If the given (index, term) does not match with the existing log:
// return false
func TestLogMaybeAppend(t *testing.T) {
previousEnts := []pb.Entry{{Index: 1, Term: 1}, {Index: 2, Term: 2}, {Index: 3, Term: 3}}
lastindex := uint64(3)
@ -528,7 +530,7 @@ func TestStableToWithSnap(t *testing.T) {
}
}
// TestCompaction ensures that the number of log entries is correct after compactions.
func TestCompaction(t *testing.T) {
tests := []struct {
lastIndex uint64

View File

@ -958,14 +958,14 @@ func (s *ignoreSizeHintMemStorage) Entries(lo, hi uint64, maxSize uint64) ([]raf
// Storage's Entries size limitation is slightly more permissive than Raft's
// internal one. The original bug was the following:
//
// - node learns that index 11 (or 100, doesn't matter) is committed
// - nextEnts returns index 1..10 in CommittedEntries due to size limiting. However,
// index 10 already exceeds maxBytes, due to a user-provided impl of Entries.
// - Commit index gets bumped to 10
// - the node persists the HardState, but crashes before applying the entries
// - upon restart, the storage returns the same entries, but `slice` takes a different code path
// (since it is now called with an upper bound of 10) and removes the last entry.
// - Raft emits a HardState with a regressing commit index.
//
// A simpler version of this test would have the storage return far fewer entries than dictated
// by maxSize (for example, exactly one entry) after the restart, resulting in a larger regression.

View File

@ -1212,10 +1212,10 @@ func TestStepIgnoreOldTermMsg(t *testing.T) {
}
// TestHandleMsgApp ensures:
// 1. Reply false if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm.
// 2. If an existing entry conflicts with a new one (same index but different terms),
// delete the existing entry and all that follow it; append any new entries not already in the log.
// 3. If leaderCommit > commitIndex, set commitIndex = min(leaderCommit, index of last new entry).
func TestHandleMsgApp(t *testing.T) {
tests := []struct {
m pb.Message
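Those three rules are the classic AppendEntries receiver rules; a toy, self-contained rendering of them (not etcd's raftLog, whose interface is richer):

    package raft

    type entry struct{ Index, Term uint64 }

    // toyLog stores the entry at index i in ents[i-1].
    type toyLog struct {
        ents      []entry
        committed uint64
    }

    func (l *toyLog) term(i uint64) uint64 {
        if i == 0 || i > uint64(len(l.ents)) {
            return 0
        }
        return l.ents[i-1].Term
    }

    func (l *toyLog) handleAppend(prevIndex, prevTerm, leaderCommit uint64, ents []entry) bool {
        // 1. reply false when no entry at prevIndex carries prevTerm.
        if prevIndex != 0 && l.term(prevIndex) != prevTerm {
            return false
        }
        for _, e := range ents {
            // 2. on conflict, truncate the existing tail...
            if t := l.term(e.Index); t != 0 && t != e.Term {
                l.ents = l.ents[:e.Index-1]
            }
            // ...and append entries not already in the log.
            if e.Index > uint64(len(l.ents)) {
                l.ents = append(l.ents, e)
            }
        }
        // 3. advance commitIndex, bounded by the index of the last new entry.
        if lastNew := prevIndex + uint64(len(ents)); leaderCommit > l.committed {
            if leaderCommit < lastNew {
                l.committed = leaderCommit
            } else {
                l.committed = lastNew
            }
        }
        return true
    }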

View File

@ -2,21 +2,23 @@
// source: raft.proto
/*
Package raftpb is a generated protocol buffer package.

It is generated from these files:
    raft.proto

It has these top-level messages:
    Entry
    SnapshotMetadata
    Snapshot
    Message
    HardState
    ConfState
    ConfChange
    ConfChangeSingle
    ConfChangeV2
*/
package raftpb

View File

@ -723,17 +723,17 @@ func TestRawNodeStatus(t *testing.T) {
// TestNodeCommitPaginationAfterRestart. The anomaly here was even worse as the
// Raft group would forget to apply entries:
//
// - node learns that index 11 is committed
// - nextEnts returns index 1..10 in CommittedEntries (but index 10 already
// exceeds maxBytes), which isn't noticed internally by Raft
// - Commit index gets bumped to 10
// - the node persists the HardState, but crashes before applying the entries
// - upon restart, the storage returns the same entries, but `slice` takes a
// different code path and removes the last entry.
// - Raft does not emit a HardState, but when the app calls Advance(), it bumps
// its internal applied index cursor to 10 (when it should be 9)
// - the next Ready asks the app to apply index 11 (omitting index 10), losing a
// write.
func TestRawNodeCommitPaginationAfterRestart(t *testing.T) {
s := &ignoreSizeHintMemStorage{
MemoryStorage: NewMemoryStorage(),

View File

@ -300,7 +300,7 @@ IRRCompaction, IRRLeaseGrant, IRRLeaseRevoke, IRRLeaseCheckpoint`, et)
return filters
}
// listEntriesType filters and prints entries based on the entry-type flag.
func listEntriesType(entrytype string, streamdecoder string, ents []raftpb.Entry) {
entryFilters := evaluateEntrytypeFlag(entrytype)
printerMap := map[string]EntryPrinter{"InternalRaftRequest": printInternalRaftRequest,

View File

@ -70,6 +70,5 @@ snapshot to the end of the WAL are read first:
This will give you the metadata, the last raft.State and the slice of
raft.Entry items in the log.
*/
package wal

View File

@ -2,14 +2,16 @@
// source: record.proto
/*
Package walpb is a generated protocol buffer package.

It is generated from these files:
    record.proto

It has these top-level messages:
    Record
    Snapshot
*/
package walpb