Mirror of https://github.com/etcd-io/etcd.git, synced 2024-09-27 06:25:44 +00:00.

Formatted source code for go 1.19.6.
Signed-off-by: James Blair <mail@jamesblair.net>

@@ -98,7 +98,7 @@ func TestStoreStatsDeleteFail(t *testing.T) {
 	testutil.AssertEqual(t, uint64(1), s.Stats.DeleteFail, "")
 }
 
-//Ensure that the number of expirations is recorded in the stats.
+// Ensure that the number of expirations is recorded in the stats.
 func TestStoreStatsExpireCount(t *testing.T) {
 	s := newStore()
 	fc := newFakeClock()
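
The hunk shows only the opening of TestStoreStatsExpireCount. For context, a minimal sketch of how such a test can assert the expiration count, assuming the v2store test helpers visible above (newStore, newFakeClock, testutil.AssertEqual) and the clockwork-style fake clock the package's tests use; it lives inside the same package, imports elided:

	func TestStoreStatsExpireCount(t *testing.T) {
		s := newStore()
		fc := newFakeClock()

		// Create a key that expires 500ms after the fake "now", advance the
		// clock past the deadline, then collect the expired keys.
		s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)})
		fc.Advance(600 * time.Millisecond)
		s.DeleteExpiredKeys(fc.Now())

		// Exactly one expiration should now be recorded in the stats.
		testutil.AssertEqual(t, uint64(1), s.Stats.ExpireCount, "")
	}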

@@ -41,5 +41,4 @@
 //	if err != nil {
 //		// handle error!
 //	}
-//
 package v3client
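
The three comment lines above are the tail of the usage example in the v3client package doc. A fuller sketch of that usage, assuming the v3.5 module paths and an embedded server started through the embed package (the directory and key names are placeholders):

	package main

	import (
		"context"
		"log"

		"go.etcd.io/etcd/server/v3/embed"
		"go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
	)

	func main() {
		cfg := embed.NewConfig()
		cfg.Dir = "default.etcd"
		e, err := embed.StartEtcd(cfg)
		if err != nil {
			log.Fatal(err)
		}
		defer e.Close()
		<-e.Server.ReadyNotify()

		// New wraps the running server in a clientv3.Client that calls
		// into it directly instead of going over the network.
		cli := v3client.New(e.Server)
		defer cli.Close()

		if _, err := cli.Put(context.TODO(), "foo", "bar"); err != nil {
			// handle error!
			log.Fatal(err)
		}
	}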

@@ -340,8 +340,9 @@ func (r *raftNode) start(rh *raftReadyHandler) {
 	// the applying workflow. But when the client receives the response,
 	// it doesn't mean etcd has already successfully saved the data,
 	// including BoltDB and WAL, because:
-	// 1. etcd commits the boltDB transaction periodically instead of on each request;
-	// 2. etcd saves WAL entries in parallel with applying the committed entries.
+	//  1. etcd commits the boltDB transaction periodically instead of on each request;
+	//  2. etcd saves WAL entries in parallel with applying the committed entries.
+	//
 	// Accordingly, it might run into a situation of data loss when the etcd crashes
 	// immediately after responding to the client and before the boltDB and WAL
 	// successfully save the data to disk.
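
To make the failure window concrete, here is a toy periodic-commit loop in the style the comment describes; this is illustrative only, not etcd's backend code. Writes are acknowledged as soon as they land in the open bbolt transaction and become durable only when the ticker fires, so a crash between the two loses acknowledged data:

	package main

	import (
		"log"
		"time"

		bolt "go.etcd.io/bbolt"
	)

	// run acknowledges each write immediately but commits the transaction
	// only on a timer, mimicking the periodic boltDB commit above.
	func run(db *bolt.DB, writes <-chan []byte) error {
		tx, err := db.Begin(true)
		if err != nil {
			return err
		}
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case w := <-writes:
				b, err := tx.CreateBucketIfNotExists([]byte("kv"))
				if err != nil {
					return err
				}
				if err := b.Put(w, w); err != nil {
					return err
				}
				// A response could be sent to the caller here, before
				// anything has reached disk.
			case <-ticker.C:
				if err := tx.Commit(); err != nil { // durability happens here
					return err
				}
				if tx, err = db.Begin(true); err != nil {
					return err
				}
			}
		}
	}

	func main() {
		db, err := bolt.Open("toy.db", 0o600, nil)
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()

		writes := make(chan []byte)
		go func() { writes <- []byte("foo") }()
		log.Fatal(run(db, writes)) // runs until an error occurs
	}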

@@ -19,7 +19,6 @@
 //
 // This package should NOT be extended or modified in any way; to modify the
 // etcd binary, work in the `go.etcd.io/etcd/etcdmain` package.
-//
 package main
 
 import (

@@ -39,9 +39,10 @@ var (
 // key: "foo"
 // rev: 5
 // generations:
-//    {empty}
-//    {4.0, 5.0(t)}
-//    {1.0, 2.0, 3.0(t)}
+//
+//	{empty}
+//	{4.0, 5.0(t)}
+//	{1.0, 2.0, 3.0(t)}
 //
 // Compact a keyIndex removes the versions with smaller or equal to
 // rev except the largest one. If the generation becomes empty

@@ -51,22 +52,26 @@
 // For example:
 // compact(2) on the previous example
 // generations:
-//    {empty}
-//    {4.0, 5.0(t)}
-//    {2.0, 3.0(t)}
+//
+//	{empty}
+//	{4.0, 5.0(t)}
+//	{2.0, 3.0(t)}
 //
 // compact(4)
 // generations:
-//    {empty}
-//    {4.0, 5.0(t)}
+//
+//	{empty}
+//	{4.0, 5.0(t)}
 //
 // compact(5):
 // generations:
-//    {empty} -> key SHOULD be removed.
+//
+//	{empty} -> key SHOULD be removed.
 //
 // compact(6):
 // generations:
-//    {empty} -> key SHOULD be removed.
+//
+//	{empty} -> key SHOULD be removed.
 type keyIndex struct {
 	key         []byte
 	modified    revision // the main rev of the last modification
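
A toy model of the compaction rule in the doc comment, with plain ints for revisions and generations ordered oldest to newest (the comment lists them newest first); this sketches the semantics only, not the real keyIndex code:

	package main

	import "fmt"

	// generation models one lifetime of a key; a closed generation's last
	// revision is the tombstone that ended it.
	type generation struct {
		revs   []int
		closed bool
	}

	// compact drops revisions <= rev, keeping the largest such revision in
	// a generation unless it is the tombstone, and drops generations that
	// become empty. A nil result is the "key SHOULD be removed" case.
	func compact(gens []generation, rev int) []generation {
		var out []generation
		for _, g := range gens {
			var keep []int
			for i, r := range g.revs {
				tombstone := g.closed && i == len(g.revs)-1
				largest := r <= rev && (i == len(g.revs)-1 || g.revs[i+1] > rev)
				if r > rev || (largest && !tombstone) {
					keep = append(keep, r)
				}
			}
			if len(keep) > 0 || !g.closed {
				out = append(out, generation{revs: keep, closed: g.closed})
			}
		}
		if len(out) == 1 && !out[0].closed && len(out[0].revs) == 0 {
			return nil // only the empty current generation is left
		}
		return out
	}

	func main() {
		gens := []generation{
			{revs: []int{1, 2, 3}, closed: true}, // {1.0, 2.0, 3.0(t)}
			{revs: []int{4, 5}, closed: true},    // {4.0, 5.0(t)}
			{},                                   // {empty}
		}
		fmt.Println(compact(gens, 2)) // [{[2 3] true} {[4 5] true} {[] false}]
		fmt.Println(compact(gens, 4)) // [{[4 5] true} {[] false}]
		fmt.Println(compact(gens, 5)) // [] -> key removed
	}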

@@ -324,10 +324,10 @@ func (s *watchableStore) moveVictims() (moved int) {
 }
 
 // syncWatchers syncs unsynced watchers by:
-// 1. choose a set of watchers from the unsynced watcher group
-// 2. iterate over the set to get the minimum revision and remove compacted watchers
-// 3. use minimum revision to get all key-value pairs and send those events to watchers
-// 4. remove synced watchers in set from unsynced group and move to synced group
+//  1. choose a set of watchers from the unsynced watcher group
+//  2. iterate over the set to get the minimum revision and remove compacted watchers
+//  3. use minimum revision to get all key-value pairs and send those events to watchers
+//  4. remove synced watchers in set from unsynced group and move to synced group
 func (s *watchableStore) syncWatchers() int {
 	s.mu.Lock()
 	defer s.mu.Unlock()
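
The same four steps in schematic form, with toy types standing in for the real watcherGroup and event plumbing; the real code also caps the batch size and filters the events delivered to each watcher:

	type watcher struct {
		minRev int64
		ch     chan []string
	}

	func syncWatchers(unsynced, synced map[*watcher]bool, currentRev, compactRev int64, readSince func(rev int64) []string) int {
		// 1+2. walk the chosen unsynced set, dropping watchers whose start
		// revision is already compacted and tracking the minimum revision.
		minRev := currentRev + 1
		for w := range unsynced {
			if w.minRev < compactRev {
				delete(unsynced, w) // real code cancels it with ErrCompacted
				continue
			}
			if w.minRev < minRev {
				minRev = w.minRev
			}
		}
		if len(unsynced) == 0 {
			return 0
		}
		// 3. read every key-value change since minRev once, then fan the
		// events out to the watchers.
		events := readSince(minRev)
		// 4. deliver, then move the caught-up watchers to the synced group.
		n := 0
		for w := range unsynced {
			w.ch <- events
			synced[w] = true
			delete(unsynced, w)
			n++
		}
		return n
	}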

@@ -341,11 +341,11 @@ func TestWatchRestore(t *testing.T) {
 }
 
 // TestWatchRestoreSyncedWatcher tests such a case that:
-// 1. watcher is created with a future revision "math.MaxInt64 - 2"
-// 2. watcher with a future revision is added to "synced" watcher group
-// 3. restore/overwrite storage with snapshot of a higher last revision
-// 4. restore operation moves "synced" to "unsynced" watcher group
-// 5. choose the watcher from step 1, without panic
+//  1. watcher is created with a future revision "math.MaxInt64 - 2"
+//  2. watcher with a future revision is added to "synced" watcher group
+//  3. restore/overwrite storage with snapshot of a higher last revision
+//  4. restore operation moves "synced" to "unsynced" watcher group
+//  5. choose the watcher from step 1, without panic
 func TestWatchRestoreSyncedWatcher(t *testing.T) {
 	b1, b1Path := betesting.NewDefaultTmpBackend(t)
 	s1 := newWatchableStore(zap.NewExample(), b1, &lease.FakeLessor{}, StoreConfig{})
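
Condensed, those five steps look roughly like the following fragment inside the test body; it assumes a second backend b2 holding the higher-revision snapshot, plus the watchableStore Restore and syncWatchers methods touched elsewhere in this commit:

	w1 := s1.NewWatchStream()
	defer w1.Close()

	// steps 1-2: a start revision far beyond currentRev puts the
	// watcher straight into the "synced" group.
	w1.Watch(0, []byte("foo"), nil, math.MaxInt64-2)

	// steps 3-4: restoring a snapshot with a higher last revision
	// overwrites the store and moves "synced" watchers to "unsynced".
	if err := s1.Restore(b2); err != nil {
		t.Fatal(err)
	}

	// step 5: the next sync pass must handle the future-revision
	// watcher from step 1 without panicking.
	s1.syncWatchers()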

@@ -70,6 +70,5 @@ snapshot to the end of the WAL are read first:
 
 This will give you the metadata, the last raft.State and the slice of
 raft.Entry items in the log.
-
 */
 package wal
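
The read sequence that doc text describes, sketched with the v3.5 import paths (the logger-taking wal.Open signature is assumed from that release line; the snapshot index and term are placeholders):

	package main

	import (
		"log"

		"go.etcd.io/etcd/server/v3/wal"
		"go.etcd.io/etcd/server/v3/wal/walpb"
		"go.uber.org/zap"
	)

	func main() {
		// Open the WAL at the snapshot to resume from; entries recorded
		// after that snapshot, up to the end of the log, become readable.
		w, err := wal.Open(zap.NewExample(), "/var/lib/etcd/member/wal",
			walpb.Snapshot{Index: 10, Term: 2})
		if err != nil {
			log.Fatal(err)
		}
		defer w.Close()

		// ReadAll returns the metadata written at creation time, the last
		// raft HardState, and the slice of raft entries in the log.
		metadata, state, ents, err := w.ReadAll()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("metadata=%q term=%d commit=%d entries=%d",
			metadata, state.Term, state.Commit, len(ents))
	}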