Merge pull request #13198 from serathius/bootstrap2
etcdserver: Restructure storage packages to make place for schema
Commit 28f86ee2cb
@@ -15,3 +15,9 @@ See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0...v3.6.0).
+### etcdutl v3
+
+- Add command to generate [shell completion](https://github.com/etcd-io/etcd/pull/13142).
+
+### Package `server`
+
+- Package `mvcc` was moved to `storage/mvcc`
+- Package `mvcc/backend` was moved to `storage/backend`
+- Package `mvcc/buckets` was moved to `storage/schema`
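A usage sketch for the new completion subcommand, assuming the cobra-style generator added in the linked PR (the exact flag surface comes from that PR, so treat this as an assumption): `etcdutl completion bash` prints a bash completion script that can be sourced or dropped into `/etc/bash_completion.d/`.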
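For downstream Go code, these moves are an import-path rename plus a package-qualifier rename (`buckets` becomes `schema`). A minimal migration sketch, using only calls that appear in the diff below (`backend.NewDefaultBackend`, `schema.NewMembershipStore`, `TrimMembershipFromBackend`); the wrapper function itself is hypothetical:

```go
package main

import (
	// Before this change the imports were:
	//   "go.etcd.io/etcd/server/v3/mvcc/backend"
	//   "go.etcd.io/etcd/server/v3/mvcc/buckets"
	"go.uber.org/zap"

	"go.etcd.io/etcd/server/v3/storage/backend"
	"go.etcd.io/etcd/server/v3/storage/schema"
)

// trimMembership is a hypothetical caller: the only migration needed is the
// import path plus renaming the package qualifier buckets -> schema.
func trimMembership(lg *zap.Logger, path string) error {
	be := backend.NewDefaultBackend(path) // unchanged API, new import path
	defer be.Close()
	// was: buckets.NewMembershipStore(lg, be)
	return schema.NewMembershipStore(lg, be).TrimMembershipFromBackend()
}
```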
@@ -31,8 +31,8 @@ import (
     "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
     "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
     "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/schema"
     "go.etcd.io/etcd/server/v3/verify"
     "go.etcd.io/etcd/server/v3/wal"
     "go.etcd.io/etcd/server/v3/wal/walpb"
@@ -311,7 +311,7 @@ func saveDB(lg *zap.Logger, destDB, srcDB string, idx uint64, term uint64, desir
 
     be := backend.NewDefaultBackend(destDB)
     defer be.Close()
-    ms := buckets.NewMembershipStore(lg, be)
+    ms := schema.NewMembershipStore(lg, be)
     if err := ms.TrimClusterFromBackend(); err != nil {
         lg.Fatal("bbolt tx.Membership failed", zap.Error(err))
     }
@@ -325,8 +325,8 @@ func saveDB(lg *zap.Logger, destDB, srcDB string, idx uint64, term uint64, desir
         tx := be.BatchTx()
         tx.Lock()
         defer tx.Unlock()
-        buckets.UnsafeCreateMetaBucket(tx)
-        buckets.UnsafeUpdateConsistentIndex(tx, idx, term, false)
+        schema.UnsafeCreateMetaBucket(tx)
+        schema.UnsafeUpdateConsistentIndex(tx, idx, term, false)
     } else {
         // Thanks to translateWAL not moving entries, but just replacing them with
         // 'empty', there is no need to update the consistency index.
@@ -22,7 +22,7 @@ import (
     "github.com/spf13/cobra"
     "go.etcd.io/etcd/pkg/v3/cobrautl"
     "go.etcd.io/etcd/server/v3/datadir"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
+    "go.etcd.io/etcd/server/v3/storage/backend"
 )
 
 var (
@@ -40,8 +40,8 @@ import (
     "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
     "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
     "go.etcd.io/etcd/server/v3/etcdserver/cindex"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/schema"
     "go.etcd.io/etcd/server/v3/verify"
     "go.etcd.io/etcd/server/v3/wal"
     "go.etcd.io/etcd/server/v3/wal/walpb"
@@ -136,7 +136,7 @@ func (s *v3Manager) Status(dbPath string) (ds Status, err error) {
         return fmt.Errorf("snapshot file integrity check failed. %d errors found.\n"+strings.Join(dbErrStrings, "\n"), len(dbErrStrings))
     }
     ds.TotalSize = tx.Size()
-    v := buckets.ReadStorageVersionFromSnapshot(tx)
+    v := schema.ReadStorageVersionFromSnapshot(tx)
     if v != nil {
         ds.Version = v.String()
     }
@@ -306,7 +306,7 @@ func (s *v3Manager) saveDB() error {
     be := backend.NewDefaultBackend(s.outDbPath())
     defer be.Close()
 
-    err = buckets.NewMembershipStore(s.lg, be).TrimMembershipFromBackend()
+    err = schema.NewMembershipStore(s.lg, be).TrimMembershipFromBackend()
     if err != nil {
         return err
     }
@@ -403,7 +403,7 @@ func (s *v3Manager) saveWALAndSnap() (*raftpb.HardState, error) {
     s.cl.SetStore(st)
     be := backend.NewDefaultBackend(s.outDbPath())
     defer be.Close()
-    s.cl.SetBackend(buckets.NewMembershipStore(s.lg, be))
+    s.cl.SetBackend(schema.NewMembershipStore(s.lg, be))
     for _, m := range s.cl.Members() {
         s.cl.AddMember(m, true)
     }
@@ -17,14 +17,14 @@ package auth
 import (
     "go.etcd.io/etcd/api/v3/authpb"
     "go.etcd.io/etcd/pkg/v3/adt"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/schema"
 
     "go.uber.org/zap"
 )
 
 func getMergedPerms(lg *zap.Logger, tx backend.BatchTx, userName string) *unifiedRangePermissions {
-    user := buckets.UnsafeGetUser(lg, tx, userName)
+    user := schema.UnsafeGetUser(lg, tx, userName)
     if user == nil {
         return nil
     }
@@ -33,7 +33,7 @@ func getMergedPerms(lg *zap.Logger, tx backend.BatchTx, userName string) *unifie
     writePerms := adt.NewIntervalTree()
 
     for _, roleName := range user.Roles {
-        role := buckets.UnsafeGetRole(lg, tx, roleName)
+        role := schema.UnsafeGetRole(lg, tx, roleName)
         if role == nil {
             continue
         }
@@ -28,8 +28,8 @@ import (
     "go.etcd.io/etcd/api/v3/authpb"
     pb "go.etcd.io/etcd/api/v3/etcdserverpb"
     "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/schema"
 
     "go.uber.org/zap"
     "golang.org/x/crypto/bcrypt"
@@ -225,7 +225,7 @@ func (as *authStore) AuthEnable() error {
         b.ForceCommit()
     }()
 
-    u := buckets.UnsafeGetUser(as.lg, tx, rootUser)
+    u := schema.UnsafeGetUser(as.lg, tx, rootUser)
     if u == nil {
         return ErrRootUserNotExist
     }
@@ -234,7 +234,7 @@ func (as *authStore) AuthEnable() error {
         return ErrRootRoleNotExist
     }
 
-    buckets.UnsafeSaveAuthEnabled(tx, true)
+    schema.UnsafeSaveAuthEnabled(tx, true)
 
     as.enabled = true
     as.tokenProvider.enable()
@@ -256,7 +256,7 @@ func (as *authStore) AuthDisable() {
     b := as.be
     tx := b.BatchTx()
     tx.Lock()
-    buckets.UnsafeSaveAuthEnabled(tx, false)
+    schema.UnsafeSaveAuthEnabled(tx, false)
     as.commitRevision(tx)
     tx.Unlock()
     b.ForceCommit()
@@ -286,7 +286,7 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string
     tx.Lock()
     defer tx.Unlock()
 
-    user := buckets.UnsafeGetUser(as.lg, tx, username)
+    user := schema.UnsafeGetUser(as.lg, tx, username)
     if user == nil {
         return nil, ErrAuthFailed
     }
@@ -324,7 +324,7 @@ func (as *authStore) CheckPassword(username, password string) (uint64, error) {
         tx.Lock()
         defer tx.Unlock()
 
-        user = buckets.UnsafeGetUser(as.lg, tx, username)
+        user = schema.UnsafeGetUser(as.lg, tx, username)
         if user == nil {
             return 0, ErrAuthFailed
         }
@@ -351,7 +351,7 @@ func (as *authStore) Recover(be backend.Backend) {
     tx := be.BatchTx()
     tx.Lock()
 
-    enabled := buckets.UnsafeReadAuthEnabled(tx)
+    enabled := schema.UnsafeReadAuthEnabled(tx)
     as.setRevision(getRevision(tx))
 
     tx.Unlock()
@@ -381,7 +381,7 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
     tx.Lock()
     defer tx.Unlock()
 
-    user := buckets.UnsafeGetUser(as.lg, tx, r.Name)
+    user := schema.UnsafeGetUser(as.lg, tx, r.Name)
     if user != nil {
         return nil, ErrUserAlreadyExist
     }
@@ -409,7 +409,7 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
         Options: options,
     }
 
-    buckets.UnsafePutUser(as.lg, tx, newUser)
+    schema.UnsafePutUser(as.lg, tx, newUser)
 
     as.commitRevision(tx)
 
@@ -427,12 +427,12 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete
     tx.Lock()
     defer tx.Unlock()
 
-    user := buckets.UnsafeGetUser(as.lg, tx, r.Name)
+    user := schema.UnsafeGetUser(as.lg, tx, r.Name)
     if user == nil {
         return nil, ErrUserNotFound
     }
 
-    buckets.UnsafeDeleteUser(tx, r.Name)
+    schema.UnsafeDeleteUser(tx, r.Name)
 
     as.commitRevision(tx)
 
@@ -452,7 +452,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
     tx.Lock()
     defer tx.Unlock()
 
-    user := buckets.UnsafeGetUser(as.lg, tx, r.Name)
+    user := schema.UnsafeGetUser(as.lg, tx, r.Name)
     if user == nil {
         return nil, ErrUserNotFound
     }
@@ -474,7 +474,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
         Options: user.Options,
     }
 
-    buckets.UnsafePutUser(as.lg, tx, updatedUser)
+    schema.UnsafePutUser(as.lg, tx, updatedUser)
 
     as.commitRevision(tx)
 
@@ -494,13 +494,13 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
     tx.Lock()
     defer tx.Unlock()
 
-    user := buckets.UnsafeGetUser(as.lg, tx, r.User)
+    user := schema.UnsafeGetUser(as.lg, tx, r.User)
     if user == nil {
         return nil, ErrUserNotFound
     }
 
     if r.Role != rootRole {
-        role := buckets.UnsafeGetRole(as.lg, tx, r.Role)
+        role := schema.UnsafeGetRole(as.lg, tx, r.Role)
         if role == nil {
             return nil, ErrRoleNotFound
         }
@@ -520,7 +520,7 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
     user.Roles = append(user.Roles, r.Role)
     sort.Strings(user.Roles)
 
-    buckets.UnsafePutUser(as.lg, tx, user)
+    schema.UnsafePutUser(as.lg, tx, user)
 
     as.invalidateCachedPerm(r.User)
 
@@ -538,7 +538,7 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
 func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
     tx := as.be.BatchTx()
     tx.Lock()
-    user := buckets.UnsafeGetUser(as.lg, tx, r.Name)
+    user := schema.UnsafeGetUser(as.lg, tx, r.Name)
     tx.Unlock()
 
     if user == nil {
@@ -553,7 +553,7 @@ func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse,
 func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
     tx := as.be.BatchTx()
     tx.Lock()
-    users := buckets.UnsafeGetAllUsers(as.lg, tx)
+    users := schema.UnsafeGetAllUsers(as.lg, tx)
     tx.Unlock()
 
     resp := &pb.AuthUserListResponse{Users: make([]string, len(users))}
@@ -577,7 +577,7 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs
     tx.Lock()
     defer tx.Unlock()
 
-    user := buckets.UnsafeGetUser(as.lg, tx, r.Name)
+    user := schema.UnsafeGetUser(as.lg, tx, r.Name)
     if user == nil {
         return nil, ErrUserNotFound
     }
@@ -598,7 +598,7 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs
         return nil, ErrRoleNotGranted
     }
 
-    buckets.UnsafePutUser(as.lg, tx, updatedUser)
+    schema.UnsafePutUser(as.lg, tx, updatedUser)
 
     as.invalidateCachedPerm(r.Name)
 
@@ -621,7 +621,7 @@ func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse,
 
     var resp pb.AuthRoleGetResponse
 
-    role := buckets.UnsafeGetRole(as.lg, tx, r.Role)
+    role := schema.UnsafeGetRole(as.lg, tx, r.Role)
     if role == nil {
         return nil, ErrRoleNotFound
     }
@@ -636,7 +636,7 @@ func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse,
 func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
     tx := as.be.BatchTx()
     tx.Lock()
-    roles := buckets.UnsafeGetAllRoles(as.lg, tx)
+    roles := schema.UnsafeGetAllRoles(as.lg, tx)
     tx.Unlock()
 
     resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))}
@@ -651,7 +651,7 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest)
     tx.Lock()
     defer tx.Unlock()
 
-    role := buckets.UnsafeGetRole(as.lg, tx, r.Role)
+    role := schema.UnsafeGetRole(as.lg, tx, r.Role)
     if role == nil {
         return nil, ErrRoleNotFound
     }
@@ -670,7 +670,7 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest)
         return nil, ErrPermissionNotGranted
     }
 
-    buckets.UnsafePutRole(as.lg, tx, updatedRole)
+    schema.UnsafePutRole(as.lg, tx, updatedRole)
 
     // TODO(mitake): currently single role update invalidates every cache
     // It should be optimized.
@@ -697,14 +697,14 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete
     tx.Lock()
     defer tx.Unlock()
 
-    role := buckets.UnsafeGetRole(as.lg, tx, r.Role)
+    role := schema.UnsafeGetRole(as.lg, tx, r.Role)
     if role == nil {
         return nil, ErrRoleNotFound
     }
 
-    buckets.UnsafeDeleteRole(tx, r.Role)
+    schema.UnsafeDeleteRole(tx, r.Role)
 
-    users := buckets.UnsafeGetAllUsers(as.lg, tx)
+    users := schema.UnsafeGetAllUsers(as.lg, tx)
     for _, user := range users {
         updatedUser := &authpb.User{
             Name: user.Name,
@@ -722,7 +722,7 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete
             continue
         }
 
-        buckets.UnsafePutUser(as.lg, tx, updatedUser)
+        schema.UnsafePutUser(as.lg, tx, updatedUser)
 
         as.invalidateCachedPerm(string(user.Name))
     }
@@ -742,7 +742,7 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
     tx.Lock()
     defer tx.Unlock()
 
-    role := buckets.UnsafeGetRole(as.lg, tx, r.Name)
+    role := schema.UnsafeGetRole(as.lg, tx, r.Name)
     if role != nil {
         return nil, ErrRoleAlreadyExist
     }
@@ -751,7 +751,7 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
         Name: []byte(r.Name),
     }
 
-    buckets.UnsafePutRole(as.lg, tx, newRole)
+    schema.UnsafePutRole(as.lg, tx, newRole)
 
     as.commitRevision(tx)
 
@@ -786,7 +786,7 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (
     tx.Lock()
     defer tx.Unlock()
 
-    role := buckets.UnsafeGetRole(as.lg, tx, r.Name)
+    role := schema.UnsafeGetRole(as.lg, tx, r.Name)
     if role == nil {
         return nil, ErrRoleNotFound
     }
@@ -810,7 +810,7 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (
         sort.Sort(permSlice(role.KeyPermission))
     }
 
-    buckets.UnsafePutRole(as.lg, tx, role)
+    schema.UnsafePutRole(as.lg, tx, role)
 
     // TODO(mitake): currently single role update invalidates every cache
     // It should be optimized.
@@ -850,7 +850,7 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE
     tx.Lock()
     defer tx.Unlock()
 
-    user := buckets.UnsafeGetUser(as.lg, tx, userName)
+    user := schema.UnsafeGetUser(as.lg, tx, userName)
     if user == nil {
         as.lg.Error("cannot find a user for permission check", zap.String("user-name", userName))
         return ErrPermissionDenied
@@ -890,7 +890,7 @@ func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
 
     tx := as.be.BatchTx()
     tx.Lock()
-    u := buckets.UnsafeGetUser(as.lg, tx, authInfo.Username)
+    u := schema.UnsafeGetUser(as.lg, tx, authInfo.Username)
     tx.Unlock()
 
     if u == nil {
@@ -930,11 +930,11 @@ func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCo
     tx := be.BatchTx()
     tx.Lock()
 
-    buckets.UnsafeCreateAuthBucket(tx)
-    tx.UnsafeCreateBucket(buckets.AuthUsers)
-    tx.UnsafeCreateBucket(buckets.AuthRoles)
+    schema.UnsafeCreateAuthBucket(tx)
+    tx.UnsafeCreateBucket(schema.AuthUsers)
+    tx.UnsafeCreateBucket(schema.AuthRoles)
 
-    enabled := buckets.UnsafeReadAuthEnabled(tx)
+    enabled := schema.UnsafeReadAuthEnabled(tx)
 
     as := &authStore{
         revision: getRevision(tx),
@@ -970,11 +970,11 @@ func hasRootRole(u *authpb.User) bool {
 
 func (as *authStore) commitRevision(tx backend.BatchTx) {
     atomic.AddUint64(&as.revision, 1)
-    buckets.UnsafeSaveAuthRevision(tx, as.Revision())
+    schema.UnsafeSaveAuthRevision(tx, as.Revision())
 }
 
 func getRevision(tx backend.BatchTx) uint64 {
-    return buckets.UnsafeReadAuthRevision(tx)
+    return schema.UnsafeReadAuthRevision(tx)
 }
 
 func (as *authStore) setRevision(rev uint64) {
@@ -1169,7 +1169,7 @@ func (as *authStore) WithRoot(ctx context.Context) context.Context {
 func (as *authStore) HasRole(user, role string) bool {
     tx := as.be.BatchTx()
     tx.Lock()
-    u := buckets.UnsafeGetUser(as.lg, tx, user)
+    u := schema.UnsafeGetUser(as.lg, tx, user)
     tx.Unlock()
 
     if u == nil {
@@ -27,8 +27,8 @@ import (
     "go.etcd.io/etcd/api/v3/authpb"
     pb "go.etcd.io/etcd/api/v3/etcdserverpb"
     "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
 
     "go.uber.org/zap"
     "golang.org/x/crypto/bcrypt"
@@ -20,8 +20,8 @@ import (
 
     pb "go.etcd.io/etcd/api/v3/etcdserverpb"
     "go.etcd.io/etcd/client/pkg/v3/types"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/schema"
 
     "go.uber.org/zap"
 )
@@ -59,7 +59,7 @@ func (a *AlarmStore) Activate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
         return m
     }
 
-    buckets.MustPutAlarm(a.lg, a.bg.Backend().BatchTx(), newAlarm)
+    schema.MustPutAlarm(a.lg, a.bg.Backend().BatchTx(), newAlarm)
     return newAlarm
 }
 
@@ -79,7 +79,7 @@ func (a *AlarmStore) Deactivate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
 
     delete(t, id)
 
-    buckets.MustDeleteAlarm(a.lg, a.bg.Backend().BatchTx(), m)
+    schema.MustDeleteAlarm(a.lg, a.bg.Backend().BatchTx(), m)
     return m
 }
 
@@ -105,8 +105,8 @@ func (a *AlarmStore) restore() error {
     tx := b.BatchTx()
 
     tx.Lock()
-    buckets.UnsafeCreateAlarmBucket(tx)
-    ms, err := buckets.UnsafeGetAllAlarms(tx)
+    schema.UnsafeCreateAlarmBucket(tx)
+    ms, err := schema.UnsafeGetAllAlarms(tx)
     tx.Unlock()
     if err != nil {
         return err
@@ -20,7 +20,7 @@ import (
     "time"
 
     pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"
 
     "github.com/jonboulle/clockwork"
     "go.uber.org/zap"
@@ -20,7 +20,7 @@ import (
     "time"
 
     pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"
 
     "github.com/jonboulle/clockwork"
     "go.uber.org/zap"
@@ -27,9 +27,9 @@ import (
     "go.etcd.io/etcd/raft/v3"
     "go.etcd.io/etcd/server/v3/auth"
     "go.etcd.io/etcd/server/v3/etcdserver"
-    "go.etcd.io/etcd/server/v3/mvcc"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/schema"
 
     "go.uber.org/zap"
 )
@@ -101,7 +101,7 @@ func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRe
 const snapshotSendBufferSize = 32 * 1024
 
 func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
-    ver := buckets.ReadStorageVersion(ms.bg.Backend().ReadTx())
+    ver := schema.ReadStorageVersion(ms.bg.Backend().ReadTx())
     storageVersion := ""
     if ver != nil {
         storageVersion = ver.String()
@@ -24,7 +24,7 @@ import (
     "go.etcd.io/etcd/server/v3/etcdserver"
     "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
     "go.etcd.io/etcd/server/v3/lease"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"
 
     "google.golang.org/grpc/codes"
     "google.golang.org/grpc/status"
@@ -20,7 +20,7 @@ import (
     "testing"
 
     "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"
 
     "google.golang.org/grpc/codes"
     "google.golang.org/grpc/status"
@@ -26,7 +26,7 @@ import (
     "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
     "go.etcd.io/etcd/server/v3/auth"
     "go.etcd.io/etcd/server/v3/etcdserver"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"
 
     "go.uber.org/zap"
 )
@@ -32,7 +32,7 @@ import (
     "go.etcd.io/etcd/server/v3/etcdserver/api"
     "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
     "go.etcd.io/etcd/server/v3/lease"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"
 
     "github.com/gogo/protobuf/proto"
     "go.uber.org/zap"
@@ -23,7 +23,7 @@ import (
     "go.etcd.io/etcd/server/v3/auth"
     "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
     "go.etcd.io/etcd/server/v3/lease"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"
 )
 
 type authApplierV3 struct {
@@ -22,8 +22,8 @@ import (
     "go.etcd.io/etcd/raft/v3/raftpb"
     "go.etcd.io/etcd/server/v3/config"
     "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/schema"
 
     "go.uber.org/zap"
 )
@@ -99,7 +99,7 @@ func openBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend {
 func recoverSnapshotBackend(cfg config.ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot, beExist bool, hooks backend.Hooks) (backend.Backend, error) {
     consistentIndex := uint64(0)
     if beExist {
-        consistentIndex, _ = buckets.ReadConsistentIndex(oldbe.BatchTx())
+        consistentIndex, _ = schema.ReadConsistentIndex(oldbe.BatchTx())
     }
     if snapshot.Metadata.Index <= consistentIndex {
         return oldbe, nil
server/etcdserver/bootstrap.go (new file, 599 lines)
@@ -0,0 +1,599 @@
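The new file consolidates the bootstrap sequence (snapshot directory setup, backend open and optional defrag, WAL recovery, and raft node construction) that previously lived in server.go and raft.go; the matching deletions appear in the hunks further down.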
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "os"
    "strings"
    "time"

    "github.com/coreos/go-semver/semver"
    "github.com/dustin/go-humanize"
    "go.uber.org/zap"

    "go.etcd.io/etcd/api/v3/etcdserverpb"
    "go.etcd.io/etcd/client/pkg/v3/fileutil"
    "go.etcd.io/etcd/client/pkg/v3/types"
    "go.etcd.io/etcd/pkg/v3/pbutil"
    "go.etcd.io/etcd/raft/v3"
    "go.etcd.io/etcd/raft/v3/raftpb"
    "go.etcd.io/etcd/server/v3/config"
    "go.etcd.io/etcd/server/v3/etcdserver/api"
    "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
    "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
    "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
    "go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery"
    "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
    "go.etcd.io/etcd/server/v3/etcdserver/cindex"
    "go.etcd.io/etcd/server/v3/storage/backend"
    "go.etcd.io/etcd/server/v3/storage/schema"
    "go.etcd.io/etcd/server/v3/wal"
    "go.etcd.io/etcd/server/v3/wal/walpb"
)

func bootstrap(cfg config.ServerConfig) (b *bootstrappedServer, err error) {
    st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)

    if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
        cfg.Logger.Warn(
            "exceeded recommended request limit",
            zap.Uint("max-request-bytes", cfg.MaxRequestBytes),
            zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))),
            zap.Int("recommended-request-bytes", recommendedMaxRequestBytes),
            zap.String("recommended-request-size", recommendedMaxRequestBytesString),
        )
    }

    if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
        return nil, fmt.Errorf("cannot access data directory: %v", terr)
    }

    haveWAL := wal.Exist(cfg.WALDir())
    ss := bootstrapSnapshot(cfg)

    be, ci, beExist, beHooks, err := bootstrapBackend(cfg)
    if err != nil {
        return nil, err
    }
    defer func() {
        if err != nil {
            be.Close()
        }
    }()

    prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout())
    if err != nil {
        return nil, err
    }

    switch {
    case !haveWAL && !cfg.NewCluster:
        b, err = bootstrapExistingClusterNoWAL(cfg, prt, st, be)
    case !haveWAL && cfg.NewCluster:
        b, err = bootstrapNewClusterNoWAL(cfg, prt, st, be)
    case haveWAL:
        b, err = bootstrapWithWAL(cfg, st, be, ss, beExist, beHooks, ci)
    default:
        be.Close()
        return nil, fmt.Errorf("unsupported bootstrap config")
    }
    if err != nil {
        return nil, err
    }

    if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
        return nil, fmt.Errorf("cannot access member directory: %v", terr)
    }
    b.prt = prt
    b.ci = ci
    b.st = st
    b.be = be
    b.ss = ss
    b.beHooks = beHooks
    return b, nil
}

type bootstrappedServer struct {
    raft    *bootstrappedRaft
    remotes []*membership.Member
    prt     http.RoundTripper
    ci      cindex.ConsistentIndexer
    st      v2store.Store
    be      backend.Backend
    ss      *snap.Snapshotter
    beHooks *backendHooks
}

func bootstrapSnapshot(cfg config.ServerConfig) *snap.Snapshotter {
    if err := fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
        cfg.Logger.Fatal(
            "failed to create snapshot directory",
            zap.String("path", cfg.SnapDir()),
            zap.Error(err),
        )
    }

    if err := fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool {
        return strings.HasPrefix(fileName, "tmp")
    }); err != nil {
        cfg.Logger.Error(
            "failed to remove temp file(s) in snapshot directory",
            zap.String("path", cfg.SnapDir()),
            zap.Error(err),
        )
    }
    return snap.New(cfg.Logger, cfg.SnapDir())
}

func bootstrapBackend(cfg config.ServerConfig) (be backend.Backend, ci cindex.ConsistentIndexer, beExist bool, beHooks *backendHooks, err error) {
    beExist = fileutil.Exist(cfg.BackendPath())
    ci = cindex.NewConsistentIndex(nil)
    beHooks = &backendHooks{lg: cfg.Logger, indexer: ci}
    be = openBackend(cfg, beHooks)
    ci.SetBackend(be)
    schema.CreateMetaBucket(be.BatchTx())
    if cfg.ExperimentalBootstrapDefragThresholdMegabytes != 0 {
        err := maybeDefragBackend(cfg, be)
        if err != nil {
            be.Close()
            return nil, nil, false, nil, err
        }
    }
    cfg.Logger.Debug("restore consistentIndex", zap.Uint64("index", ci.ConsistentIndex()))
    return be, ci, beExist, beHooks, nil
}

func maybeDefragBackend(cfg config.ServerConfig, be backend.Backend) error {
    size := be.Size()
    sizeInUse := be.SizeInUse()
    freeableMemory := uint(size - sizeInUse)
    thresholdBytes := cfg.ExperimentalBootstrapDefragThresholdMegabytes * 1024 * 1024
    if freeableMemory < thresholdBytes {
        cfg.Logger.Info("Skipping defragmentation",
            zap.Int64("current-db-size-bytes", size),
            zap.String("current-db-size", humanize.Bytes(uint64(size))),
            zap.Int64("current-db-size-in-use-bytes", sizeInUse),
            zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))),
            zap.Uint("experimental-bootstrap-defrag-threshold-bytes", thresholdBytes),
            zap.String("experimental-bootstrap-defrag-threshold", humanize.Bytes(uint64(thresholdBytes))),
        )
        return nil
    }
    return be.Defrag()
}

func bootstrapExistingClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper, st v2store.Store, be backend.Backend) (*bootstrappedServer, error) {
    if err := cfg.VerifyJoinExisting(); err != nil {
        return nil, err
    }
    cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
    if err != nil {
        return nil, err
    }
    existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt)
    if gerr != nil {
        return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
    }
    if err := membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil {
        return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
    }
    if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt) {
        return nil, fmt.Errorf("incompatible with current running cluster")
    }

    remotes := existingCluster.Members()
    cl.SetID(types.ID(0), existingCluster.ID())
    cl.SetStore(st)
    cl.SetBackend(schema.NewMembershipStore(cfg.Logger, be))
    br := bootstrapRaftFromCluster(cfg, cl, nil)
    cl.SetID(br.wal.id, existingCluster.ID())
    return &bootstrappedServer{
        raft:    br,
        remotes: remotes,
    }, nil
}

func bootstrapNewClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper, st v2store.Store, be backend.Backend) (*bootstrappedServer, error) {
    if err := cfg.VerifyBootstrap(); err != nil {
        return nil, err
    }
    cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
    if err != nil {
        return nil, err
    }
    m := cl.MemberByName(cfg.Name)
    if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.BootstrapTimeoutEffective()) {
        return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
    }
    if cfg.ShouldDiscover() {
        var str string
        str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
        if err != nil {
            return nil, &DiscoveryError{Op: "join", Err: err}
        }
        var urlsmap types.URLsMap
        urlsmap, err = types.NewURLsMap(str)
        if err != nil {
            return nil, err
        }
        if config.CheckDuplicateURL(urlsmap) {
            return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
        }
        if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil {
            return nil, err
        }
    }
    cl.SetStore(st)
    cl.SetBackend(schema.NewMembershipStore(cfg.Logger, be))
    br := bootstrapRaftFromCluster(cfg, cl, cl.MemberIDs())
    cl.SetID(br.wal.id, cl.ID())
    return &bootstrappedServer{
        remotes: nil,
        raft:    br,
    }, nil
}

func bootstrapWithWAL(cfg config.ServerConfig, st v2store.Store, be backend.Backend, ss *snap.Snapshotter, beExist bool, beHooks *backendHooks, ci cindex.ConsistentIndexer) (*bootstrappedServer, error) {
    if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
        return nil, fmt.Errorf("cannot write to member directory: %v", err)
    }

    if err := fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
        return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
    }

    if cfg.ShouldDiscover() {
        cfg.Logger.Warn(
            "discovery token is ignored since cluster already initialized; valid logs are found",
            zap.String("wal-dir", cfg.WALDir()),
        )
    }

    // Find a snapshot to start/restart a raft node
    walSnaps, err := wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir())
    if err != nil {
        return nil, err
    }
    // snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding
    // wal log entries
    snapshot, err := ss.LoadNewestAvailable(walSnaps)
    if err != nil && err != snap.ErrNoSnapshot {
        return nil, err
    }

    if snapshot != nil {
        if err = st.Recovery(snapshot.Data); err != nil {
            cfg.Logger.Panic("failed to recover from snapshot", zap.Error(err))
        }

        if err = assertNoV2StoreContent(cfg.Logger, st, cfg.V2Deprecation); err != nil {
            cfg.Logger.Error("illegal v2store content", zap.Error(err))
            return nil, err
        }

        cfg.Logger.Info(
            "recovered v2 store from snapshot",
            zap.Uint64("snapshot-index", snapshot.Metadata.Index),
            zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))),
        )

        if be, err = recoverSnapshotBackend(cfg, be, *snapshot, beExist, beHooks); err != nil {
            cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err))
        }
        s1, s2 := be.Size(), be.SizeInUse()
        cfg.Logger.Info(
            "recovered v3 backend from snapshot",
            zap.Int64("backend-size-bytes", s1),
            zap.String("backend-size", humanize.Bytes(uint64(s1))),
            zap.Int64("backend-size-in-use-bytes", s2),
            zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))),
        )
        if beExist {
            // TODO: remove kvindex != 0 checking when we do not expect users to upgrade
            // etcd from pre-3.0 release.
            kvindex := ci.ConsistentIndex()
            if kvindex < snapshot.Metadata.Index {
                if kvindex != 0 {
                    return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", cfg.BackendPath(), kvindex, snapshot.Metadata.Index)
                }
                cfg.Logger.Warn(
                    "consistent index was never saved",
                    zap.Uint64("snapshot-index", snapshot.Metadata.Index),
                )
            }
        }
    } else {
        cfg.Logger.Info("No snapshot found. Recovering WAL from scratch!")
    }

    r := &bootstrappedServer{}
    if !cfg.ForceNewCluster {
        r.raft = bootstrapRaftFromWal(cfg, snapshot)
    } else {
        r.raft = bootstrapRaftFromWalStandalone(cfg, snapshot)
    }

    r.raft.cl.SetStore(st)
    r.raft.cl.SetBackend(schema.NewMembershipStore(cfg.Logger, be))
    r.raft.cl.Recover(api.UpdateCapability)
    if r.raft.cl.Version() != nil && !r.raft.cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
        bepath := cfg.BackendPath()
        os.RemoveAll(bepath)
        return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
    }
    return r, nil
}

func bootstrapRaftFromCluster(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID) *bootstrappedRaft {
    member := cl.MemberByName(cfg.Name)
    id := member.ID
    wal := bootstrapNewWAL(cfg, id, cl.ID())
    peers := make([]raft.Peer, len(ids))
    for i, id := range ids {
        var ctx []byte
        ctx, err := json.Marshal((*cl).Member(id))
        if err != nil {
            cfg.Logger.Panic("failed to marshal member", zap.Error(err))
        }
        peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
    }
    cfg.Logger.Info(
        "starting local member",
        zap.String("local-member-id", id.String()),
        zap.String("cluster-id", cl.ID().String()),
    )
    s := wal.MemoryStorage()
    return &bootstrappedRaft{
        lg:        cfg.Logger,
        heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
        cl:        cl,
        config:    raftConfig(cfg, uint64(wal.id), s),
        peers:     peers,
        storage:   s,
        wal:       wal,
    }
}

func bootstrapRaftFromWal(cfg config.ServerConfig, snapshot *raftpb.Snapshot) *bootstrappedRaft {
    wal := bootstrapWALFromSnapshot(cfg.Logger, cfg.WALDir(), snapshot, cfg.UnsafeNoFsync)

    cfg.Logger.Info(
        "restarting local member",
        zap.String("cluster-id", wal.cid.String()),
        zap.String("local-member-id", wal.id.String()),
        zap.Uint64("commit-index", wal.st.Commit),
    )
    cl := membership.NewCluster(cfg.Logger)
    cl.SetID(wal.id, wal.cid)
    s := wal.MemoryStorage()
    return &bootstrappedRaft{
        lg:        cfg.Logger,
        heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
        cl:        cl,
        config:    raftConfig(cfg, uint64(wal.id), s),
        storage:   s,
        wal:       wal,
    }
}

func bootstrapRaftFromWalStandalone(cfg config.ServerConfig, snapshot *raftpb.Snapshot) *bootstrappedRaft {
    wal := bootstrapWALFromSnapshot(cfg.Logger, cfg.WALDir(), snapshot, cfg.UnsafeNoFsync)

    // discard the previously uncommitted entries
    wal.ents = wal.CommitedEntries()
    entries := wal.ConfigChangeEntries()
    // force commit config change entries
    wal.AppendAndCommitEntries(entries)

    cfg.Logger.Info(
        "forcing restart member",
        zap.String("cluster-id", wal.cid.String()),
        zap.String("local-member-id", wal.id.String()),
        zap.Uint64("commit-index", wal.st.Commit),
    )

    cl := membership.NewCluster(cfg.Logger)
    cl.SetID(wal.id, wal.cid)
    s := wal.MemoryStorage()
    return &bootstrappedRaft{
        lg:        cfg.Logger,
        heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
        cl:        cl,
        config:    raftConfig(cfg, uint64(wal.id), s),
        storage:   s,
        wal:       wal,
    }
}

func raftConfig(cfg config.ServerConfig, id uint64, s *raft.MemoryStorage) *raft.Config {
    return &raft.Config{
        ID:              id,
        ElectionTick:    cfg.ElectionTicks,
        HeartbeatTick:   1,
        Storage:         s,
        MaxSizePerMsg:   maxSizePerMsg,
        MaxInflightMsgs: maxInflightMsgs,
        CheckQuorum:     true,
        PreVote:         cfg.PreVote,
        Logger:          NewRaftLoggerZap(cfg.Logger.Named("raft")),
    }
}

type bootstrappedRaft struct {
    lg        *zap.Logger
    heartbeat time.Duration

    peers   []raft.Peer
    config  *raft.Config
    cl      *membership.RaftCluster
    storage *raft.MemoryStorage
    wal     *bootstrappedWAL
}

func (b *bootstrappedRaft) newRaftNode(ss *snap.Snapshotter) *raftNode {
    var n raft.Node
    if len(b.peers) == 0 {
        n = raft.RestartNode(b.config)
    } else {
        n = raft.StartNode(b.config, b.peers)
    }
    raftStatusMu.Lock()
    raftStatus = n.Status
    raftStatusMu.Unlock()
    return newRaftNode(
        raftNodeConfig{
            lg:          b.lg,
            isIDRemoved: func(id uint64) bool { return b.cl.IsIDRemoved(types.ID(id)) },
            Node:        n,
            heartbeat:   b.heartbeat,
            raftStorage: b.storage,
            storage:     NewStorage(b.wal.w, ss),
        },
    )
}

// bootstrapWALFromSnapshot reads the WAL at the given snap and returns the wal, its latest HardState and cluster ID, and all entries that appear
// after the position of the given snap in the WAL.
// The snap must have been previously saved to the WAL, or this call will panic.
func bootstrapWALFromSnapshot(lg *zap.Logger, waldir string, snapshot *raftpb.Snapshot, unsafeNoFsync bool) *bootstrappedWAL {
    var walsnap walpb.Snapshot
    if snapshot != nil {
        walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
    }
    repaired := false
    for {
        w, err := wal.Open(lg, waldir, walsnap)
        if err != nil {
            lg.Fatal("failed to open WAL", zap.Error(err))
        }
        if unsafeNoFsync {
            w.SetUnsafeNoFsync()
        }
        wmetadata, st, ents, err := w.ReadAll()
        if err != nil {
            w.Close()
            // we can only repair ErrUnexpectedEOF and we never repair twice.
            if repaired || err != io.ErrUnexpectedEOF {
                lg.Fatal("failed to read WAL, cannot be repaired", zap.Error(err))
            }
            if !wal.Repair(lg, waldir) {
                lg.Fatal("failed to repair WAL", zap.Error(err))
            } else {
                lg.Info("repaired WAL", zap.Error(err))
                repaired = true
            }
            continue
        }
        var metadata etcdserverpb.Metadata
        pbutil.MustUnmarshal(&metadata, wmetadata)
        id := types.ID(metadata.NodeID)
        cid := types.ID(metadata.ClusterID)
        return &bootstrappedWAL{
            lg:       lg,
            w:        w,
            id:       id,
            cid:      cid,
            st:       &st,
            ents:     ents,
            snapshot: snapshot,
        }
    }
}

func bootstrapNewWAL(cfg config.ServerConfig, nodeID, clusterID types.ID) *bootstrappedWAL {
    metadata := pbutil.MustMarshal(
        &etcdserverpb.Metadata{
            NodeID:    uint64(nodeID),
            ClusterID: uint64(clusterID),
        },
    )
    w, err := wal.Create(cfg.Logger, cfg.WALDir(), metadata)
    if err != nil {
        cfg.Logger.Panic("failed to create WAL", zap.Error(err))
    }
    if cfg.UnsafeNoFsync {
        w.SetUnsafeNoFsync()
    }
    return &bootstrappedWAL{
        lg:  cfg.Logger,
        w:   w,
        id:  nodeID,
        cid: clusterID,
    }
}

type bootstrappedWAL struct {
    lg *zap.Logger

    w        *wal.WAL
    id, cid  types.ID
    st       *raftpb.HardState
    ents     []raftpb.Entry
    snapshot *raftpb.Snapshot
}

func (wal *bootstrappedWAL) MemoryStorage() *raft.MemoryStorage {
    s := raft.NewMemoryStorage()
    if wal.snapshot != nil {
        s.ApplySnapshot(*wal.snapshot)
    }
    if wal.st != nil {
        s.SetHardState(*wal.st)
    }
    if len(wal.ents) != 0 {
        s.Append(wal.ents)
    }
    return s
}

func (wal *bootstrappedWAL) CommitedEntries() []raftpb.Entry {
    for i, ent := range wal.ents {
        if ent.Index > wal.st.Commit {
            wal.lg.Info(
                "discarding uncommitted WAL entries",
                zap.Uint64("entry-index", ent.Index),
                zap.Uint64("commit-index-from-wal", wal.st.Commit),
                zap.Int("number-of-discarded-entries", len(wal.ents)-i),
            )
            return wal.ents[:i]
        }
    }
    return wal.ents
}

func (wal *bootstrappedWAL) ConfigChangeEntries() []raftpb.Entry {
    return createConfigChangeEnts(
        wal.lg,
        getIDs(wal.lg, wal.snapshot, wal.ents),
        uint64(wal.id),
        wal.st.Term,
        wal.st.Commit,
    )
}

func (wal *bootstrappedWAL) AppendAndCommitEntries(ents []raftpb.Entry) {
    wal.ents = append(wal.ents, ents...)
    err := wal.w.Save(raftpb.HardState{}, ents)
    if err != nil {
        wal.lg.Fatal("failed to save hard state and entries", zap.Error(err))
    }
    if len(wal.ents) != 0 {
        wal.st.Commit = wal.ents[len(wal.ents)-1].Index
    }
}
@@ -18,8 +18,8 @@ import (
     "sync"
     "sync/atomic"
 
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/schema"
 )
 
 type Backend interface {
@@ -73,7 +73,7 @@ func (ci *consistentIndex) ConsistentIndex() uint64 {
     ci.mutex.Lock()
     defer ci.mutex.Unlock()
 
-    v, term := buckets.ReadConsistentIndex(ci.be.BatchTx())
+    v, term := schema.ReadConsistentIndex(ci.be.BatchTx())
     ci.SetConsistentIndex(v, term)
     return v
 }
@@ -86,7 +86,7 @@ func (ci *consistentIndex) SetConsistentIndex(v uint64, term uint64) {
 func (ci *consistentIndex) UnsafeSave(tx backend.BatchTx) {
     index := atomic.LoadUint64(&ci.consistentIndex)
     term := atomic.LoadUint64(&ci.term)
-    buckets.UnsafeUpdateConsistentIndex(tx, index, term, true)
+    schema.UnsafeUpdateConsistentIndex(tx, index, term, true)
 }
 
 func (ci *consistentIndex) SetBackend(be Backend) {
@@ -119,5 +119,5 @@ func (f *fakeConsistentIndex) SetBackend(_ Backend) {}
 func UpdateConsistentIndex(tx backend.BatchTx, index uint64, term uint64, onlyGrow bool) {
     tx.Lock()
     defer tx.Unlock()
-    buckets.UnsafeUpdateConsistentIndex(tx, index, term, onlyGrow)
+    schema.UnsafeUpdateConsistentIndex(tx, index, term, onlyGrow)
 }
@@ -20,9 +20,9 @@ import (
     "time"
 
     "github.com/stretchr/testify/assert"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+    "go.etcd.io/etcd/server/v3/storage/schema"
 )
 
 // TestConsistentIndex ensures that LoadConsistentIndex/Save/ConsistentIndex and backend.BatchTx can work well together.
@@ -37,7 +37,7 @@ func TestConsistentIndex(t *testing.T) {
     }
     tx.Lock()
 
-    buckets.UnsafeCreateMetaBucket(tx)
+    schema.UnsafeCreateMetaBucket(tx)
     tx.Unlock()
     be.ForceCommit()
     r := uint64(7890123)
@@ -28,7 +28,7 @@ import (
     "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
     "go.etcd.io/etcd/client/pkg/v3/types"
     "go.etcd.io/etcd/pkg/v3/traceutil"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"
 
     "go.uber.org/zap"
 )
@@ -29,10 +29,8 @@ import (
    "go.etcd.io/etcd/pkg/v3/pbutil"
    "go.etcd.io/etcd/raft/v3"
    "go.etcd.io/etcd/raft/v3/raftpb"
    "go.etcd.io/etcd/server/v3/config"
    "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
    "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
    "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
    "go.uber.org/zap"
)
@@ -418,134 +416,6 @@ func (r *raftNode) advanceTicks(ticks int) {
     }
 }
 
-[128 deleted lines omitted: bootstrapRaftFromCluster, bootstrapRaftFromWal, bootstrapRaftFromWalStandalone, raftConfig, the bootstrappedRaft struct, and (*bootstrappedRaft).newRaftNode, moved verbatim to server/etcdserver/bootstrap.go shown above]
 // getIDs returns an ordered set of IDs included in the given snapshot and
 // the entries. The given snapshot/entries can contain three kinds of
 // ID-related entry:
@@ -22,11 +22,9 @@ import (
    "math"
    "math/rand"
    "net/http"
    "os"
    "path"
    "regexp"
    "strconv"
    "strings"
    "sync"
    "sync/atomic"
    "time"
@@ -55,7 +53,6 @@ import (
     "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
     "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
     "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
-    "go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery"
     "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes"
     stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
     "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
@@ -65,10 +62,9 @@ import (
     serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
     "go.etcd.io/etcd/server/v3/lease"
     "go.etcd.io/etcd/server/v3/lease/leasehttp"
-    "go.etcd.io/etcd/server/v3/mvcc"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
-    "go.etcd.io/etcd/server/v3/wal"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/schema"
 )
 
 const (
@@ -296,8 +292,8 @@ type EtcdServer struct {
 
     *AccessController
 
-    // Ensure that storage version is updated only once.
-    storageVersionUpdated sync.Once
+    // Ensure that storage schema is updated only once.
+    updateStorageSchema sync.Once
 }
 
 type backendHooks struct {
@@ -317,7 +313,7 @@ func (bh *backendHooks) OnPreCommitUnsafe(tx backend.BatchTx) {
     bh.confStateLock.Lock()
     defer bh.confStateLock.Unlock()
     if bh.confStateDirty {
-        buckets.MustUnsafeSaveConfStateToBackend(bh.lg, tx, &bh.confState)
+        schema.MustUnsafeSaveConfStateToBackend(bh.lg, tx, &bh.confState)
         // save bh.confState
         bh.confStateDirty = false
     }
@@ -330,280 +326,6 @@ func (bh *backendHooks) SetConfState(confState *raftpb.ConfState) {
     bh.confStateDirty = true
 }
 
func bootstrap(cfg config.ServerConfig) (b *bootstrappedServer, err error) {
st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)

if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
cfg.Logger.Warn(
"exceeded recommended request limit",
zap.Uint("max-request-bytes", cfg.MaxRequestBytes),
zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))),
zap.Int("recommended-request-bytes", recommendedMaxRequestBytes),
zap.String("recommended-request-size", recommendedMaxRequestBytesString),
)
}

if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
return nil, fmt.Errorf("cannot access data directory: %v", terr)
}

haveWAL := wal.Exist(cfg.WALDir())
ss := bootstrapSnapshot(cfg)

be, ci, beExist, beHooks, err := bootstrapBackend(cfg)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
be.Close()
}
}()

prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout())
if err != nil {
return nil, err
}

switch {
case !haveWAL && !cfg.NewCluster:
b, err = bootstrapExistingClusterNoWAL(cfg, prt, st, be)
case !haveWAL && cfg.NewCluster:
b, err = bootstrapNewClusterNoWAL(cfg, prt, st, be)
case haveWAL:
b, err = bootstrapWithWAL(cfg, st, be, ss, beExist, beHooks, ci)
default:
be.Close()
return nil, fmt.Errorf("unsupported bootstrap config")
}
if err != nil {
return nil, err
}

if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
return nil, fmt.Errorf("cannot access member directory: %v", terr)
}
b.prt = prt
b.ci = ci
b.st = st
b.be = be
b.ss = ss
b.beHooks = beHooks
return b, nil
}

type bootstrappedServer struct {
raft *bootstrappedRaft
remotes []*membership.Member
prt http.RoundTripper
ci cindex.ConsistentIndexer
st v2store.Store
be backend.Backend
ss *snap.Snapshotter
beHooks *backendHooks
}

func bootstrapSnapshot(cfg config.ServerConfig) *snap.Snapshotter {
if err := fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
cfg.Logger.Fatal(
"failed to create snapshot directory",
zap.String("path", cfg.SnapDir()),
zap.Error(err),
)
}

if err := fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool {
return strings.HasPrefix(fileName, "tmp")
}); err != nil {
cfg.Logger.Error(
"failed to remove temp file(s) in snapshot directory",
zap.String("path", cfg.SnapDir()),
zap.Error(err),
)
}
return snap.New(cfg.Logger, cfg.SnapDir())
}

func bootstrapBackend(cfg config.ServerConfig) (be backend.Backend, ci cindex.ConsistentIndexer, beExist bool, beHooks *backendHooks, err error) {
beExist = fileutil.Exist(cfg.BackendPath())
ci = cindex.NewConsistentIndex(nil)
beHooks = &backendHooks{lg: cfg.Logger, indexer: ci}
be = openBackend(cfg, beHooks)
ci.SetBackend(be)
buckets.CreateMetaBucket(be.BatchTx())
if cfg.ExperimentalBootstrapDefragThresholdMegabytes != 0 {
err := maybeDefragBackend(cfg, be)
if err != nil {
be.Close()
return nil, nil, false, nil, err
}
}
cfg.Logger.Debug("restore consistentIndex", zap.Uint64("index", ci.ConsistentIndex()))
return be, ci, beExist, beHooks, nil
}

func bootstrapExistingClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper, st v2store.Store, be backend.Backend) (*bootstrappedServer, error) {
if err := cfg.VerifyJoinExisting(); err != nil {
return nil, err
}
cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
if err != nil {
return nil, err
}
existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt)
if gerr != nil {
return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
}
if err := membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil {
return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
}
if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt) {
return nil, fmt.Errorf("incompatible with current running cluster")
}

remotes := existingCluster.Members()
cl.SetID(types.ID(0), existingCluster.ID())
cl.SetStore(st)
cl.SetBackend(buckets.NewMembershipStore(cfg.Logger, be))
br := bootstrapRaftFromCluster(cfg, cl, nil)
cl.SetID(br.wal.id, existingCluster.ID())
return &bootstrappedServer{
raft: br,
remotes: remotes,
}, nil
}

func bootstrapNewClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper, st v2store.Store, be backend.Backend) (*bootstrappedServer, error) {
if err := cfg.VerifyBootstrap(); err != nil {
return nil, err
}
cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
if err != nil {
return nil, err
}
m := cl.MemberByName(cfg.Name)
if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.BootstrapTimeoutEffective()) {
return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
}
if cfg.ShouldDiscover() {
var str string
str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
if err != nil {
return nil, &DiscoveryError{Op: "join", Err: err}
}
var urlsmap types.URLsMap
urlsmap, err = types.NewURLsMap(str)
if err != nil {
return nil, err
}
if config.CheckDuplicateURL(urlsmap) {
return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
}
if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil {
return nil, err
}
}
cl.SetStore(st)
cl.SetBackend(buckets.NewMembershipStore(cfg.Logger, be))
br := bootstrapRaftFromCluster(cfg, cl, cl.MemberIDs())
cl.SetID(br.wal.id, cl.ID())
return &bootstrappedServer{
remotes: nil,
raft: br,
}, nil
}

func bootstrapWithWAL(cfg config.ServerConfig, st v2store.Store, be backend.Backend, ss *snap.Snapshotter, beExist bool, beHooks *backendHooks, ci cindex.ConsistentIndexer) (*bootstrappedServer, error) {
if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
return nil, fmt.Errorf("cannot write to member directory: %v", err)
}

if err := fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
}

if cfg.ShouldDiscover() {
cfg.Logger.Warn(
"discovery token is ignored since cluster already initialized; valid logs are found",
zap.String("wal-dir", cfg.WALDir()),
)
}

// Find a snapshot to start/restart a raft node
walSnaps, err := wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir())
if err != nil {
return nil, err
}
// snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding
// wal log entries
snapshot, err := ss.LoadNewestAvailable(walSnaps)
if err != nil && err != snap.ErrNoSnapshot {
return nil, err
}

if snapshot != nil {
if err = st.Recovery(snapshot.Data); err != nil {
cfg.Logger.Panic("failed to recover from snapshot", zap.Error(err))
}

if err = assertNoV2StoreContent(cfg.Logger, st, cfg.V2Deprecation); err != nil {
cfg.Logger.Error("illegal v2store content", zap.Error(err))
return nil, err
}

cfg.Logger.Info(
"recovered v2 store from snapshot",
zap.Uint64("snapshot-index", snapshot.Metadata.Index),
zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))),
)

if be, err = recoverSnapshotBackend(cfg, be, *snapshot, beExist, beHooks); err != nil {
cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err))
}
s1, s2 := be.Size(), be.SizeInUse()
cfg.Logger.Info(
"recovered v3 backend from snapshot",
zap.Int64("backend-size-bytes", s1),
zap.String("backend-size", humanize.Bytes(uint64(s1))),
zap.Int64("backend-size-in-use-bytes", s2),
zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))),
)
if beExist {
// TODO: remove kvindex != 0 checking when we do not expect users to upgrade
// etcd from pre-3.0 release.
kvindex := ci.ConsistentIndex()
if kvindex < snapshot.Metadata.Index {
if kvindex != 0 {
return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", cfg.BackendPath(), kvindex, snapshot.Metadata.Index)
}
cfg.Logger.Warn(
"consistent index was never saved",
zap.Uint64("snapshot-index", snapshot.Metadata.Index),
)
}
}
} else {
cfg.Logger.Info("No snapshot found. Recovering WAL from scratch!")
}

r := &bootstrappedServer{}
if !cfg.ForceNewCluster {
r.raft = bootstrapRaftFromWal(cfg, snapshot)
} else {
r.raft = bootstrapRaftFromWalStandalone(cfg, snapshot)
}

r.raft.cl.SetStore(st)
r.raft.cl.SetBackend(buckets.NewMembershipStore(cfg.Logger, be))
r.raft.cl.Recover(api.UpdateCapability)
if r.raft.cl.Version() != nil && !r.raft.cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
bepath := cfg.BackendPath()
os.RemoveAll(bepath)
return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
}
return r, nil
}

// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) {
@ -1353,7 +1075,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {

lg.Info("restored v2 store")

s.cluster.SetBackend(buckets.NewMembershipStore(lg, newbe))
s.cluster.SetBackend(schema.NewMembershipStore(lg, newbe))

lg.Info("restoring cluster configuration")

@ -2414,8 +2136,8 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
"saved snapshot",
zap.Uint64("snapshot-index", snap.Metadata.Index),
)
s.storageVersionUpdated.Do(func() {
err := serverversion.UpdateStorageVersion(s.lg, s.be.BatchTx())
s.updateStorageSchema.Do(func() {
err := schema.UpdateStorageSchema(s.lg, s.be.BatchTx())
if err != nil {
s.lg.Warn("failed to update storage version", zap.Error(err))
}
@ -2693,22 +2415,3 @@ func (s *EtcdServer) IsMemberExist(id types.ID) bool {
func (s *EtcdServer) raftStatus() raft.Status {
return s.r.Node.Status()
}

func maybeDefragBackend(cfg config.ServerConfig, be backend.Backend) error {
size := be.Size()
sizeInUse := be.SizeInUse()
freeableMemory := uint(size - sizeInUse)
thresholdBytes := cfg.ExperimentalBootstrapDefragThresholdMegabytes * 1024 * 1024
if freeableMemory < thresholdBytes {
cfg.Logger.Info("Skipping defragmentation",
zap.Int64("current-db-size-bytes", size),
zap.String("current-db-size", humanize.Bytes(uint64(size))),
zap.Int64("current-db-size-in-use-bytes", sizeInUse),
zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))),
zap.Uint("experimental-bootstrap-defrag-threshold-bytes", thresholdBytes),
zap.String("experimental-bootstrap-defrag-threshold", humanize.Bytes(uint64(thresholdBytes))),
)
return nil
}
return be.Defrag()
}

@ -51,9 +51,9 @@ import (
"go.etcd.io/etcd/server/v3/mock/mockstorage"
"go.etcd.io/etcd/server/v3/mock/mockstore"
"go.etcd.io/etcd/server/v3/mock/mockwait"
"go.etcd.io/etcd/server/v3/mvcc"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/storage/mvcc"
"go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
)
@ -651,7 +651,7 @@ func TestApplyConfigChangeUpdatesConsistIndex(t *testing.T) {

be, _ := betesting.NewDefaultTmpBackend(t)
defer betesting.Close(t, be)
buckets.CreateMetaBucket(be.BatchTx())
schema.CreateMetaBucket(be.BatchTx())

ci := cindex.NewConsistentIndex(be)
srv := &EtcdServer{
@ -696,9 +696,9 @@ func TestApplyConfigChangeUpdatesConsistIndex(t *testing.T) {
tx.Lock()
defer tx.Unlock()
srv.beHooks.OnPreCommitUnsafe(tx)
assert.Equal(t, raftpb.ConfState{Voters: []uint64{2}}, *buckets.UnsafeConfStateFromBackend(lg, tx))
assert.Equal(t, raftpb.ConfState{Voters: []uint64{2}}, *schema.UnsafeConfStateFromBackend(lg, tx))
})
rindex, rterm := buckets.ReadConsistentIndex(be.BatchTx())
rindex, rterm := schema.ReadConsistentIndex(be.BatchTx())
assert.Equal(t, consistIndex, rindex)
assert.Equal(t, uint64(4), rterm)
}

@ -19,7 +19,7 @@ import (

"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/storage/backend"

humanize "github.com/dustin/go-humanize"
"go.uber.org/zap"

@ -15,19 +15,10 @@
package etcdserver

import (
"io"

pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/pkg/v3/pbutil"
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
"go.etcd.io/etcd/server/v3/wal"
"go.etcd.io/etcd/server/v3/wal/walpb"

"go.uber.org/zap"
)

type Storage interface {
@ -81,133 +72,3 @@ func (st *storage) Release(snap raftpb.Snapshot) error {
}
return st.Snapshotter.ReleaseSnapDBs(snap)
}

// bootstrapWALFromSnapshot reads the WAL at the given snap and returns the wal, its latest HardState and cluster ID, and all entries that appear
// after the position of the given snap in the WAL.
// The snap must have been previously saved to the WAL, or this call will panic.
func bootstrapWALFromSnapshot(lg *zap.Logger, waldir string, snapshot *raftpb.Snapshot, unsafeNoFsync bool) *bootstrappedWAL {
var walsnap walpb.Snapshot
if snapshot != nil {
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
}
repaired := false
for {
w, err := wal.Open(lg, waldir, walsnap)
if err != nil {
lg.Fatal("failed to open WAL", zap.Error(err))
}
if unsafeNoFsync {
w.SetUnsafeNoFsync()
}
wmetadata, st, ents, err := w.ReadAll()
if err != nil {
w.Close()
// we can only repair ErrUnexpectedEOF and we never repair twice.
if repaired || err != io.ErrUnexpectedEOF {
lg.Fatal("failed to read WAL, cannot be repaired", zap.Error(err))
}
if !wal.Repair(lg, waldir) {
lg.Fatal("failed to repair WAL", zap.Error(err))
} else {
lg.Info("repaired WAL", zap.Error(err))
repaired = true
}
continue
}
var metadata pb.Metadata
pbutil.MustUnmarshal(&metadata, wmetadata)
id := types.ID(metadata.NodeID)
cid := types.ID(metadata.ClusterID)
return &bootstrappedWAL{
lg: lg,
w: w,
id: id,
cid: cid,
st: &st,
ents: ents,
snapshot: snapshot,
}
}
}

func bootstrapNewWAL(cfg config.ServerConfig, nodeID, clusterID types.ID) *bootstrappedWAL {
metadata := pbutil.MustMarshal(
&pb.Metadata{
NodeID: uint64(nodeID),
ClusterID: uint64(clusterID),
},
)
w, err := wal.Create(cfg.Logger, cfg.WALDir(), metadata)
if err != nil {
cfg.Logger.Panic("failed to create WAL", zap.Error(err))
}
if cfg.UnsafeNoFsync {
w.SetUnsafeNoFsync()
}
return &bootstrappedWAL{
lg: cfg.Logger,
w: w,
id: nodeID,
cid: clusterID,
}
}

type bootstrappedWAL struct {
lg *zap.Logger

w *wal.WAL
id, cid types.ID
st *raftpb.HardState
ents []raftpb.Entry
snapshot *raftpb.Snapshot
}

func (wal *bootstrappedWAL) MemoryStorage() *raft.MemoryStorage {
s := raft.NewMemoryStorage()
if wal.snapshot != nil {
s.ApplySnapshot(*wal.snapshot)
}
if wal.st != nil {
s.SetHardState(*wal.st)
}
if len(wal.ents) != 0 {
s.Append(wal.ents)
}
return s
}

func (wal *bootstrappedWAL) CommitedEntries() []raftpb.Entry {
for i, ent := range wal.ents {
if ent.Index > wal.st.Commit {
wal.lg.Info(
"discarding uncommitted WAL entries",
zap.Uint64("entry-index", ent.Index),
zap.Uint64("commit-index-from-wal", wal.st.Commit),
zap.Int("number-of-discarded-entries", len(wal.ents)-i),
)
return wal.ents[:i]
}
}
return wal.ents
}

func (wal *bootstrappedWAL) ConfigChangeEntries() []raftpb.Entry {
return createConfigChangeEnts(
wal.lg,
getIDs(wal.lg, wal.snapshot, wal.ents),
uint64(wal.id),
wal.st.Term,
wal.st.Commit,
)
}

func (wal *bootstrappedWAL) AppendAndCommitEntries(ents []raftpb.Entry) {
wal.ents = append(wal.ents, ents...)
err := wal.w.Save(raftpb.HardState{}, ents)
if err != nil {
wal.lg.Fatal("failed to save hard state and entries", zap.Error(err))
}
if len(wal.ents) != 0 {
wal.st.Commit = wal.ents[len(wal.ents)-1].Index
}
}

@ -30,7 +30,7 @@ import (
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/lease/leasehttp"
"go.etcd.io/etcd/server/v3/mvcc"
"go.etcd.io/etcd/server/v3/storage/mvcc"

"github.com/gogo/protobuf/proto"
"go.uber.org/zap"

@ -22,7 +22,7 @@ import (
"time"

"go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap/zaptest"
)

@ -26,8 +26,8 @@ import (

pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/server/v3/lease/leasepb"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap"
)

@ -336,7 +336,7 @@ func (le *lessor) Revoke(id LeaseID) error {
// lease deletion needs to be in the same backend transaction with the
// kv deletion. Or we might end up with not executing the revoke or not
// deleting the keys if etcdserver fails in between.
buckets.UnsafeDeleteLease(le.b.BatchTx(), &leasepb.Lease{ID: int64(l.ID)})
schema.UnsafeDeleteLease(le.b.BatchTx(), &leasepb.Lease{ID: int64(l.ID)})

txn.End()

@ -770,8 +770,8 @@ func (le *lessor) initAndRecover() {
tx := le.b.BatchTx()

tx.Lock()
buckets.UnsafeCreateLeaseBucket(tx)
lpbs := buckets.MustUnsafeGetAllLeases(tx)
schema.UnsafeCreateLeaseBucket(tx)
lpbs := schema.MustUnsafeGetAllLeases(tx)
tx.Unlock()
for _, lpb := range lpbs {
ID := LeaseID(lpb.ID)
@ -818,7 +818,7 @@ func (l *Lease) persistTo(b backend.Backend) {
tx := b.BatchTx()
tx.Lock()
defer tx.Unlock()
buckets.MustUnsafePutLease(tx, &lpb)
schema.MustUnsafePutLease(tx, &lpb)
}

// TTL returns the TTL of the Lease.

@ -19,7 +19,7 @@ import (
"testing"
"time"

betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap"
)

@ -27,8 +27,8 @@ import (
"time"

pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap"
)

@ -95,7 +95,7 @@ func TestLessorGrant(t *testing.T) {
tx := be.BatchTx()
tx.Lock()
defer tx.Unlock()
lpb := buckets.MustUnsafeGetLease(tx, int64(l.ID))
lpb := schema.MustUnsafeGetLease(tx, int64(l.ID))
if lpb == nil {
t.Errorf("lpb = %d, want not nil", lpb)
}
@ -199,7 +199,7 @@ func TestLessorRevoke(t *testing.T) {
tx := be.BatchTx()
tx.Lock()
defer tx.Unlock()
lpb := buckets.MustUnsafeGetLease(tx, int64(l.ID))
lpb := schema.MustUnsafeGetLease(tx, int64(l.ID))
if lpb != nil {
t.Errorf("lpb = %d, want nil", lpb)
}

@ -20,7 +20,7 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/mvcc"
"go.etcd.io/etcd/server/v3/storage/mvcc"
)

type watchRange struct {

@ -20,8 +20,8 @@ import (
"time"

"github.com/stretchr/testify/assert"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/storage/schema"
)

func BenchmarkBackendPut(b *testing.B) {
@ -42,13 +42,13 @@ func BenchmarkBackendPut(b *testing.B) {
batchTx := backend.BatchTx()

batchTx.Lock()
batchTx.UnsafeCreateBucket(buckets.Test)
batchTx.UnsafeCreateBucket(schema.Test)
batchTx.Unlock()

b.ResetTimer()
for i := 0; i < b.N; i++ {
batchTx.Lock()
batchTx.UnsafePut(buckets.Test, keys[i], value)
batchTx.UnsafePut(schema.Test, keys[i], value)
batchTx.Unlock()
}
}

@ -23,9 +23,9 @@ import (

"github.com/stretchr/testify/assert"
bolt "go.etcd.io/bbolt"
"go.etcd.io/etcd/server/v3/mvcc/backend"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
"go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/storage/schema"
)

func TestBackendClose(t *testing.T) {
@ -53,8 +53,8 @@ func TestBackendSnapshot(t *testing.T) {

tx := b.BatchTx()
tx.Lock()
tx.UnsafeCreateBucket(buckets.Test)
tx.UnsafePut(buckets.Test, []byte("foo"), []byte("bar"))
tx.UnsafeCreateBucket(schema.Test)
tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
tx.Unlock()
b.ForceCommit()

@ -78,7 +78,7 @@ func TestBackendSnapshot(t *testing.T) {

newTx := nb.BatchTx()
newTx.Lock()
ks, _ := newTx.UnsafeRange(buckets.Test, []byte("foo"), []byte("goo"), 0)
ks, _ := newTx.UnsafeRange(schema.Test, []byte("foo"), []byte("goo"), 0)
if len(ks) != 1 {
t.Errorf("len(kvs) = %d, want 1", len(ks))
}
@ -95,8 +95,8 @@ func TestBackendBatchIntervalCommit(t *testing.T) {

tx := b.BatchTx()
tx.Lock()
tx.UnsafeCreateBucket(buckets.Test)
tx.UnsafePut(buckets.Test, []byte("foo"), []byte("bar"))
tx.UnsafeCreateBucket(schema.Test)
tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
tx.Unlock()

for i := 0; i < 10; i++ {
@ -127,9 +127,9 @@ func TestBackendDefrag(t *testing.T) {

tx := b.BatchTx()
tx.Lock()
tx.UnsafeCreateBucket(buckets.Test)
tx.UnsafeCreateBucket(schema.Test)
for i := 0; i < backend.DefragLimitForTest()+100; i++ {
tx.UnsafePut(buckets.Test, []byte(fmt.Sprintf("foo_%d", i)), []byte("bar"))
tx.UnsafePut(schema.Test, []byte(fmt.Sprintf("foo_%d", i)), []byte("bar"))
}
tx.Unlock()
b.ForceCommit()
@ -138,7 +138,7 @@ func TestBackendDefrag(t *testing.T) {
tx = b.BatchTx()
tx.Lock()
for i := 0; i < 50; i++ {
tx.UnsafeDelete(buckets.Test, []byte(fmt.Sprintf("foo_%d", i)))
tx.UnsafeDelete(schema.Test, []byte(fmt.Sprintf("foo_%d", i)))
}
tx.Unlock()
b.ForceCommit()
@ -172,8 +172,8 @@ func TestBackendDefrag(t *testing.T) {
// try put more keys after shrink.
tx = b.BatchTx()
tx.Lock()
tx.UnsafeCreateBucket(buckets.Test)
tx.UnsafePut(buckets.Test, []byte("more"), []byte("bar"))
tx.UnsafeCreateBucket(schema.Test)
tx.UnsafePut(schema.Test, []byte("more"), []byte("bar"))
tx.Unlock()
b.ForceCommit()
}
@ -185,15 +185,15 @@ func TestBackendWriteback(t *testing.T) {

tx := b.BatchTx()
tx.Lock()
tx.UnsafeCreateBucket(buckets.Key)
tx.UnsafePut(buckets.Key, []byte("abc"), []byte("bar"))
tx.UnsafePut(buckets.Key, []byte("def"), []byte("baz"))
tx.UnsafePut(buckets.Key, []byte("overwrite"), []byte("1"))
tx.UnsafeCreateBucket(schema.Key)
tx.UnsafePut(schema.Key, []byte("abc"), []byte("bar"))
tx.UnsafePut(schema.Key, []byte("def"), []byte("baz"))
tx.UnsafePut(schema.Key, []byte("overwrite"), []byte("1"))
tx.Unlock()

// overwrites should be propagated too
tx.Lock()
tx.UnsafePut(buckets.Key, []byte("overwrite"), []byte("2"))
tx.UnsafePut(schema.Key, []byte("overwrite"), []byte("2"))
tx.Unlock()

keys := []struct {
@ -246,7 +246,7 @@ func TestBackendWriteback(t *testing.T) {
func() {
rtx.RLock()
defer rtx.RUnlock()
k, v := rtx.UnsafeRange(buckets.Key, tt.key, tt.end, tt.limit)
k, v := rtx.UnsafeRange(schema.Key, tt.key, tt.end, tt.limit)
if !reflect.DeepEqual(tt.wkey, k) || !reflect.DeepEqual(tt.wval, v) {
t.Errorf("#%d: want k=%+v, v=%+v; got k=%+v, v=%+v", i, tt.wkey, tt.wval, k, v)
}
@ -261,20 +261,20 @@ func TestConcurrentReadTx(t *testing.T) {

wtx1 := b.BatchTx()
wtx1.Lock()
wtx1.UnsafeCreateBucket(buckets.Key)
wtx1.UnsafePut(buckets.Key, []byte("abc"), []byte("ABC"))
wtx1.UnsafePut(buckets.Key, []byte("overwrite"), []byte("1"))
wtx1.UnsafeCreateBucket(schema.Key)
wtx1.UnsafePut(schema.Key, []byte("abc"), []byte("ABC"))
wtx1.UnsafePut(schema.Key, []byte("overwrite"), []byte("1"))
wtx1.Unlock()

wtx2 := b.BatchTx()
wtx2.Lock()
wtx2.UnsafePut(buckets.Key, []byte("def"), []byte("DEF"))
wtx2.UnsafePut(buckets.Key, []byte("overwrite"), []byte("2"))
wtx2.UnsafePut(schema.Key, []byte("def"), []byte("DEF"))
wtx2.UnsafePut(schema.Key, []byte("overwrite"), []byte("2"))
wtx2.Unlock()

rtx := b.ConcurrentReadTx()
rtx.RLock() // no-op
k, v := rtx.UnsafeRange(buckets.Key, []byte("abc"), []byte("\xff"), 0)
k, v := rtx.UnsafeRange(schema.Key, []byte("abc"), []byte("\xff"), 0)
rtx.RUnlock()
wKey := [][]byte{[]byte("abc"), []byte("def"), []byte("overwrite")}
wVal := [][]byte{[]byte("ABC"), []byte("DEF"), []byte("2")}
@ -291,10 +291,10 @@ func TestBackendWritebackForEach(t *testing.T) {

tx := b.BatchTx()
tx.Lock()
tx.UnsafeCreateBucket(buckets.Key)
tx.UnsafeCreateBucket(schema.Key)
for i := 0; i < 5; i++ {
k := []byte(fmt.Sprintf("%04d", i))
tx.UnsafePut(buckets.Key, k, []byte("bar"))
tx.UnsafePut(schema.Key, k, []byte("bar"))
}
tx.Unlock()

@ -302,10 +302,10 @@ func TestBackendWritebackForEach(t *testing.T) {
b.ForceCommit()

tx.Lock()
tx.UnsafeCreateBucket(buckets.Key)
tx.UnsafeCreateBucket(schema.Key)
for i := 5; i < 20; i++ {
k := []byte(fmt.Sprintf("%04d", i))
tx.UnsafePut(buckets.Key, k, []byte("bar"))
tx.UnsafePut(schema.Key, k, []byte("bar"))
}
tx.Unlock()

@ -316,7 +316,7 @@ func TestBackendWritebackForEach(t *testing.T) {
}
rtx := b.ReadTx()
rtx.RLock()
assert.NoError(t, rtx.UnsafeForEach(buckets.Key, getSeq))
assert.NoError(t, rtx.UnsafeForEach(schema.Key, getSeq))
rtx.RUnlock()

partialSeq := seq
@ -325,7 +325,7 @@ func TestBackendWritebackForEach(t *testing.T) {
b.ForceCommit()

tx.Lock()
assert.NoError(t, tx.UnsafeForEach(buckets.Key, getSeq))
assert.NoError(t, tx.UnsafeForEach(schema.Key, getSeq))
tx.Unlock()

if seq != partialSeq {

@ -20,9 +20,9 @@ import (
"time"

bolt "go.etcd.io/bbolt"
"go.etcd.io/etcd/server/v3/mvcc/backend"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
"go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/storage/schema"
)

func TestBatchTxPut(t *testing.T) {
@ -34,18 +34,18 @@ func TestBatchTxPut(t *testing.T) {
tx.Lock()

// create bucket
tx.UnsafeCreateBucket(buckets.Test)
tx.UnsafeCreateBucket(schema.Test)

// put
v := []byte("bar")
tx.UnsafePut(buckets.Test, []byte("foo"), v)
tx.UnsafePut(schema.Test, []byte("foo"), v)

tx.Unlock()

// check put result before and after tx is committed
for k := 0; k < 2; k++ {
tx.Lock()
_, gv := tx.UnsafeRange(buckets.Test, []byte("foo"), nil, 0)
_, gv := tx.UnsafeRange(schema.Test, []byte("foo"), nil, 0)
tx.Unlock()
if !reflect.DeepEqual(gv[0], v) {
t.Errorf("v = %s, want %s", string(gv[0]), string(v))
@ -62,12 +62,12 @@ func TestBatchTxRange(t *testing.T) {
tx.Lock()
defer tx.Unlock()

tx.UnsafeCreateBucket(buckets.Test)
tx.UnsafeCreateBucket(schema.Test)
// put keys
allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2")}
allVals := [][]byte{[]byte("bar"), []byte("bar1"), []byte("bar2")}
for i := range allKeys {
tx.UnsafePut(buckets.Test, allKeys[i], allVals[i])
tx.UnsafePut(schema.Test, allKeys[i], allVals[i])
}

tests := []struct {
@ -115,7 +115,7 @@ func TestBatchTxRange(t *testing.T) {
},
}
for i, tt := range tests {
keys, vals := tx.UnsafeRange(buckets.Test, tt.key, tt.endKey, tt.limit)
keys, vals := tx.UnsafeRange(schema.Test, tt.key, tt.endKey, tt.limit)
if !reflect.DeepEqual(keys, tt.wkeys) {
t.Errorf("#%d: keys = %+v, want %+v", i, keys, tt.wkeys)
}
@ -132,17 +132,17 @@ func TestBatchTxDelete(t *testing.T) {
tx := b.BatchTx()
tx.Lock()

tx.UnsafeCreateBucket(buckets.Test)
tx.UnsafePut(buckets.Test, []byte("foo"), []byte("bar"))
tx.UnsafeCreateBucket(schema.Test)
tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))

tx.UnsafeDelete(buckets.Test, []byte("foo"))
tx.UnsafeDelete(schema.Test, []byte("foo"))

tx.Unlock()

// check put result before and after tx is committed
for k := 0; k < 2; k++ {
tx.Lock()
ks, _ := tx.UnsafeRange(buckets.Test, []byte("foo"), nil, 0)
ks, _ := tx.UnsafeRange(schema.Test, []byte("foo"), nil, 0)
tx.Unlock()
if len(ks) != 0 {
t.Errorf("keys on foo = %v, want nil", ks)
@ -157,15 +157,15 @@ func TestBatchTxCommit(t *testing.T) {

tx := b.BatchTx()
tx.Lock()
tx.UnsafeCreateBucket(buckets.Test)
tx.UnsafePut(buckets.Test, []byte("foo"), []byte("bar"))
tx.UnsafeCreateBucket(schema.Test)
tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
tx.Unlock()

tx.Commit()

// check whether put happens via db view
backend.DbFromBackendForTest(b).View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(buckets.Test.Name())
bucket := tx.Bucket(schema.Test.Name())
if bucket == nil {
t.Errorf("bucket test does not exit")
return nil
@ -186,14 +186,14 @@ func TestBatchTxBatchLimitCommit(t *testing.T) {

tx := b.BatchTx()
tx.Lock()
tx.UnsafeCreateBucket(buckets.Test)
tx.UnsafePut(buckets.Test, []byte("foo"), []byte("bar"))
tx.UnsafeCreateBucket(schema.Test)
tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
tx.Unlock()

// batch limit commit should have been triggered
// check whether put happens via db view
backend.DbFromBackendForTest(b).View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(buckets.Test.Name())
bucket := tx.Bucket(schema.Test.Name())
if bucket == nil {
t.Errorf("bucket test does not exit")
return nil

@ -20,13 +20,13 @@ import (
"time"

"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/server/v3/mvcc/backend"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
"go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/storage/schema"
)

var (
bucket = buckets.Test
bucket = schema.Test
key = []byte("key")
)

@ -21,7 +21,7 @@ import (
"time"

"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.uber.org/zap/zaptest"
)

@ -20,7 +20,7 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/storage/backend"
)

type RangeOptions struct {

@ -26,8 +26,8 @@ import (
"go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc/backend"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
"go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"

"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"

@ -27,8 +27,8 @@ import (
"go.etcd.io/etcd/pkg/v3/schedule"
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/storage/schema"

"go.uber.org/zap"
)
@ -122,8 +122,8 @@ func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfi

tx := s.b.BatchTx()
tx.Lock()
tx.UnsafeCreateBucket(buckets.Key)
tx.UnsafeCreateBucket(buckets.Meta)
tx.UnsafeCreateBucket(schema.Key)
tx.UnsafeCreateBucket(schema.Meta)
tx.Unlock()
s.b.ForceCommit()

@ -161,7 +161,7 @@ func (s *store) Hash() (hash uint32, revision int64, err error) {
start := time.Now()

s.b.ForceCommit()
h, err := s.b.Hash(buckets.DefaultIgnores)
h, err := s.b.Hash(schema.DefaultIgnores)

hashSec.Observe(time.Since(start).Seconds())
return h, s.currentRev, err
@ -197,8 +197,8 @@ func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev
lower := revision{main: compactRev + 1}
h := crc32.New(crc32.MakeTable(crc32.Castagnoli))

h.Write(buckets.Key.Name())
err = tx.UnsafeForEach(buckets.Key, func(k, v []byte) error {
h.Write(schema.Key.Name())
err = tx.UnsafeForEach(schema.Key, func(k, v []byte) error {
kr := bytesToRev(k)
if !upper.GreaterThan(kr) {
return nil
@ -340,8 +340,8 @@ func (s *store) restore() error {

s.lg.Info(
"restored last compact revision",
zap.Stringer("meta-bucket-name", buckets.Meta),
zap.String("meta-bucket-name-key", string(buckets.FinishedCompactKeyName)),
zap.Stringer("meta-bucket-name", schema.Meta),
zap.String("meta-bucket-name-key", string(schema.FinishedCompactKeyName)),
zap.Int64("restored-compact-revision", s.compactMainRev),
)
s.revMu.Unlock()
@ -351,7 +351,7 @@ func (s *store) restore() error {
keysGauge.Set(0)
rkvc, revc := restoreIntoIndex(s.lg, s.kvindex)
for {
keys, vals := tx.UnsafeRange(buckets.Key, min, max, int64(restoreChunkKeys))
keys, vals := tx.UnsafeRange(schema.Key, min, max, int64(restoreChunkKeys))
if len(keys) == 0 {
break
}
@ -412,8 +412,8 @@ func (s *store) restore() error {

s.lg.Info(
"resume scheduled compaction",
zap.Stringer("meta-bucket-name", buckets.Meta),
zap.String("meta-bucket-name-key", string(buckets.ScheduledCompactKeyName)),
zap.Stringer("meta-bucket-name", schema.Meta),
zap.String("meta-bucket-name-key", string(schema.ScheduledCompactKeyName)),
zap.Int64("scheduled-compact-revision", scheduledCompact),
)
}

@ -22,8 +22,8 @@ import (
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/etcdserver/cindex"
"go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/storage/schema"

"go.uber.org/zap"
)
@ -84,7 +84,7 @@ func BenchmarkConsistentIndex(b *testing.B) {

tx := be.BatchTx()
tx.Lock()
buckets.UnsafeCreateMetaBucket(tx)
schema.UnsafeCreateMetaBucket(tx)
ci.UnsafeSave(tx)
tx.Unlock()

@ -18,7 +18,7 @@ import (
"encoding/binary"
"time"

"go.etcd.io/etcd/server/v3/mvcc/buckets"
"go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap"
)

@ -43,11 +43,11 @@ func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struc

tx := s.b.BatchTx()
tx.Lock()
keys, _ := tx.UnsafeRange(buckets.Key, last, end, int64(batchNum))
keys, _ := tx.UnsafeRange(schema.Key, last, end, int64(batchNum))
for _, key := range keys {
rev = bytesToRev(key)
if _, ok := keep[rev]; !ok {
tx.UnsafeDelete(buckets.Key, key)
tx.UnsafeDelete(schema.Key, key)
keyCompactions++
}
}

@ -23,8 +23,8 @@ import (

"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap"
)

@ -75,7 +75,7 @@ func TestScheduleCompaction(t *testing.T) {
ibytes := newRevBytes()
for _, rev := range revs {
revToBytes(rev, ibytes)
tx.UnsafePut(buckets.Key, ibytes, []byte("bar"))
tx.UnsafePut(schema.Key, ibytes, []byte("bar"))
}
tx.Unlock()

@ -84,14 +84,14 @@ func TestScheduleCompaction(t *testing.T) {
tx.Lock()
for _, rev := range tt.wrevs {
revToBytes(rev, ibytes)
keys, _ := tx.UnsafeRange(buckets.Key, ibytes, nil, 0)
keys, _ := tx.UnsafeRange(schema.Key, ibytes, nil, 0)
if len(keys) != 1 {
t.Errorf("#%d: range on %v = %d, want 1", i, rev, len(keys))
}
}
vals, _ := UnsafeReadFinishedCompact(tx)
if !reflect.DeepEqual(vals, tt.rev) {
t.Errorf("#%d: vals on %v = %+v, want %+v", i, buckets.FinishedCompactKeyName, vals, tt.rev)
t.Errorf("#%d: vals on %v = %+v, want %+v", i, schema.FinishedCompactKeyName, vals, tt.rev)
}
tx.Unlock()

@ -35,9 +35,9 @@ import (
"go.etcd.io/etcd/pkg/v3/schedule"
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc/backend"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
"go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/storage/schema"

"go.uber.org/zap"
)
@ -149,12 +149,12 @@ func TestStorePut(t *testing.T) {
}

wact := []testutil.Action{
{Name: "seqput", Params: []interface{}{buckets.Key, tt.wkey, data}},
{Name: "seqput", Params: []interface{}{schema.Key, tt.wkey, data}},
}

if tt.rr != nil {
wact = []testutil.Action{
{Name: "seqput", Params: []interface{}{buckets.Key, tt.wkey, data}},
{Name: "seqput", Params: []interface{}{schema.Key, tt.wkey, data}},
}
}

@ -229,7 +229,7 @@ func TestStoreRange(t *testing.T) {
wstart := newRevBytes()
revToBytes(tt.idxr.revs[0], wstart)
wact := []testutil.Action{
{Name: "range", Params: []interface{}{buckets.Key, wstart, []byte(nil), int64(0)}},
{Name: "range", Params: []interface{}{schema.Key, wstart, []byte(nil), int64(0)}},
}
if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact)
@ -304,7 +304,7 @@ func TestStoreDeleteRange(t *testing.T) {
t.Errorf("#%d: marshal err = %v, want nil", i, err)
}
wact := []testutil.Action{
{Name: "seqput", Params: []interface{}{buckets.Key, tt.wkey, data}},
{Name: "seqput", Params: []interface{}{schema.Key, tt.wkey, data}},
}
if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact)
@ -343,10 +343,10 @@ func TestStoreCompact(t *testing.T) {
end := make([]byte, 8)
binary.BigEndian.PutUint64(end, uint64(4))
wact := []testutil.Action{
{Name: "put", Params: []interface{}{buckets.Meta, buckets.ScheduledCompactKeyName, newTestRevBytes(revision{3, 0})}},
{Name: "range", Params: []interface{}{buckets.Key, make([]byte, 17), end, int64(10000)}},
{Name: "delete", Params: []interface{}{buckets.Key, key2}},
{Name: "put", Params: []interface{}{buckets.Meta, buckets.FinishedCompactKeyName, newTestRevBytes(revision{3, 0})}},
{Name: "put", Params: []interface{}{schema.Meta, schema.ScheduledCompactKeyName, newTestRevBytes(revision{3, 0})}},
{Name: "range", Params: []interface{}{schema.Key, make([]byte, 17), end, int64(10000)}},
{Name: "delete", Params: []interface{}{schema.Key, key2}},
{Name: "put", Params: []interface{}{schema.Meta, schema.FinishedCompactKeyName, newTestRevBytes(revision{3, 0})}},
}
if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
t.Errorf("tx actions = %+v, want %+v", g, wact)
@ -384,8 +384,8 @@ func TestStoreRestore(t *testing.T) {
if err != nil {
t.Fatal(err)
}
b.tx.rangeRespc <- rangeResp{[][]byte{buckets.FinishedCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}}
b.tx.rangeRespc <- rangeResp{[][]byte{buckets.ScheduledCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}}
b.tx.rangeRespc <- rangeResp{[][]byte{schema.FinishedCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}}
b.tx.rangeRespc <- rangeResp{[][]byte{schema.ScheduledCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}}

b.tx.rangeRespc <- rangeResp{[][]byte{putkey, delkey}, [][]byte{putkvb, delkvb}}
b.tx.rangeRespc <- rangeResp{nil, nil}
@ -399,9 +399,9 @@ func TestStoreRestore(t *testing.T) {
t.Errorf("current rev = %v, want 5", s.currentRev)
}
wact := []testutil.Action{
{Name: "range", Params: []interface{}{buckets.Meta, buckets.FinishedCompactKeyName, []byte(nil), int64(0)}},
{Name: "range", Params: []interface{}{buckets.Meta, buckets.ScheduledCompactKeyName, []byte(nil), int64(0)}},
{Name: "range", Params: []interface{}{buckets.Key, newTestRevBytes(revision{1, 0}), newTestRevBytes(revision{math.MaxInt64, math.MaxInt64}), int64(restoreChunkKeys)}},
{Name: "range", Params: []interface{}{schema.Meta, schema.FinishedCompactKeyName, []byte(nil), int64(0)}},
{Name: "range", Params: []interface{}{schema.Meta, schema.ScheduledCompactKeyName, []byte(nil), int64(0)}},
{Name: "range", Params: []interface{}{schema.Key, newTestRevBytes(revision{1, 0}), newTestRevBytes(revision{math.MaxInt64, math.MaxInt64}), int64(restoreChunkKeys)}},
}
if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
t.Errorf("tx actions = %+v, want %+v", g, wact)
@ -485,7 +485,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
revToBytes(revision{main: 2}, rbytes)
tx := s0.b.BatchTx()
tx.Lock()
tx.UnsafePut(buckets.Meta, buckets.ScheduledCompactKeyName, rbytes)
tx.UnsafePut(schema.Meta, schema.ScheduledCompactKeyName, rbytes)
tx.Unlock()

s0.Close()
@ -514,7 +514,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
for i := 0; i < 5; i++ {
tx := s.b.BatchTx()
tx.Lock()
ks, _ := tx.UnsafeRange(buckets.Key, revbytes, nil, 0)
ks, _ := tx.UnsafeRange(schema.Key, revbytes, nil, 0)
tx.Unlock()
if len(ks) != 0 {
time.Sleep(100 * time.Millisecond)

@ -20,8 +20,8 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap"
)

@ -160,7 +160,7 @@ func (tr *storeTxnRead) rangeKeys(ctx context.Context, key, end []byte, curRev i
default:
}
revToBytes(revpair, revBytes)
_, vs := tr.tx.UnsafeRange(buckets.Key, revBytes, nil, 0)
_, vs := tr.tx.UnsafeRange(schema.Key, revBytes, nil, 0)
if len(vs) != 1 {
tr.s.lg.Fatal(
"range failed to find revision pair",
@ -215,7 +215,7 @@ func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) {
}

tw.trace.Step("marshal mvccpb.KeyValue")
tw.tx.UnsafeSeqPut(buckets.Key, ibytes, d)
tw.tx.UnsafeSeqPut(schema.Key, ibytes, d)
tw.s.kvindex.Put(key, idxRev)
tw.changes = append(tw.changes, kv)
tw.trace.Step("store kv pair into bolt db")
@ -276,7 +276,7 @@ func (tw *storeTxnWrite) delete(key []byte) {
)
}

tw.tx.UnsafeSeqPut(buckets.Key, ibytes, d)
tw.tx.UnsafeSeqPut(schema.Key, ibytes, d)
err = tw.s.kvindex.Tombstone(key, idxRev)
if err != nil {
tw.storeTxnRead.s.lg.Fatal(

@ -15,12 +15,12 @@
package mvcc

import (
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/storage/schema"
)

func UnsafeReadFinishedCompact(tx backend.ReadTx) (finishedComact int64, found bool) {
_, finishedCompactBytes := tx.UnsafeRange(buckets.Meta, buckets.FinishedCompactKeyName, nil, 0)
_, finishedCompactBytes := tx.UnsafeRange(schema.Meta, schema.FinishedCompactKeyName, nil, 0)
if len(finishedCompactBytes) != 0 {
return bytesToRev(finishedCompactBytes[0]).main, true
}
@ -28,7 +28,7 @@ func UnsafeReadFinishedCompact(tx backend.ReadTx) (finishedComact int64, found b
}

func UnsafeReadScheduledCompact(tx backend.ReadTx) (scheduledComact int64, found bool) {
_, scheduledCompactBytes := tx.UnsafeRange(buckets.Meta, buckets.ScheduledCompactKeyName, nil, 0)
_, scheduledCompactBytes := tx.UnsafeRange(schema.Meta, schema.ScheduledCompactKeyName, nil, 0)
if len(scheduledCompactBytes) != 0 {
return bytesToRev(scheduledCompactBytes[0]).main, true
}
@ -44,7 +44,7 @@ func SetScheduledCompact(tx backend.BatchTx, value int64) {
func UnsafeSetScheduledCompact(tx backend.BatchTx, value int64) {
rbytes := newRevBytes()
revToBytes(revision{main: value}, rbytes)
tx.UnsafePut(buckets.Meta, buckets.ScheduledCompactKeyName, rbytes)
tx.UnsafePut(schema.Meta, schema.ScheduledCompactKeyName, rbytes)
}

func SetFinishedCompact(tx backend.BatchTx, value int64) {
@ -56,5 +56,5 @@ func SetFinishedCompact(tx backend.BatchTx, value int64) {
func UnsafeSetFinishedCompact(tx backend.BatchTx, value int64) {
rbytes := newRevBytes()
revToBytes(revision{main: value}, rbytes)
tx.UnsafePut(buckets.Meta, buckets.FinishedCompactKeyName, rbytes)
tx.UnsafePut(schema.Meta, schema.FinishedCompactKeyName, rbytes)
}

@ -8,9 +8,9 @@ import (

"github.com/stretchr/testify/assert"

"go.etcd.io/etcd/server/v3/mvcc/backend"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
"go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/storage/schema"
)

// TestScheduledCompact ensures that UnsafeSetScheduledCompact&UnsafeReadScheduledCompact work well together.
@ -39,7 +39,7 @@ func TestScheduledCompact(t *testing.T) {
t.Fatal("batch tx is nil")
}
tx.Lock()
tx.UnsafeCreateBucket(buckets.Meta)
tx.UnsafeCreateBucket(schema.Meta)
UnsafeSetScheduledCompact(tx, tc.value)
tx.Unlock()
be.ForceCommit()
@ -80,7 +80,7 @@ func TestFinishedCompact(t *testing.T) {
t.Fatal("batch tx is nil")
}
tx.Lock()
tx.UnsafeCreateBucket(buckets.Meta)
tx.UnsafeCreateBucket(schema.Meta)
UnsafeSetFinishedCompact(tx, tc.value)
tx.Unlock()
be.ForceCommit()

@ -18,8 +18,8 @@ import (
"fmt"

"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/storage/schema"
)

func WriteKV(be backend.Backend, kv mvccpb.KeyValue) {
@ -32,6 +32,6 @@ func WriteKV(be backend.Backend, kv mvccpb.KeyValue) {
}

be.BatchTx().Lock()
be.BatchTx().UnsafePut(buckets.Key, ibytes, d)
be.BatchTx().UnsafePut(schema.Key, ibytes, d)
be.BatchTx().Unlock()
}

@ -21,8 +21,8 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/storage/schema"

"go.uber.org/zap"
)
@ -354,7 +354,7 @@ func (s *watchableStore) syncWatchers() int {
// values are actual key-value pairs in backend.
tx := s.store.b.ReadTx()
tx.RLock()
revs, vs := tx.UnsafeRange(buckets.Key, minBytes, maxBytes, 0)
revs, vs := tx.UnsafeRange(schema.Key, minBytes, maxBytes, 0)
tx.RUnlock()
evs := kvsToEvents(s.store.lg, wg, revs, vs)

@ -21,7 +21,7 @@ import (

"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"

"go.uber.org/zap"
)

@ -26,7 +26,7 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap"
)

@ -19,7 +19,7 @@ import (
"testing"

"go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"

"go.uber.org/zap"
)

@ -24,7 +24,7 @@ import (

"go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap"
)

@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package buckets
package schema

import (
"go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.uber.org/zap"
)

@ -12,12 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package buckets
package schema

import (
"bytes"
"encoding/binary"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/storage/backend"
)

const (

@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package buckets
package schema

import (
"go.etcd.io/etcd/api/v3/authpb"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.uber.org/zap"
)

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package buckets
package schema

import (
"fmt"
@ -21,8 +21,8 @@ import (
"time"

"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/server/v3/mvcc/backend"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
"go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
)

// TestAuthEnabled ensures that UnsafeSaveAuthEnabled&UnsafeReadAuthEnabled work well together.

@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package buckets
package schema

import (
"go.etcd.io/etcd/api/v3/authpb"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.uber.org/zap"
)

@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package buckets
package schema

import (
"bytes"

"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/storage/backend"
)

var (

@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package buckets
package schema

import (
"encoding/binary"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/storage/backend"
)

// UnsafeCreateMetaBucket creates the `meta` bucket (if it does not exists yet).

@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package buckets
package schema

import (
"encoding/json"
"log"

"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/mvcc/backend"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.uber.org/zap"
)

@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package buckets
package schema

import (
"testing"

"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/raft/v3/raftpb"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap/zaptest"
)

@ -12,14 +12,14 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package buckets
|
||||
package schema
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
|
||||
"go.etcd.io/etcd/server/v3/lease/leasepb"
|
||||
"go.etcd.io/etcd/server/v3/mvcc/backend"
|
||||
"go.etcd.io/etcd/server/v3/storage/backend"
|
||||
)
|
||||
|
||||
func UnsafeCreateLeaseBucket(tx backend.BatchTx) {
|
@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package buckets
|
||||
package schema
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
@ -20,7 +20,7 @@ import (
|
||||
|
||||
"go.etcd.io/etcd/client/pkg/v3/types"
|
||||
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
|
||||
"go.etcd.io/etcd/server/v3/mvcc/backend"
|
||||
"go.etcd.io/etcd/server/v3/storage/backend"
|
||||
|
||||
"github.com/coreos/go-semver/semver"
|
||||
"go.uber.org/zap"
|
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package version
+package schema
 
 import (
 	"fmt"
@@ -20,8 +20,7 @@ import (
 	"github.com/coreos/go-semver/semver"
 	"go.uber.org/zap"
 
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
-	"go.etcd.io/etcd/server/v3/mvcc/buckets"
+	"go.etcd.io/etcd/server/v3/storage/backend"
 )
 
 var (
@@ -29,8 +28,8 @@ var (
 	V3_6 = semver.Version{Major: 3, Minor: 6}
 )
 
-// UpdateStorageVersion updates storage version.
-func UpdateStorageVersion(lg *zap.Logger, tx backend.BatchTx) error {
+// UpdateStorageSchema updates storage version.
+func UpdateStorageSchema(lg *zap.Logger, tx backend.BatchTx) error {
 	tx.Lock()
 	defer tx.Unlock()
 	v, err := detectStorageVersion(lg, tx)
@@ -41,7 +40,7 @@ func UpdateStorageVersion(lg *zap.Logger, tx backend.BatchTx) error {
 	case V3_5:
 		lg.Warn("setting storage version", zap.String("storage-version", V3_6.String()))
 		// All meta keys introduced in v3.6 should be filled in here.
-		buckets.UnsafeSetStorageVersion(tx, &V3_6)
+		UnsafeSetStorageVersion(tx, &V3_6)
 	case V3_6:
 	default:
 		lg.Warn("unknown storage version", zap.String("storage-version", v.String()))
@@ -50,17 +49,17 @@ func UpdateStorageVersion(lg *zap.Logger, tx backend.BatchTx) error {
 }
 
 func detectStorageVersion(lg *zap.Logger, tx backend.ReadTx) (*semver.Version, error) {
-	v := buckets.UnsafeReadStorageVersion(tx)
+	v := UnsafeReadStorageVersion(tx)
 	if v != nil {
 		return v, nil
 	}
-	confstate := buckets.UnsafeConfStateFromBackend(lg, tx)
+	confstate := UnsafeConfStateFromBackend(lg, tx)
 	if confstate == nil {
-		return nil, fmt.Errorf("missing %q key", buckets.MetaConfStateName)
+		return nil, fmt.Errorf("missing %q key", MetaConfStateName)
 	}
-	_, term := buckets.UnsafeReadConsistentIndex(tx)
+	_, term := UnsafeReadConsistentIndex(tx)
 	if term == 0 {
-		return nil, fmt.Errorf("missing %q key", buckets.MetaTermKeyName)
+		return nil, fmt.Errorf("missing %q key", MetaTermKeyName)
 	}
 	copied := V3_5
 	return &copied, nil
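The detection order above: an explicit storage-version key wins; failing that, a backend holding a confstate and a non-zero term is treated as v3.5, and anything else is rejected with a `missing ... key` error. A short sketch of driving the renamed entry point from outside the package (the path is illustrative; `UpdateStorageSchema` locks the batch transaction itself):

```go
package main

import (
	"go.etcd.io/etcd/server/v3/storage/backend"
	"go.etcd.io/etcd/server/v3/storage/schema"

	"go.uber.org/zap"
)

func main() {
	lg := zap.NewExample()
	be := backend.NewDefaultBackend("/tmp/demo.db") // illustrative path
	defer be.Close()
	// Formerly version.UpdateStorageVersion: detects the current schema and,
	// for a fully populated v3.5 backend, stamps V3_6.
	if err := schema.UpdateStorageSchema(lg, be.BatchTx()); err != nil {
		lg.Fatal("cannot update storage schema", zap.Error(err))
	}
}
```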
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package version
+package schema
 
 import (
 	"testing"
@@ -22,9 +22,8 @@ import (
 	"github.com/stretchr/testify/assert"
 	"go.etcd.io/etcd/api/v3/version"
 	"go.etcd.io/etcd/raft/v3/raftpb"
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
-	betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
-	"go.etcd.io/etcd/server/v3/mvcc/buckets"
+	"go.etcd.io/etcd/server/v3/storage/backend"
+	betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
 	"go.uber.org/zap"
 )
@@ -47,7 +46,7 @@ func TestUpdateStorageVersion(t *testing.T) {
 		{
 			name:             `Backend before 3.6 without "term" should be rejected`,
 			version:          "",
-			metaKeys:         [][]byte{buckets.MetaConfStateName},
+			metaKeys:         [][]byte{MetaConfStateName},
 			expectVersion:    nil,
 			expectError:      true,
 			expectedErrorMsg: `cannot determine storage version: missing "term" key`,
@@ -55,25 +54,25 @@ func TestUpdateStorageVersion(t *testing.T) {
 		{
 			name:          "Backend with 3.5 with all metadata keys should be upgraded to v3.6",
 			version:       "",
-			metaKeys:      [][]byte{buckets.MetaTermKeyName, buckets.MetaConfStateName},
+			metaKeys:      [][]byte{MetaTermKeyName, MetaConfStateName},
 			expectVersion: &semver.Version{Major: 3, Minor: 6},
 		},
 		{
 			name:          "Backend in 3.6.0 should be skipped",
 			version:       "3.6.0",
-			metaKeys:      [][]byte{buckets.MetaTermKeyName, buckets.MetaConfStateName, buckets.MetaStorageVersionName},
+			metaKeys:      [][]byte{MetaTermKeyName, MetaConfStateName, MetaStorageVersionName},
 			expectVersion: &semver.Version{Major: 3, Minor: 6},
 		},
 		{
 			name:          "Backend with current version should be skipped",
 			version:       version.Version,
-			metaKeys:      [][]byte{buckets.MetaTermKeyName, buckets.MetaConfStateName, buckets.MetaStorageVersionName},
+			metaKeys:      [][]byte{MetaTermKeyName, MetaConfStateName, MetaStorageVersionName},
 			expectVersion: &semver.Version{Major: 3, Minor: 6},
 		},
 		{
 			name:          "Backend in 3.7.0 should be skipped",
 			version:       "3.7.0",
-			metaKeys:      [][]byte{buckets.MetaTermKeyName, buckets.MetaConfStateName, buckets.MetaStorageVersionName, []byte("future-key")},
+			metaKeys:      [][]byte{MetaTermKeyName, MetaConfStateName, MetaStorageVersionName, []byte("future-key")},
 			expectVersion: &semver.Version{Major: 3, Minor: 7},
 		},
 	}
@@ -86,19 +85,19 @@ func TestUpdateStorageVersion(t *testing.T) {
 				t.Fatal("batch tx is nil")
 			}
 			tx.Lock()
-			buckets.UnsafeCreateMetaBucket(tx)
+			UnsafeCreateMetaBucket(tx)
 			for _, k := range tc.metaKeys {
 				switch string(k) {
-				case string(buckets.MetaConfStateName):
-					buckets.MustUnsafeSaveConfStateToBackend(lg, tx, &raftpb.ConfState{})
-				case string(buckets.MetaTermKeyName):
-					buckets.UnsafeUpdateConsistentIndex(tx, 1, 1, false)
+				case string(MetaConfStateName):
+					MustUnsafeSaveConfStateToBackend(lg, tx, &raftpb.ConfState{})
+				case string(MetaTermKeyName):
+					UnsafeUpdateConsistentIndex(tx, 1, 1, false)
 				default:
-					tx.UnsafePut(buckets.Meta, k, []byte{})
+					tx.UnsafePut(Meta, k, []byte{})
 				}
 			}
 			if tc.version != "" {
-				buckets.UnsafeSetStorageVersion(tx, semver.New(tc.version))
+				UnsafeSetStorageVersion(tx, semver.New(tc.version))
 			}
 			tx.Unlock()
 			be.ForceCommit()
@@ -106,14 +105,14 @@ func TestUpdateStorageVersion(t *testing.T) {
 
 			b := backend.NewDefaultBackend(tmpPath)
 			defer b.Close()
-			err := UpdateStorageVersion(lg, b.BatchTx())
+			err := UpdateStorageSchema(lg, b.BatchTx())
 			if (err != nil) != tc.expectError {
 				t.Errorf("UpgradeStorage(...) = %+v, expected error: %v", err, tc.expectError)
 			}
 			if err != nil && err.Error() != tc.expectedErrorMsg {
 				t.Errorf("UpgradeStorage(...) = %q, expected error message: %q", err, tc.expectedErrorMsg)
 			}
-			v := buckets.UnsafeReadStorageVersion(b.BatchTx())
+			v := UnsafeReadStorageVersion(b.BatchTx())
 			assert.Equal(t, tc.expectVersion, v)
 		})
 	}
@@ -12,12 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package buckets
+package schema
 
 import (
 	"github.com/coreos/go-semver/semver"
 	"go.etcd.io/bbolt"
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
+	"go.etcd.io/etcd/server/v3/storage/backend"
 )
 
 // ReadStorageVersion loads storage version from given backend transaction.
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package buckets
+package schema
 
 import (
 	"testing"
@@ -22,8 +22,8 @@ import (
 	"github.com/stretchr/testify/assert"
 	"go.etcd.io/bbolt"
 
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
-	"go.etcd.io/etcd/server/v3/mvcc/backend/testing"
+	"go.etcd.io/etcd/server/v3/storage/backend"
+	"go.etcd.io/etcd/server/v3/storage/backend/testing"
 )
 
 // TestVersion ensures that UnsafeSetStorageVersion/UnsafeReadStorageVersion work well together.
@@ -20,8 +20,8 @@ import (
 	"go.etcd.io/etcd/raft/v3/raftpb"
 	"go.etcd.io/etcd/server/v3/datadir"
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
-	"go.etcd.io/etcd/server/v3/mvcc/buckets"
+	"go.etcd.io/etcd/server/v3/storage/backend"
+	"go.etcd.io/etcd/server/v3/storage/schema"
 	wal2 "go.etcd.io/etcd/server/v3/wal"
 	"go.etcd.io/etcd/server/v3/wal/walpb"
 	"go.uber.org/zap"
@@ -109,7 +109,7 @@ func MustVerifyIfEnabled(cfg Config) {
 
 func validateConsistentIndex(cfg Config, hardstate *raftpb.HardState, snapshot *walpb.Snapshot, be backend.Backend) error {
 	tx := be.BatchTx()
-	index, term := buckets.ReadConsistentIndex(tx)
+	index, term := schema.ReadConsistentIndex(tx)
 	if cfg.ExactIndex && index != hardstate.Commit {
 		return fmt.Errorf("backend.ConsistentIndex (%v) expected == WAL.HardState.commit (%v)", index, hardstate.Commit)
 	}
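The check above ties backend bookkeeping to the WAL: with `ExactIndex` set, the consistent index read via `schema.ReadConsistentIndex` must equal the commit index in the WAL's `HardState`. A reduced sketch of that invariant, using only the types that appear in this diff:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/raft/v3/raftpb"
	"go.etcd.io/etcd/server/v3/storage/backend"
	"go.etcd.io/etcd/server/v3/storage/schema"
)

// checkExactIndex mirrors the ExactIndex branch of validateConsistentIndex.
func checkExactIndex(be backend.Backend, hardstate *raftpb.HardState) error {
	index, _ := schema.ReadConsistentIndex(be.BatchTx())
	if index != hardstate.Commit {
		return fmt.Errorf("backend.ConsistentIndex (%v) expected == WAL.HardState.commit (%v)", index, hardstate.Commit)
	}
	return nil
}

func main() {}
```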
@@ -32,8 +32,8 @@ import (
 	"go.etcd.io/etcd/api/v3/version"
 	"go.etcd.io/etcd/client/v3"
 	"go.etcd.io/etcd/server/v3/lease"
-	"go.etcd.io/etcd/server/v3/mvcc"
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
+	"go.etcd.io/etcd/server/v3/storage/backend"
+	"go.etcd.io/etcd/server/v3/storage/mvcc"
 	"go.etcd.io/etcd/tests/v3/integration"
 )
@@ -25,8 +25,8 @@ import (
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
 	"go.etcd.io/etcd/pkg/v3/traceutil"
-	"go.etcd.io/etcd/server/v3/mvcc"
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
+	"go.etcd.io/etcd/server/v3/storage/backend"
+	"go.etcd.io/etcd/server/v3/storage/mvcc"
 	"go.uber.org/zap/zaptest"
 )
@@ -21,8 +21,8 @@ import (
 	"go.uber.org/zap"
 
 	"go.etcd.io/etcd/server/v3/lease"
-	"go.etcd.io/etcd/server/v3/mvcc"
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
+	"go.etcd.io/etcd/server/v3/storage/backend"
+	"go.etcd.io/etcd/server/v3/storage/mvcc"
 
 	"github.com/spf13/cobra"
 )
@@ -20,11 +20,11 @@ import (
 	"path/filepath"
 
 	"go.etcd.io/etcd/api/v3/authpb"
-	"go.etcd.io/etcd/server/v3/mvcc/buckets"
+	"go.etcd.io/etcd/server/v3/storage/schema"
 
 	"go.etcd.io/etcd/api/v3/mvccpb"
 	"go.etcd.io/etcd/server/v3/lease/leasepb"
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
+	"go.etcd.io/etcd/server/v3/storage/backend"
 
 	bolt "go.etcd.io/bbolt"
 )
@@ -163,7 +163,7 @@ func iterateBucket(dbPath, bucket string, limit uint64, decode bool) (err error)
 
 func getHash(dbPath string) (hash uint32, err error) {
 	b := backend.NewDefaultBackend(dbPath)
-	return b.Hash(buckets.DefaultIgnores)
+	return b.Hash(schema.DefaultIgnores)
 }
 
 // TODO: revert by revision and find specified hash value
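`schema.DefaultIgnores` (formerly `buckets.DefaultIgnores`) filters volatile meta keys out of the hash so two backends with the same logical content hash identically. A hedged usage sketch of the `getHash` pattern above; the path is illustrative:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/server/v3/storage/backend"
	"go.etcd.io/etcd/server/v3/storage/schema"
)

func main() {
	b := backend.NewDefaultBackend("/tmp/demo.db") // illustrative path
	defer b.Close()
	// Hash the keyspace while skipping the keys DefaultIgnores marks as
	// volatile, as getHash above does.
	h, err := b.Hash(schema.DefaultIgnores)
	if err != nil {
		fmt.Println("hash failed:", err)
		return
	}
	fmt.Printf("db hash: %d\n", h)
}
```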