Merge pull request #13198 from serathius/bootstrap2

etcdserver: Restructure storage packages to make place for schema
Piotr Tabor 2021-07-15 15:57:15 +02:00 committed by GitHub
commit 28f86ee2cb
98 changed files with 920 additions and 883 deletions

View File

@@ -15,3 +15,9 @@ See [code changes](https://github.com/etcd-io/etcd/compare/v3.5.0...v3.6.0).
 ### etcdutl v3
 - Add command to generate [shell completion](https://github.com/etcd-io/etcd/pull/13142).
+
+### Package `server`
+- Package `mvcc` was moved to `storage/mvcc`
+- Package `mvcc/backend` was moved to `storage/backend`
+- Package `mvcc/buckets` was moved to `storage/schema`
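For consumers of the `server` module this is a pure import-path move; the call signatures are unchanged in this commit. A minimal sketch of the migration (illustrative only, not code from this commit; the db path is a placeholder):

```go
package main

import (
	"go.uber.org/zap"

	// Before this release these imports were:
	//   "go.etcd.io/etcd/server/v3/mvcc/backend"
	//   "go.etcd.io/etcd/server/v3/mvcc/buckets"
	"go.etcd.io/etcd/server/v3/storage/backend"
	"go.etcd.io/etcd/server/v3/storage/schema"
)

func main() {
	lg := zap.NewExample()
	// The backend API is unchanged; only the package path moved.
	be := backend.NewDefaultBackend("member/snap/db") // placeholder path
	defer be.Close()
	// Calls that used buckets.* now go through schema.*, e.g.:
	_ = schema.NewMembershipStore(lg, be)
}
```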

View File

@@ -31,8 +31,8 @@ import (
     "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
     "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
     "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/schema"
     "go.etcd.io/etcd/server/v3/verify"
     "go.etcd.io/etcd/server/v3/wal"
     "go.etcd.io/etcd/server/v3/wal/walpb"
@@ -311,7 +311,7 @@ func saveDB(lg *zap.Logger, destDB, srcDB string, idx uint64, term uint64, desir
     be := backend.NewDefaultBackend(destDB)
     defer be.Close()

-    ms := buckets.NewMembershipStore(lg, be)
+    ms := schema.NewMembershipStore(lg, be)
     if err := ms.TrimClusterFromBackend(); err != nil {
         lg.Fatal("bbolt tx.Membership failed", zap.Error(err))
     }
@@ -325,8 +325,8 @@ func saveDB(lg *zap.Logger, destDB, srcDB string, idx uint64, term uint64, desir
         tx := be.BatchTx()
         tx.Lock()
         defer tx.Unlock()
-        buckets.UnsafeCreateMetaBucket(tx)
-        buckets.UnsafeUpdateConsistentIndex(tx, idx, term, false)
+        schema.UnsafeCreateMetaBucket(tx)
+        schema.UnsafeUpdateConsistentIndex(tx, idx, term, false)
     } else {
         // Thanks to translateWAL not moving entries, but just replacing them with
         // 'empty', there is no need to update the consistency index.

View File

@@ -22,7 +22,7 @@ import (
     "github.com/spf13/cobra"
     "go.etcd.io/etcd/pkg/v3/cobrautl"
     "go.etcd.io/etcd/server/v3/datadir"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
+    "go.etcd.io/etcd/server/v3/storage/backend"
 )

 var (

View File

@@ -40,8 +40,8 @@ import (
     "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
     "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
     "go.etcd.io/etcd/server/v3/etcdserver/cindex"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/schema"
     "go.etcd.io/etcd/server/v3/verify"
     "go.etcd.io/etcd/server/v3/wal"
     "go.etcd.io/etcd/server/v3/wal/walpb"
@@ -136,7 +136,7 @@ func (s *v3Manager) Status(dbPath string) (ds Status, err error) {
             return fmt.Errorf("snapshot file integrity check failed. %d errors found.\n"+strings.Join(dbErrStrings, "\n"), len(dbErrStrings))
         }
         ds.TotalSize = tx.Size()
-        v := buckets.ReadStorageVersionFromSnapshot(tx)
+        v := schema.ReadStorageVersionFromSnapshot(tx)
         if v != nil {
             ds.Version = v.String()
         }
@@ -306,7 +306,7 @@ func (s *v3Manager) saveDB() error {
     be := backend.NewDefaultBackend(s.outDbPath())
     defer be.Close()

-    err = buckets.NewMembershipStore(s.lg, be).TrimMembershipFromBackend()
+    err = schema.NewMembershipStore(s.lg, be).TrimMembershipFromBackend()
     if err != nil {
         return err
     }
@@ -403,7 +403,7 @@ func (s *v3Manager) saveWALAndSnap() (*raftpb.HardState, error) {
     s.cl.SetStore(st)
     be := backend.NewDefaultBackend(s.outDbPath())
     defer be.Close()
-    s.cl.SetBackend(buckets.NewMembershipStore(s.lg, be))
+    s.cl.SetBackend(schema.NewMembershipStore(s.lg, be))
     for _, m := range s.cl.Members() {
         s.cl.AddMember(m, true)
     }

View File

@@ -17,14 +17,14 @@ package auth

 import (
     "go.etcd.io/etcd/api/v3/authpb"
     "go.etcd.io/etcd/pkg/v3/adt"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/schema"

     "go.uber.org/zap"
 )

 func getMergedPerms(lg *zap.Logger, tx backend.BatchTx, userName string) *unifiedRangePermissions {
-    user := buckets.UnsafeGetUser(lg, tx, userName)
+    user := schema.UnsafeGetUser(lg, tx, userName)
     if user == nil {
         return nil
     }
@@ -33,7 +33,7 @@ func getMergedPerms(lg *zap.Logger, tx backend.BatchTx, userName string) *unifie
     writePerms := adt.NewIntervalTree()
     for _, roleName := range user.Roles {
-        role := buckets.UnsafeGetRole(lg, tx, roleName)
+        role := schema.UnsafeGetRole(lg, tx, roleName)
         if role == nil {
             continue
         }

View File

@@ -28,8 +28,8 @@ import (
     "go.etcd.io/etcd/api/v3/authpb"
     pb "go.etcd.io/etcd/api/v3/etcdserverpb"
     "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/schema"

     "go.uber.org/zap"
     "golang.org/x/crypto/bcrypt"
@@ -225,7 +225,7 @@ func (as *authStore) AuthEnable() error {
         b.ForceCommit()
     }()

-    u := buckets.UnsafeGetUser(as.lg, tx, rootUser)
+    u := schema.UnsafeGetUser(as.lg, tx, rootUser)
     if u == nil {
         return ErrRootUserNotExist
     }
@@ -234,7 +234,7 @@ func (as *authStore) AuthEnable() error {
         return ErrRootRoleNotExist
     }

-    buckets.UnsafeSaveAuthEnabled(tx, true)
+    schema.UnsafeSaveAuthEnabled(tx, true)

     as.enabled = true
     as.tokenProvider.enable()
@@ -256,7 +256,7 @@ func (as *authStore) AuthDisable() {
     b := as.be
     tx := b.BatchTx()
     tx.Lock()
-    buckets.UnsafeSaveAuthEnabled(tx, false)
+    schema.UnsafeSaveAuthEnabled(tx, false)
     as.commitRevision(tx)
     tx.Unlock()
     b.ForceCommit()
@@ -286,7 +286,7 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string
     tx.Lock()
     defer tx.Unlock()

-    user := buckets.UnsafeGetUser(as.lg, tx, username)
+    user := schema.UnsafeGetUser(as.lg, tx, username)
     if user == nil {
         return nil, ErrAuthFailed
     }
@@ -324,7 +324,7 @@ func (as *authStore) CheckPassword(username, password string) (uint64, error) {
         tx.Lock()
         defer tx.Unlock()

-        user = buckets.UnsafeGetUser(as.lg, tx, username)
+        user = schema.UnsafeGetUser(as.lg, tx, username)
         if user == nil {
             return 0, ErrAuthFailed
         }
@@ -351,7 +351,7 @@ func (as *authStore) Recover(be backend.Backend) {
     tx := be.BatchTx()
     tx.Lock()
-    enabled := buckets.UnsafeReadAuthEnabled(tx)
+    enabled := schema.UnsafeReadAuthEnabled(tx)
     as.setRevision(getRevision(tx))
     tx.Unlock()
@@ -381,7 +381,7 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
     tx.Lock()
     defer tx.Unlock()

-    user := buckets.UnsafeGetUser(as.lg, tx, r.Name)
+    user := schema.UnsafeGetUser(as.lg, tx, r.Name)
     if user != nil {
         return nil, ErrUserAlreadyExist
     }
@@ -409,7 +409,7 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse,
         Options: options,
     }

-    buckets.UnsafePutUser(as.lg, tx, newUser)
+    schema.UnsafePutUser(as.lg, tx, newUser)

     as.commitRevision(tx)
@@ -427,12 +427,12 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete
     tx.Lock()
     defer tx.Unlock()

-    user := buckets.UnsafeGetUser(as.lg, tx, r.Name)
+    user := schema.UnsafeGetUser(as.lg, tx, r.Name)
     if user == nil {
         return nil, ErrUserNotFound
     }

-    buckets.UnsafeDeleteUser(tx, r.Name)
+    schema.UnsafeDeleteUser(tx, r.Name)

     as.commitRevision(tx)
@@ -452,7 +452,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
     tx.Lock()
     defer tx.Unlock()

-    user := buckets.UnsafeGetUser(as.lg, tx, r.Name)
+    user := schema.UnsafeGetUser(as.lg, tx, r.Name)
     if user == nil {
         return nil, ErrUserNotFound
     }
@@ -474,7 +474,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p
         Options: user.Options,
     }

-    buckets.UnsafePutUser(as.lg, tx, updatedUser)
+    schema.UnsafePutUser(as.lg, tx, updatedUser)

     as.commitRevision(tx)
@@ -494,13 +494,13 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
     tx.Lock()
     defer tx.Unlock()

-    user := buckets.UnsafeGetUser(as.lg, tx, r.User)
+    user := schema.UnsafeGetUser(as.lg, tx, r.User)
     if user == nil {
         return nil, ErrUserNotFound
     }

     if r.Role != rootRole {
-        role := buckets.UnsafeGetRole(as.lg, tx, r.Role)
+        role := schema.UnsafeGetRole(as.lg, tx, r.Role)
         if role == nil {
             return nil, ErrRoleNotFound
         }
@@ -520,7 +520,7 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
     user.Roles = append(user.Roles, r.Role)
     sort.Strings(user.Roles)

-    buckets.UnsafePutUser(as.lg, tx, user)
+    schema.UnsafePutUser(as.lg, tx, user)

     as.invalidateCachedPerm(r.User)
@ -538,7 +538,7 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser
func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
tx := as.be.BatchTx() tx := as.be.BatchTx()
tx.Lock() tx.Lock()
user := buckets.UnsafeGetUser(as.lg, tx, r.Name) user := schema.UnsafeGetUser(as.lg, tx, r.Name)
tx.Unlock() tx.Unlock()
if user == nil { if user == nil {
@ -553,7 +553,7 @@ func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse,
func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
tx := as.be.BatchTx() tx := as.be.BatchTx()
tx.Lock() tx.Lock()
users := buckets.UnsafeGetAllUsers(as.lg, tx) users := schema.UnsafeGetAllUsers(as.lg, tx)
tx.Unlock() tx.Unlock()
resp := &pb.AuthUserListResponse{Users: make([]string, len(users))} resp := &pb.AuthUserListResponse{Users: make([]string, len(users))}
@@ -577,7 +577,7 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs
     tx.Lock()
     defer tx.Unlock()

-    user := buckets.UnsafeGetUser(as.lg, tx, r.Name)
+    user := schema.UnsafeGetUser(as.lg, tx, r.Name)
     if user == nil {
         return nil, ErrUserNotFound
     }
@@ -598,7 +598,7 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs
         return nil, ErrRoleNotGranted
     }

-    buckets.UnsafePutUser(as.lg, tx, updatedUser)
+    schema.UnsafePutUser(as.lg, tx, updatedUser)

     as.invalidateCachedPerm(r.Name)
@@ -621,7 +621,7 @@ func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse,
     var resp pb.AuthRoleGetResponse

-    role := buckets.UnsafeGetRole(as.lg, tx, r.Role)
+    role := schema.UnsafeGetRole(as.lg, tx, r.Role)
     if role == nil {
         return nil, ErrRoleNotFound
     }
@ -636,7 +636,7 @@ func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse,
func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
tx := as.be.BatchTx() tx := as.be.BatchTx()
tx.Lock() tx.Lock()
roles := buckets.UnsafeGetAllRoles(as.lg, tx) roles := schema.UnsafeGetAllRoles(as.lg, tx)
tx.Unlock() tx.Unlock()
resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))} resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))}
@@ -651,7 +651,7 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest)
     tx.Lock()
     defer tx.Unlock()

-    role := buckets.UnsafeGetRole(as.lg, tx, r.Role)
+    role := schema.UnsafeGetRole(as.lg, tx, r.Role)
     if role == nil {
         return nil, ErrRoleNotFound
     }
@@ -670,7 +670,7 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest)
         return nil, ErrPermissionNotGranted
     }

-    buckets.UnsafePutRole(as.lg, tx, updatedRole)
+    schema.UnsafePutRole(as.lg, tx, updatedRole)

     // TODO(mitake): currently single role update invalidates every cache
     // It should be optimized.
@@ -697,14 +697,14 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete
     tx.Lock()
     defer tx.Unlock()

-    role := buckets.UnsafeGetRole(as.lg, tx, r.Role)
+    role := schema.UnsafeGetRole(as.lg, tx, r.Role)
     if role == nil {
         return nil, ErrRoleNotFound
     }

-    buckets.UnsafeDeleteRole(tx, r.Role)
+    schema.UnsafeDeleteRole(tx, r.Role)

-    users := buckets.UnsafeGetAllUsers(as.lg, tx)
+    users := schema.UnsafeGetAllUsers(as.lg, tx)
     for _, user := range users {
         updatedUser := &authpb.User{
             Name: user.Name,
@@ -722,7 +722,7 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete
             continue
         }

-        buckets.UnsafePutUser(as.lg, tx, updatedUser)
+        schema.UnsafePutUser(as.lg, tx, updatedUser)

         as.invalidateCachedPerm(string(user.Name))
     }
@@ -742,7 +742,7 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
     tx.Lock()
     defer tx.Unlock()

-    role := buckets.UnsafeGetRole(as.lg, tx, r.Name)
+    role := schema.UnsafeGetRole(as.lg, tx, r.Name)
     if role != nil {
         return nil, ErrRoleAlreadyExist
     }
@@ -751,7 +751,7 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse,
         Name: []byte(r.Name),
     }

-    buckets.UnsafePutRole(as.lg, tx, newRole)
+    schema.UnsafePutRole(as.lg, tx, newRole)

     as.commitRevision(tx)
@@ -786,7 +786,7 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (
     tx.Lock()
     defer tx.Unlock()

-    role := buckets.UnsafeGetRole(as.lg, tx, r.Name)
+    role := schema.UnsafeGetRole(as.lg, tx, r.Name)
     if role == nil {
         return nil, ErrRoleNotFound
     }
@@ -810,7 +810,7 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (
         sort.Sort(permSlice(role.KeyPermission))
     }

-    buckets.UnsafePutRole(as.lg, tx, role)
+    schema.UnsafePutRole(as.lg, tx, role)

     // TODO(mitake): currently single role update invalidates every cache
     // It should be optimized.
@@ -850,7 +850,7 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE
     tx.Lock()
     defer tx.Unlock()

-    user := buckets.UnsafeGetUser(as.lg, tx, userName)
+    user := schema.UnsafeGetUser(as.lg, tx, userName)
     if user == nil {
         as.lg.Error("cannot find a user for permission check", zap.String("user-name", userName))
         return ErrPermissionDenied
@@ -890,7 +890,7 @@ func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
     tx := as.be.BatchTx()
     tx.Lock()
-    u := buckets.UnsafeGetUser(as.lg, tx, authInfo.Username)
+    u := schema.UnsafeGetUser(as.lg, tx, authInfo.Username)
     tx.Unlock()

     if u == nil {
@@ -930,11 +930,11 @@ func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCo
     tx := be.BatchTx()
     tx.Lock()

-    buckets.UnsafeCreateAuthBucket(tx)
-    tx.UnsafeCreateBucket(buckets.AuthUsers)
-    tx.UnsafeCreateBucket(buckets.AuthRoles)
+    schema.UnsafeCreateAuthBucket(tx)
+    tx.UnsafeCreateBucket(schema.AuthUsers)
+    tx.UnsafeCreateBucket(schema.AuthRoles)

-    enabled := buckets.UnsafeReadAuthEnabled(tx)
+    enabled := schema.UnsafeReadAuthEnabled(tx)

     as := &authStore{
         revision: getRevision(tx),
@@ -970,11 +970,11 @@ func hasRootRole(u *authpb.User) bool {
 func (as *authStore) commitRevision(tx backend.BatchTx) {
     atomic.AddUint64(&as.revision, 1)
-    buckets.UnsafeSaveAuthRevision(tx, as.Revision())
+    schema.UnsafeSaveAuthRevision(tx, as.Revision())
 }

 func getRevision(tx backend.BatchTx) uint64 {
-    return buckets.UnsafeReadAuthRevision(tx)
+    return schema.UnsafeReadAuthRevision(tx)
 }

 func (as *authStore) setRevision(rev uint64) {
@ -1169,7 +1169,7 @@ func (as *authStore) WithRoot(ctx context.Context) context.Context {
func (as *authStore) HasRole(user, role string) bool { func (as *authStore) HasRole(user, role string) bool {
tx := as.be.BatchTx() tx := as.be.BatchTx()
tx.Lock() tx.Lock()
u := buckets.UnsafeGetUser(as.lg, tx, user) u := schema.UnsafeGetUser(as.lg, tx, user)
tx.Unlock() tx.Unlock()
if u == nil { if u == nil {

View File

@@ -27,8 +27,8 @@ import (
     "go.etcd.io/etcd/api/v3/authpb"
     pb "go.etcd.io/etcd/api/v3/etcdserverpb"
     "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"

     "go.uber.org/zap"
     "golang.org/x/crypto/bcrypt"

View File

@@ -20,8 +20,8 @@ import (
     pb "go.etcd.io/etcd/api/v3/etcdserverpb"
     "go.etcd.io/etcd/client/pkg/v3/types"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/schema"

     "go.uber.org/zap"
 )
@@ -59,7 +59,7 @@ func (a *AlarmStore) Activate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
         return m
     }

-    buckets.MustPutAlarm(a.lg, a.bg.Backend().BatchTx(), newAlarm)
+    schema.MustPutAlarm(a.lg, a.bg.Backend().BatchTx(), newAlarm)
     return newAlarm
 }
@@ -79,7 +79,7 @@ func (a *AlarmStore) Deactivate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
     delete(t, id)

-    buckets.MustDeleteAlarm(a.lg, a.bg.Backend().BatchTx(), m)
+    schema.MustDeleteAlarm(a.lg, a.bg.Backend().BatchTx(), m)
     return m
 }
@@ -105,8 +105,8 @@ func (a *AlarmStore) restore() error {
     tx := b.BatchTx()

     tx.Lock()
-    buckets.UnsafeCreateAlarmBucket(tx)
-    ms, err := buckets.UnsafeGetAllAlarms(tx)
+    schema.UnsafeCreateAlarmBucket(tx)
+    ms, err := schema.UnsafeGetAllAlarms(tx)
     tx.Unlock()
     if err != nil {
         return err

View File

@@ -20,7 +20,7 @@ import (
     "time"

     pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"

     "github.com/jonboulle/clockwork"
     "go.uber.org/zap"

View File

@@ -20,7 +20,7 @@ import (
     "time"

     pb "go.etcd.io/etcd/api/v3/etcdserverpb"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"

     "github.com/jonboulle/clockwork"
     "go.uber.org/zap"

View File

@@ -27,9 +27,9 @@ import (
     "go.etcd.io/etcd/raft/v3"
     "go.etcd.io/etcd/server/v3/auth"
     "go.etcd.io/etcd/server/v3/etcdserver"
-    "go.etcd.io/etcd/server/v3/mvcc"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/schema"

     "go.uber.org/zap"
 )
@@ -101,7 +101,7 @@ func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRe
 const snapshotSendBufferSize = 32 * 1024

 func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
-    ver := buckets.ReadStorageVersion(ms.bg.Backend().ReadTx())
+    ver := schema.ReadStorageVersion(ms.bg.Backend().ReadTx())
     storageVersion := ""
     if ver != nil {
         storageVersion = ver.String()

View File

@@ -24,7 +24,7 @@ import (
     "go.etcd.io/etcd/server/v3/etcdserver"
     "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
     "go.etcd.io/etcd/server/v3/lease"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"

     "google.golang.org/grpc/codes"
     "google.golang.org/grpc/status"

View File

@@ -20,7 +20,7 @@ import (
     "testing"

     "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"

     "google.golang.org/grpc/codes"
     "google.golang.org/grpc/status"

View File

@@ -26,7 +26,7 @@ import (
     "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
     "go.etcd.io/etcd/server/v3/auth"
     "go.etcd.io/etcd/server/v3/etcdserver"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"

     "go.uber.org/zap"
 )

View File

@@ -32,7 +32,7 @@ import (
     "go.etcd.io/etcd/server/v3/etcdserver/api"
     "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
     "go.etcd.io/etcd/server/v3/lease"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"

     "github.com/gogo/protobuf/proto"
     "go.uber.org/zap"

View File

@@ -23,7 +23,7 @@ import (
     "go.etcd.io/etcd/server/v3/auth"
     "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
     "go.etcd.io/etcd/server/v3/lease"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"
 )

 type authApplierV3 struct {

View File

@@ -22,8 +22,8 @@ import (
     "go.etcd.io/etcd/raft/v3/raftpb"
     "go.etcd.io/etcd/server/v3/config"
     "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/schema"

     "go.uber.org/zap"
 )
@ -99,7 +99,7 @@ func openBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend {
func recoverSnapshotBackend(cfg config.ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot, beExist bool, hooks backend.Hooks) (backend.Backend, error) { func recoverSnapshotBackend(cfg config.ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot, beExist bool, hooks backend.Hooks) (backend.Backend, error) {
consistentIndex := uint64(0) consistentIndex := uint64(0)
if beExist { if beExist {
consistentIndex, _ = buckets.ReadConsistentIndex(oldbe.BatchTx()) consistentIndex, _ = schema.ReadConsistentIndex(oldbe.BatchTx())
} }
if snapshot.Metadata.Index <= consistentIndex { if snapshot.Metadata.Index <= consistentIndex {
return oldbe, nil return oldbe, nil

View File

@@ -0,0 +1,599 @@
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"strings"
"time"
"github.com/coreos/go-semver/semver"
"github.com/dustin/go-humanize"
"go.uber.org/zap"
"go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/fileutil"
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/pkg/v3/pbutil"
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/etcdserver/api"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery"
"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
"go.etcd.io/etcd/server/v3/etcdserver/cindex"
"go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/storage/schema"
"go.etcd.io/etcd/server/v3/wal"
"go.etcd.io/etcd/server/v3/wal/walpb"
)
func bootstrap(cfg config.ServerConfig) (b *bootstrappedServer, err error) {
st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)
if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
cfg.Logger.Warn(
"exceeded recommended request limit",
zap.Uint("max-request-bytes", cfg.MaxRequestBytes),
zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))),
zap.Int("recommended-request-bytes", recommendedMaxRequestBytes),
zap.String("recommended-request-size", recommendedMaxRequestBytesString),
)
}
if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
return nil, fmt.Errorf("cannot access data directory: %v", terr)
}
haveWAL := wal.Exist(cfg.WALDir())
ss := bootstrapSnapshot(cfg)
be, ci, beExist, beHooks, err := bootstrapBackend(cfg)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
be.Close()
}
}()
prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout())
if err != nil {
return nil, err
}
switch {
case !haveWAL && !cfg.NewCluster:
b, err = bootstrapExistingClusterNoWAL(cfg, prt, st, be)
case !haveWAL && cfg.NewCluster:
b, err = bootstrapNewClusterNoWAL(cfg, prt, st, be)
case haveWAL:
b, err = bootstrapWithWAL(cfg, st, be, ss, beExist, beHooks, ci)
default:
be.Close()
return nil, fmt.Errorf("unsupported bootstrap config")
}
if err != nil {
return nil, err
}
if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
return nil, fmt.Errorf("cannot access member directory: %v", terr)
}
b.prt = prt
b.ci = ci
b.st = st
b.be = be
b.ss = ss
b.beHooks = beHooks
return b, nil
}
type bootstrappedServer struct {
raft *bootstrappedRaft
remotes []*membership.Member
prt http.RoundTripper
ci cindex.ConsistentIndexer
st v2store.Store
be backend.Backend
ss *snap.Snapshotter
beHooks *backendHooks
}
func bootstrapSnapshot(cfg config.ServerConfig) *snap.Snapshotter {
if err := fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
cfg.Logger.Fatal(
"failed to create snapshot directory",
zap.String("path", cfg.SnapDir()),
zap.Error(err),
)
}
if err := fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool {
return strings.HasPrefix(fileName, "tmp")
}); err != nil {
cfg.Logger.Error(
"failed to remove temp file(s) in snapshot directory",
zap.String("path", cfg.SnapDir()),
zap.Error(err),
)
}
return snap.New(cfg.Logger, cfg.SnapDir())
}
func bootstrapBackend(cfg config.ServerConfig) (be backend.Backend, ci cindex.ConsistentIndexer, beExist bool, beHooks *backendHooks, err error) {
beExist = fileutil.Exist(cfg.BackendPath())
ci = cindex.NewConsistentIndex(nil)
beHooks = &backendHooks{lg: cfg.Logger, indexer: ci}
be = openBackend(cfg, beHooks)
ci.SetBackend(be)
schema.CreateMetaBucket(be.BatchTx())
if cfg.ExperimentalBootstrapDefragThresholdMegabytes != 0 {
err := maybeDefragBackend(cfg, be)
if err != nil {
be.Close()
return nil, nil, false, nil, err
}
}
cfg.Logger.Debug("restore consistentIndex", zap.Uint64("index", ci.ConsistentIndex()))
return be, ci, beExist, beHooks, nil
}
func maybeDefragBackend(cfg config.ServerConfig, be backend.Backend) error {
size := be.Size()
sizeInUse := be.SizeInUse()
freeableMemory := uint(size - sizeInUse)
thresholdBytes := cfg.ExperimentalBootstrapDefragThresholdMegabytes * 1024 * 1024
if freeableMemory < thresholdBytes {
cfg.Logger.Info("Skipping defragmentation",
zap.Int64("current-db-size-bytes", size),
zap.String("current-db-size", humanize.Bytes(uint64(size))),
zap.Int64("current-db-size-in-use-bytes", sizeInUse),
zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))),
zap.Uint("experimental-bootstrap-defrag-threshold-bytes", thresholdBytes),
zap.String("experimental-bootstrap-defrag-threshold", humanize.Bytes(uint64(thresholdBytes))),
)
return nil
}
return be.Defrag()
}
func bootstrapExistingClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper, st v2store.Store, be backend.Backend) (*bootstrappedServer, error) {
if err := cfg.VerifyJoinExisting(); err != nil {
return nil, err
}
cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
if err != nil {
return nil, err
}
existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt)
if gerr != nil {
return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
}
if err := membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil {
return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
}
if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt) {
return nil, fmt.Errorf("incompatible with current running cluster")
}
remotes := existingCluster.Members()
cl.SetID(types.ID(0), existingCluster.ID())
cl.SetStore(st)
cl.SetBackend(schema.NewMembershipStore(cfg.Logger, be))
br := bootstrapRaftFromCluster(cfg, cl, nil)
cl.SetID(br.wal.id, existingCluster.ID())
return &bootstrappedServer{
raft: br,
remotes: remotes,
}, nil
}
func bootstrapNewClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper, st v2store.Store, be backend.Backend) (*bootstrappedServer, error) {
if err := cfg.VerifyBootstrap(); err != nil {
return nil, err
}
cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
if err != nil {
return nil, err
}
m := cl.MemberByName(cfg.Name)
if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.BootstrapTimeoutEffective()) {
return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
}
if cfg.ShouldDiscover() {
var str string
str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
if err != nil {
return nil, &DiscoveryError{Op: "join", Err: err}
}
var urlsmap types.URLsMap
urlsmap, err = types.NewURLsMap(str)
if err != nil {
return nil, err
}
if config.CheckDuplicateURL(urlsmap) {
return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
}
if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil {
return nil, err
}
}
cl.SetStore(st)
cl.SetBackend(schema.NewMembershipStore(cfg.Logger, be))
br := bootstrapRaftFromCluster(cfg, cl, cl.MemberIDs())
cl.SetID(br.wal.id, cl.ID())
return &bootstrappedServer{
remotes: nil,
raft: br,
}, nil
}
func bootstrapWithWAL(cfg config.ServerConfig, st v2store.Store, be backend.Backend, ss *snap.Snapshotter, beExist bool, beHooks *backendHooks, ci cindex.ConsistentIndexer) (*bootstrappedServer, error) {
if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
return nil, fmt.Errorf("cannot write to member directory: %v", err)
}
if err := fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
}
if cfg.ShouldDiscover() {
cfg.Logger.Warn(
"discovery token is ignored since cluster already initialized; valid logs are found",
zap.String("wal-dir", cfg.WALDir()),
)
}
// Find a snapshot to start/restart a raft node
walSnaps, err := wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir())
if err != nil {
return nil, err
}
// snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding
// wal log entries
snapshot, err := ss.LoadNewestAvailable(walSnaps)
if err != nil && err != snap.ErrNoSnapshot {
return nil, err
}
if snapshot != nil {
if err = st.Recovery(snapshot.Data); err != nil {
cfg.Logger.Panic("failed to recover from snapshot", zap.Error(err))
}
if err = assertNoV2StoreContent(cfg.Logger, st, cfg.V2Deprecation); err != nil {
cfg.Logger.Error("illegal v2store content", zap.Error(err))
return nil, err
}
cfg.Logger.Info(
"recovered v2 store from snapshot",
zap.Uint64("snapshot-index", snapshot.Metadata.Index),
zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))),
)
if be, err = recoverSnapshotBackend(cfg, be, *snapshot, beExist, beHooks); err != nil {
cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err))
}
s1, s2 := be.Size(), be.SizeInUse()
cfg.Logger.Info(
"recovered v3 backend from snapshot",
zap.Int64("backend-size-bytes", s1),
zap.String("backend-size", humanize.Bytes(uint64(s1))),
zap.Int64("backend-size-in-use-bytes", s2),
zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))),
)
if beExist {
// TODO: remove kvindex != 0 checking when we do not expect users to upgrade
// etcd from pre-3.0 release.
kvindex := ci.ConsistentIndex()
if kvindex < snapshot.Metadata.Index {
if kvindex != 0 {
return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", cfg.BackendPath(), kvindex, snapshot.Metadata.Index)
}
cfg.Logger.Warn(
"consistent index was never saved",
zap.Uint64("snapshot-index", snapshot.Metadata.Index),
)
}
}
} else {
cfg.Logger.Info("No snapshot found. Recovering WAL from scratch!")
}
r := &bootstrappedServer{}
if !cfg.ForceNewCluster {
r.raft = bootstrapRaftFromWal(cfg, snapshot)
} else {
r.raft = bootstrapRaftFromWalStandalone(cfg, snapshot)
}
r.raft.cl.SetStore(st)
r.raft.cl.SetBackend(schema.NewMembershipStore(cfg.Logger, be))
r.raft.cl.Recover(api.UpdateCapability)
if r.raft.cl.Version() != nil && !r.raft.cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
bepath := cfg.BackendPath()
os.RemoveAll(bepath)
return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
}
return r, nil
}
func bootstrapRaftFromCluster(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID) *bootstrappedRaft {
member := cl.MemberByName(cfg.Name)
id := member.ID
wal := bootstrapNewWAL(cfg, id, cl.ID())
peers := make([]raft.Peer, len(ids))
for i, id := range ids {
var ctx []byte
ctx, err := json.Marshal((*cl).Member(id))
if err != nil {
cfg.Logger.Panic("failed to marshal member", zap.Error(err))
}
peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
}
cfg.Logger.Info(
"starting local member",
zap.String("local-member-id", id.String()),
zap.String("cluster-id", cl.ID().String()),
)
s := wal.MemoryStorage()
return &bootstrappedRaft{
lg: cfg.Logger,
heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
cl: cl,
config: raftConfig(cfg, uint64(wal.id), s),
peers: peers,
storage: s,
wal: wal,
}
}
func bootstrapRaftFromWal(cfg config.ServerConfig, snapshot *raftpb.Snapshot) *bootstrappedRaft {
wal := bootstrapWALFromSnapshot(cfg.Logger, cfg.WALDir(), snapshot, cfg.UnsafeNoFsync)
cfg.Logger.Info(
"restarting local member",
zap.String("cluster-id", wal.cid.String()),
zap.String("local-member-id", wal.id.String()),
zap.Uint64("commit-index", wal.st.Commit),
)
cl := membership.NewCluster(cfg.Logger)
cl.SetID(wal.id, wal.cid)
s := wal.MemoryStorage()
return &bootstrappedRaft{
lg: cfg.Logger,
heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
cl: cl,
config: raftConfig(cfg, uint64(wal.id), s),
storage: s,
wal: wal,
}
}
func bootstrapRaftFromWalStandalone(cfg config.ServerConfig, snapshot *raftpb.Snapshot) *bootstrappedRaft {
wal := bootstrapWALFromSnapshot(cfg.Logger, cfg.WALDir(), snapshot, cfg.UnsafeNoFsync)
// discard the previously uncommitted entries
wal.ents = wal.CommitedEntries()
entries := wal.ConfigChangeEntries()
// force commit config change entries
wal.AppendAndCommitEntries(entries)
cfg.Logger.Info(
"forcing restart member",
zap.String("cluster-id", wal.cid.String()),
zap.String("local-member-id", wal.id.String()),
zap.Uint64("commit-index", wal.st.Commit),
)
cl := membership.NewCluster(cfg.Logger)
cl.SetID(wal.id, wal.cid)
s := wal.MemoryStorage()
return &bootstrappedRaft{
lg: cfg.Logger,
heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
cl: cl,
config: raftConfig(cfg, uint64(wal.id), s),
storage: s,
wal: wal,
}
}
func raftConfig(cfg config.ServerConfig, id uint64, s *raft.MemoryStorage) *raft.Config {
return &raft.Config{
ID: id,
ElectionTick: cfg.ElectionTicks,
HeartbeatTick: 1,
Storage: s,
MaxSizePerMsg: maxSizePerMsg,
MaxInflightMsgs: maxInflightMsgs,
CheckQuorum: true,
PreVote: cfg.PreVote,
Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")),
}
}
type bootstrappedRaft struct {
lg *zap.Logger
heartbeat time.Duration
peers []raft.Peer
config *raft.Config
cl *membership.RaftCluster
storage *raft.MemoryStorage
wal *bootstrappedWAL
}
func (b *bootstrappedRaft) newRaftNode(ss *snap.Snapshotter) *raftNode {
var n raft.Node
if len(b.peers) == 0 {
n = raft.RestartNode(b.config)
} else {
n = raft.StartNode(b.config, b.peers)
}
raftStatusMu.Lock()
raftStatus = n.Status
raftStatusMu.Unlock()
return newRaftNode(
raftNodeConfig{
lg: b.lg,
isIDRemoved: func(id uint64) bool { return b.cl.IsIDRemoved(types.ID(id)) },
Node: n,
heartbeat: b.heartbeat,
raftStorage: b.storage,
storage: NewStorage(b.wal.w, ss),
},
)
}
// bootstrapWALFromSnapshot reads the WAL at the given snap and returns the wal, its latest HardState and cluster ID, and all entries that appear
// after the position of the given snap in the WAL.
// The snap must have been previously saved to the WAL, or this call will panic.
func bootstrapWALFromSnapshot(lg *zap.Logger, waldir string, snapshot *raftpb.Snapshot, unsafeNoFsync bool) *bootstrappedWAL {
var walsnap walpb.Snapshot
if snapshot != nil {
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
}
repaired := false
for {
w, err := wal.Open(lg, waldir, walsnap)
if err != nil {
lg.Fatal("failed to open WAL", zap.Error(err))
}
if unsafeNoFsync {
w.SetUnsafeNoFsync()
}
wmetadata, st, ents, err := w.ReadAll()
if err != nil {
w.Close()
// we can only repair ErrUnexpectedEOF and we never repair twice.
if repaired || err != io.ErrUnexpectedEOF {
lg.Fatal("failed to read WAL, cannot be repaired", zap.Error(err))
}
if !wal.Repair(lg, waldir) {
lg.Fatal("failed to repair WAL", zap.Error(err))
} else {
lg.Info("repaired WAL", zap.Error(err))
repaired = true
}
continue
}
var metadata etcdserverpb.Metadata
pbutil.MustUnmarshal(&metadata, wmetadata)
id := types.ID(metadata.NodeID)
cid := types.ID(metadata.ClusterID)
return &bootstrappedWAL{
lg: lg,
w: w,
id: id,
cid: cid,
st: &st,
ents: ents,
snapshot: snapshot,
}
}
}
func bootstrapNewWAL(cfg config.ServerConfig, nodeID, clusterID types.ID) *bootstrappedWAL {
metadata := pbutil.MustMarshal(
&etcdserverpb.Metadata{
NodeID: uint64(nodeID),
ClusterID: uint64(clusterID),
},
)
w, err := wal.Create(cfg.Logger, cfg.WALDir(), metadata)
if err != nil {
cfg.Logger.Panic("failed to create WAL", zap.Error(err))
}
if cfg.UnsafeNoFsync {
w.SetUnsafeNoFsync()
}
return &bootstrappedWAL{
lg: cfg.Logger,
w: w,
id: nodeID,
cid: clusterID,
}
}
type bootstrappedWAL struct {
lg *zap.Logger
w *wal.WAL
id, cid types.ID
st *raftpb.HardState
ents []raftpb.Entry
snapshot *raftpb.Snapshot
}
func (wal *bootstrappedWAL) MemoryStorage() *raft.MemoryStorage {
s := raft.NewMemoryStorage()
if wal.snapshot != nil {
s.ApplySnapshot(*wal.snapshot)
}
if wal.st != nil {
s.SetHardState(*wal.st)
}
if len(wal.ents) != 0 {
s.Append(wal.ents)
}
return s
}
func (wal *bootstrappedWAL) CommitedEntries() []raftpb.Entry {
for i, ent := range wal.ents {
if ent.Index > wal.st.Commit {
wal.lg.Info(
"discarding uncommitted WAL entries",
zap.Uint64("entry-index", ent.Index),
zap.Uint64("commit-index-from-wal", wal.st.Commit),
zap.Int("number-of-discarded-entries", len(wal.ents)-i),
)
return wal.ents[:i]
}
}
return wal.ents
}
func (wal *bootstrappedWAL) ConfigChangeEntries() []raftpb.Entry {
return createConfigChangeEnts(
wal.lg,
getIDs(wal.lg, wal.snapshot, wal.ents),
uint64(wal.id),
wal.st.Term,
wal.st.Commit,
)
}
func (wal *bootstrappedWAL) AppendAndCommitEntries(ents []raftpb.Entry) {
wal.ents = append(wal.ents, ents...)
err := wal.w.Save(raftpb.HardState{}, ents)
if err != nil {
wal.lg.Fatal("failed to save hard state and entries", zap.Error(err))
}
if len(wal.ents) != 0 {
wal.st.Commit = wal.ents[len(wal.ents)-1].Index
}
}

View File

@@ -18,8 +18,8 @@ import (
     "sync"
     "sync/atomic"

-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/schema"
 )

 type Backend interface {
@@ -73,7 +73,7 @@ func (ci *consistentIndex) ConsistentIndex() uint64 {
     ci.mutex.Lock()
     defer ci.mutex.Unlock()

-    v, term := buckets.ReadConsistentIndex(ci.be.BatchTx())
+    v, term := schema.ReadConsistentIndex(ci.be.BatchTx())
     ci.SetConsistentIndex(v, term)
     return v
 }
@@ -86,7 +86,7 @@ func (ci *consistentIndex) SetConsistentIndex(v uint64, term uint64) {
 func (ci *consistentIndex) UnsafeSave(tx backend.BatchTx) {
     index := atomic.LoadUint64(&ci.consistentIndex)
     term := atomic.LoadUint64(&ci.term)
-    buckets.UnsafeUpdateConsistentIndex(tx, index, term, true)
+    schema.UnsafeUpdateConsistentIndex(tx, index, term, true)
 }

 func (ci *consistentIndex) SetBackend(be Backend) {
@@ -119,5 +119,5 @@ func (f *fakeConsistentIndex) SetBackend(_ Backend) {}
 func UpdateConsistentIndex(tx backend.BatchTx, index uint64, term uint64, onlyGrow bool) {
     tx.Lock()
     defer tx.Unlock()
-    buckets.UnsafeUpdateConsistentIndex(tx, index, term, onlyGrow)
+    schema.UnsafeUpdateConsistentIndex(tx, index, term, onlyGrow)
 }

View File

@@ -20,9 +20,9 @@ import (
     "time"

     "github.com/stretchr/testify/assert"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
+    "go.etcd.io/etcd/server/v3/storage/schema"
 )

 // TestConsistentIndex ensures that LoadConsistentIndex/Save/ConsistentIndex and backend.BatchTx can work well together.
@@ -37,7 +37,7 @@ func TestConsistentIndex(t *testing.T) {
     }
     tx.Lock()
-    buckets.UnsafeCreateMetaBucket(tx)
+    schema.UnsafeCreateMetaBucket(tx)
     tx.Unlock()
     be.ForceCommit()
     r := uint64(7890123)

View File

@@ -28,7 +28,7 @@ import (
     "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
     "go.etcd.io/etcd/client/pkg/v3/types"
     "go.etcd.io/etcd/pkg/v3/traceutil"
-    "go.etcd.io/etcd/server/v3/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"

     "go.uber.org/zap"
 )

View File

@@ -29,10 +29,8 @@ import (
     "go.etcd.io/etcd/pkg/v3/pbutil"
     "go.etcd.io/etcd/raft/v3"
     "go.etcd.io/etcd/raft/v3/raftpb"
-    "go.etcd.io/etcd/server/v3/config"
     "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
     "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
-    "go.etcd.io/etcd/server/v3/etcdserver/api/snap"

     "go.uber.org/zap"
 )
@@ -418,134 +416,6 @@ func (r *raftNode) advanceTicks(ticks int) {
     }
 }
func bootstrapRaftFromCluster(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID) *bootstrappedRaft {
member := cl.MemberByName(cfg.Name)
id := member.ID
wal := bootstrapNewWAL(cfg, id, cl.ID())
peers := make([]raft.Peer, len(ids))
for i, id := range ids {
var ctx []byte
ctx, err := json.Marshal((*cl).Member(id))
if err != nil {
cfg.Logger.Panic("failed to marshal member", zap.Error(err))
}
peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
}
cfg.Logger.Info(
"starting local member",
zap.String("local-member-id", id.String()),
zap.String("cluster-id", cl.ID().String()),
)
s := wal.MemoryStorage()
return &bootstrappedRaft{
lg: cfg.Logger,
heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
cl: cl,
config: raftConfig(cfg, uint64(wal.id), s),
peers: peers,
storage: s,
wal: wal,
}
}
func bootstrapRaftFromWal(cfg config.ServerConfig, snapshot *raftpb.Snapshot) *bootstrappedRaft {
wal := bootstrapWALFromSnapshot(cfg.Logger, cfg.WALDir(), snapshot, cfg.UnsafeNoFsync)
cfg.Logger.Info(
"restarting local member",
zap.String("cluster-id", wal.cid.String()),
zap.String("local-member-id", wal.id.String()),
zap.Uint64("commit-index", wal.st.Commit),
)
cl := membership.NewCluster(cfg.Logger)
cl.SetID(wal.id, wal.cid)
s := wal.MemoryStorage()
return &bootstrappedRaft{
lg: cfg.Logger,
heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
cl: cl,
config: raftConfig(cfg, uint64(wal.id), s),
storage: s,
wal: wal,
}
}
func bootstrapRaftFromWalStandalone(cfg config.ServerConfig, snapshot *raftpb.Snapshot) *bootstrappedRaft {
wal := bootstrapWALFromSnapshot(cfg.Logger, cfg.WALDir(), snapshot, cfg.UnsafeNoFsync)
// discard the previously uncommitted entries
wal.ents = wal.CommitedEntries()
entries := wal.ConfigChangeEntries()
// force commit config change entries
wal.AppendAndCommitEntries(entries)
cfg.Logger.Info(
"forcing restart member",
zap.String("cluster-id", wal.cid.String()),
zap.String("local-member-id", wal.id.String()),
zap.Uint64("commit-index", wal.st.Commit),
)
cl := membership.NewCluster(cfg.Logger)
cl.SetID(wal.id, wal.cid)
s := wal.MemoryStorage()
return &bootstrappedRaft{
lg: cfg.Logger,
heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
cl: cl,
config: raftConfig(cfg, uint64(wal.id), s),
storage: s,
wal: wal,
}
}
func raftConfig(cfg config.ServerConfig, id uint64, s *raft.MemoryStorage) *raft.Config {
return &raft.Config{
ID: id,
ElectionTick: cfg.ElectionTicks,
HeartbeatTick: 1,
Storage: s,
MaxSizePerMsg: maxSizePerMsg,
MaxInflightMsgs: maxInflightMsgs,
CheckQuorum: true,
PreVote: cfg.PreVote,
Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")),
}
}
type bootstrappedRaft struct {
lg *zap.Logger
heartbeat time.Duration
peers []raft.Peer
config *raft.Config
cl *membership.RaftCluster
storage *raft.MemoryStorage
wal *bootstrappedWAL
}
func (b *bootstrappedRaft) newRaftNode(ss *snap.Snapshotter) *raftNode {
var n raft.Node
if len(b.peers) == 0 {
n = raft.RestartNode(b.config)
} else {
n = raft.StartNode(b.config, b.peers)
}
raftStatusMu.Lock()
raftStatus = n.Status
raftStatusMu.Unlock()
return newRaftNode(
raftNodeConfig{
lg: b.lg,
isIDRemoved: func(id uint64) bool { return b.cl.IsIDRemoved(types.ID(id)) },
Node: n,
heartbeat: b.heartbeat,
raftStorage: b.storage,
storage: NewStorage(b.wal.w, ss),
},
)
}
// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain three kinds of
// ID-related entry:

View File

@@ -22,11 +22,9 @@ import (
     "math"
     "math/rand"
     "net/http"
-    "os"
     "path"
     "regexp"
     "strconv"
-    "strings"
     "sync"
     "sync/atomic"
     "time"
@@ -55,7 +53,6 @@ import (
     "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
     "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
     "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
-    "go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery"
     "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes"
     stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
     "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
@@ -65,10 +62,9 @@ import (
     serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
     "go.etcd.io/etcd/server/v3/lease"
     "go.etcd.io/etcd/server/v3/lease/leasehttp"
-    "go.etcd.io/etcd/server/v3/mvcc"
-    "go.etcd.io/etcd/server/v3/mvcc/backend"
-    "go.etcd.io/etcd/server/v3/mvcc/buckets"
-    "go.etcd.io/etcd/server/v3/wal"
+    "go.etcd.io/etcd/server/v3/storage/backend"
+    "go.etcd.io/etcd/server/v3/storage/mvcc"
+    "go.etcd.io/etcd/server/v3/storage/schema"
 )

 const (
@@ -296,8 +292,8 @@ type EtcdServer struct {
     *AccessController

-    // Ensure that storage version is updated only once.
-    storageVersionUpdated sync.Once
+    // Ensure that storage schema is updated only once.
+    updateStorageSchema sync.Once
 }

 type backendHooks struct {
@@ -317,7 +313,7 @@ func (bh *backendHooks) OnPreCommitUnsafe(tx backend.BatchTx) {
     bh.confStateLock.Lock()
     defer bh.confStateLock.Unlock()
     if bh.confStateDirty {
-        buckets.MustUnsafeSaveConfStateToBackend(bh.lg, tx, &bh.confState)
+        schema.MustUnsafeSaveConfStateToBackend(bh.lg, tx, &bh.confState)
         // save bh.confState
         bh.confStateDirty = false
     }
@@ -330,280 +326,6 @@ func (bh *backendHooks) SetConfState(confState *raftpb.ConfState) {
     bh.confStateDirty = true
 }
func bootstrap(cfg config.ServerConfig) (b *bootstrappedServer, err error) {
st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)
if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
cfg.Logger.Warn(
"exceeded recommended request limit",
zap.Uint("max-request-bytes", cfg.MaxRequestBytes),
zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))),
zap.Int("recommended-request-bytes", recommendedMaxRequestBytes),
zap.String("recommended-request-size", recommendedMaxRequestBytesString),
)
}
if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
return nil, fmt.Errorf("cannot access data directory: %v", terr)
}
haveWAL := wal.Exist(cfg.WALDir())
ss := bootstrapSnapshot(cfg)
be, ci, beExist, beHooks, err := bootstrapBackend(cfg)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
be.Close()
}
}()
prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout())
if err != nil {
return nil, err
}
switch {
case !haveWAL && !cfg.NewCluster:
b, err = bootstrapExistingClusterNoWAL(cfg, prt, st, be)
case !haveWAL && cfg.NewCluster:
b, err = bootstrapNewClusterNoWAL(cfg, prt, st, be)
case haveWAL:
b, err = bootstrapWithWAL(cfg, st, be, ss, beExist, beHooks, ci)
default:
be.Close()
return nil, fmt.Errorf("unsupported bootstrap config")
}
if err != nil {
return nil, err
}
if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
return nil, fmt.Errorf("cannot access member directory: %v", terr)
}
b.prt = prt
b.ci = ci
b.st = st
b.be = be
b.ss = ss
b.beHooks = beHooks
return b, nil
}
type bootstrappedServer struct {
raft *bootstrappedRaft
remotes []*membership.Member
prt http.RoundTripper
ci cindex.ConsistentIndexer
st v2store.Store
be backend.Backend
ss *snap.Snapshotter
beHooks *backendHooks
}
func bootstrapSnapshot(cfg config.ServerConfig) *snap.Snapshotter {
if err := fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
cfg.Logger.Fatal(
"failed to create snapshot directory",
zap.String("path", cfg.SnapDir()),
zap.Error(err),
)
}
if err := fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool {
return strings.HasPrefix(fileName, "tmp")
}); err != nil {
cfg.Logger.Error(
"failed to remove temp file(s) in snapshot directory",
zap.String("path", cfg.SnapDir()),
zap.Error(err),
)
}
return snap.New(cfg.Logger, cfg.SnapDir())
}
func bootstrapBackend(cfg config.ServerConfig) (be backend.Backend, ci cindex.ConsistentIndexer, beExist bool, beHooks *backendHooks, err error) {
beExist = fileutil.Exist(cfg.BackendPath())
ci = cindex.NewConsistentIndex(nil)
beHooks = &backendHooks{lg: cfg.Logger, indexer: ci}
be = openBackend(cfg, beHooks)
ci.SetBackend(be)
buckets.CreateMetaBucket(be.BatchTx())
if cfg.ExperimentalBootstrapDefragThresholdMegabytes != 0 {
err := maybeDefragBackend(cfg, be)
if err != nil {
be.Close()
return nil, nil, false, nil, err
}
}
cfg.Logger.Debug("restore consistentIndex", zap.Uint64("index", ci.ConsistentIndex()))
return be, ci, beExist, beHooks, nil
}
func bootstrapExistingClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper, st v2store.Store, be backend.Backend) (*bootstrappedServer, error) {
if err := cfg.VerifyJoinExisting(); err != nil {
return nil, err
}
cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
if err != nil {
return nil, err
}
existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt)
if gerr != nil {
return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
}
if err := membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil {
return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
}
if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt) {
return nil, fmt.Errorf("incompatible with current running cluster")
}
remotes := existingCluster.Members()
cl.SetID(types.ID(0), existingCluster.ID())
cl.SetStore(st)
cl.SetBackend(buckets.NewMembershipStore(cfg.Logger, be))
br := bootstrapRaftFromCluster(cfg, cl, nil)
cl.SetID(br.wal.id, existingCluster.ID())
return &bootstrappedServer{
raft: br,
remotes: remotes,
}, nil
}
func bootstrapNewClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper, st v2store.Store, be backend.Backend) (*bootstrappedServer, error) {
if err := cfg.VerifyBootstrap(); err != nil {
return nil, err
}
cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
if err != nil {
return nil, err
}
m := cl.MemberByName(cfg.Name)
if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.BootstrapTimeoutEffective()) {
return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
}
if cfg.ShouldDiscover() {
var str string
str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
if err != nil {
return nil, &DiscoveryError{Op: "join", Err: err}
}
var urlsmap types.URLsMap
urlsmap, err = types.NewURLsMap(str)
if err != nil {
return nil, err
}
if config.CheckDuplicateURL(urlsmap) {
return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
}
if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil {
return nil, err
}
}
cl.SetStore(st)
cl.SetBackend(buckets.NewMembershipStore(cfg.Logger, be))
br := bootstrapRaftFromCluster(cfg, cl, cl.MemberIDs())
cl.SetID(br.wal.id, cl.ID())
return &bootstrappedServer{
remotes: nil,
raft: br,
}, nil
}
func bootstrapWithWAL(cfg config.ServerConfig, st v2store.Store, be backend.Backend, ss *snap.Snapshotter, beExist bool, beHooks *backendHooks, ci cindex.ConsistentIndexer) (*bootstrappedServer, error) {
if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
return nil, fmt.Errorf("cannot write to member directory: %v", err)
}
if err := fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
}
if cfg.ShouldDiscover() {
cfg.Logger.Warn(
"discovery token is ignored since cluster already initialized; valid logs are found",
zap.String("wal-dir", cfg.WALDir()),
)
}
// Find a snapshot to start/restart a raft node
walSnaps, err := wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir())
if err != nil {
return nil, err
}
// snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding
// wal log entries
snapshot, err := ss.LoadNewestAvailable(walSnaps)
if err != nil && err != snap.ErrNoSnapshot {
return nil, err
}
if snapshot != nil {
if err = st.Recovery(snapshot.Data); err != nil {
cfg.Logger.Panic("failed to recover from snapshot", zap.Error(err))
}
if err = assertNoV2StoreContent(cfg.Logger, st, cfg.V2Deprecation); err != nil {
cfg.Logger.Error("illegal v2store content", zap.Error(err))
return nil, err
}
cfg.Logger.Info(
"recovered v2 store from snapshot",
zap.Uint64("snapshot-index", snapshot.Metadata.Index),
zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))),
)
if be, err = recoverSnapshotBackend(cfg, be, *snapshot, beExist, beHooks); err != nil {
cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err))
}
s1, s2 := be.Size(), be.SizeInUse()
cfg.Logger.Info(
"recovered v3 backend from snapshot",
zap.Int64("backend-size-bytes", s1),
zap.String("backend-size", humanize.Bytes(uint64(s1))),
zap.Int64("backend-size-in-use-bytes", s2),
zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))),
)
if beExist {
// TODO: remove kvindex != 0 checking when we do not expect users to upgrade
// etcd from pre-3.0 release.
kvindex := ci.ConsistentIndex()
if kvindex < snapshot.Metadata.Index {
if kvindex != 0 {
return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", cfg.BackendPath(), kvindex, snapshot.Metadata.Index)
}
cfg.Logger.Warn(
"consistent index was never saved",
zap.Uint64("snapshot-index", snapshot.Metadata.Index),
)
}
}
} else {
cfg.Logger.Info("No snapshot found. Recovering WAL from scratch!")
}
r := &bootstrappedServer{}
if !cfg.ForceNewCluster {
r.raft = bootstrapRaftFromWal(cfg, snapshot)
} else {
r.raft = bootstrapRaftFromWalStandalone(cfg, snapshot)
}
r.raft.cl.SetStore(st)
r.raft.cl.SetBackend(buckets.NewMembershipStore(cfg.Logger, be))
r.raft.cl.Recover(api.UpdateCapability)
if r.raft.cl.Version() != nil && !r.raft.cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
bepath := cfg.BackendPath()
os.RemoveAll(bepath)
return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
}
return r, nil
}
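The kvindex check in the deleted bootstrapWithWAL above encodes a recovery invariant worth spelling out: a reused backend must be at least as new as the snapshot it is paired with, except when the consistent index was never persisted at all. A hypothetical distillation of just that rule, not part of this commit (assumes fmt is imported):

// verifyBackendMatchesSnapshot restates the check performed above: a reused
// backend whose consistent index lags the snapshot is unusable, unless the
// index was never saved (kvindex == 0, as with databases created before
// etcd 3.0), in which case bootstrap only warns.
func verifyBackendMatchesSnapshot(kvindex, snapIndex uint64) error {
	if kvindex >= snapIndex {
		return nil // backend caught up to, or passed, the snapshot
	}
	if kvindex == 0 {
		return nil // index never persisted: tolerated with a warning
	}
	return fmt.Errorf("database file (index %d) does not match with snapshot (index %d)", kvindex, snapIndex)
}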
// NewServer creates a new EtcdServer from the supplied configuration. The // NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer. // configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) {
@ -1353,7 +1075,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
lg.Info("restored v2 store") lg.Info("restored v2 store")
s.cluster.SetBackend(buckets.NewMembershipStore(lg, newbe)) s.cluster.SetBackend(schema.NewMembershipStore(lg, newbe))
lg.Info("restoring cluster configuration") lg.Info("restoring cluster configuration")
@ -2414,8 +2136,8 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
"saved snapshot", "saved snapshot",
zap.Uint64("snapshot-index", snap.Metadata.Index), zap.Uint64("snapshot-index", snap.Metadata.Index),
) )
s.storageVersionUpdated.Do(func() { s.updateStorageSchema.Do(func() {
err := serverversion.UpdateStorageVersion(s.lg, s.be.BatchTx()) err := schema.UpdateStorageSchema(s.lg, s.be.BatchTx())
if err != nil { if err != nil {
s.lg.Warn("failed to update storage version", zap.Error(err)) s.lg.Warn("failed to update storage version", zap.Error(err))
} }
@ -2693,22 +2415,3 @@ func (s *EtcdServer) IsMemberExist(id types.ID) bool {
func (s *EtcdServer) raftStatus() raft.Status { func (s *EtcdServer) raftStatus() raft.Status {
return s.r.Node.Status() return s.r.Node.Status()
} }
func maybeDefragBackend(cfg config.ServerConfig, be backend.Backend) error {
size := be.Size()
sizeInUse := be.SizeInUse()
freeableMemory := uint(size - sizeInUse)
thresholdBytes := cfg.ExperimentalBootstrapDefragThresholdMegabytes * 1024 * 1024
if freeableMemory < thresholdBytes {
cfg.Logger.Info("Skipping defragmentation",
zap.Int64("current-db-size-bytes", size),
zap.String("current-db-size", humanize.Bytes(uint64(size))),
zap.Int64("current-db-size-in-use-bytes", sizeInUse),
zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))),
zap.Uint("experimental-bootstrap-defrag-threshold-bytes", thresholdBytes),
zap.String("experimental-bootstrap-defrag-threshold", humanize.Bytes(uint64(thresholdBytes))),
)
return nil
}
return be.Defrag()
}
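To make the threshold arithmetic in maybeDefragBackend concrete, a worked example with illustrative numbers: a 4 GiB database file of which 1 GiB is in use leaves 3 GiB freeable, so any configured threshold of 3072 MB or less triggers a defragmentation at bootstrap. A minimal sketch of the comparison (hypothetical helper name):

// shouldDefragAtBootstrap mirrors the comparison above. Example: with
// size = 4 GiB, sizeInUse = 1 GiB, and thresholdMB = 1024, it returns true,
// because 3 GiB of freeable space exceeds the 1024 MB threshold.
func shouldDefragAtBootstrap(size, sizeInUse int64, thresholdMB uint) bool {
	freeable := uint(size - sizeInUse)
	return freeable >= thresholdMB*1024*1024
}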

View File

@ -51,9 +51,9 @@ import (
"go.etcd.io/etcd/server/v3/mock/mockstorage" "go.etcd.io/etcd/server/v3/mock/mockstorage"
"go.etcd.io/etcd/server/v3/mock/mockstore" "go.etcd.io/etcd/server/v3/mock/mockstore"
"go.etcd.io/etcd/server/v3/mock/mockwait" "go.etcd.io/etcd/server/v3/mock/mockwait"
"go.etcd.io/etcd/server/v3/mvcc" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" "go.etcd.io/etcd/server/v3/storage/mvcc"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap" "go.uber.org/zap"
"go.uber.org/zap/zaptest" "go.uber.org/zap/zaptest"
) )
@ -651,7 +651,7 @@ func TestApplyConfigChangeUpdatesConsistIndex(t *testing.T) {
be, _ := betesting.NewDefaultTmpBackend(t) be, _ := betesting.NewDefaultTmpBackend(t)
defer betesting.Close(t, be) defer betesting.Close(t, be)
buckets.CreateMetaBucket(be.BatchTx()) schema.CreateMetaBucket(be.BatchTx())
ci := cindex.NewConsistentIndex(be) ci := cindex.NewConsistentIndex(be)
srv := &EtcdServer{ srv := &EtcdServer{
@ -696,9 +696,9 @@ func TestApplyConfigChangeUpdatesConsistIndex(t *testing.T) {
tx.Lock() tx.Lock()
defer tx.Unlock() defer tx.Unlock()
srv.beHooks.OnPreCommitUnsafe(tx) srv.beHooks.OnPreCommitUnsafe(tx)
assert.Equal(t, raftpb.ConfState{Voters: []uint64{2}}, *buckets.UnsafeConfStateFromBackend(lg, tx)) assert.Equal(t, raftpb.ConfState{Voters: []uint64{2}}, *schema.UnsafeConfStateFromBackend(lg, tx))
}) })
rindex, rterm := buckets.ReadConsistentIndex(be.BatchTx()) rindex, rterm := schema.ReadConsistentIndex(be.BatchTx())
assert.Equal(t, consistIndex, rindex) assert.Equal(t, consistIndex, rindex)
assert.Equal(t, uint64(4), rterm) assert.Equal(t, uint64(4), rterm)
} }

View File

@ -19,7 +19,7 @@ import (
"go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap" "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"go.uber.org/zap" "go.uber.org/zap"

View File

@ -15,19 +15,10 @@
package etcdserver package etcdserver
import ( import (
"io"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/pkg/v3/pbutil"
"go.etcd.io/etcd/raft/v3"
"go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/config"
"go.etcd.io/etcd/server/v3/etcdserver/api/snap" "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
"go.etcd.io/etcd/server/v3/wal" "go.etcd.io/etcd/server/v3/wal"
"go.etcd.io/etcd/server/v3/wal/walpb" "go.etcd.io/etcd/server/v3/wal/walpb"
"go.uber.org/zap"
) )
type Storage interface { type Storage interface {
@ -81,133 +72,3 @@ func (st *storage) Release(snap raftpb.Snapshot) error {
} }
return st.Snapshotter.ReleaseSnapDBs(snap) return st.Snapshotter.ReleaseSnapDBs(snap)
} }
// bootstrapWALFromSnapshot reads the WAL at the given snap and returns the wal, its latest HardState and cluster ID, and all entries that appear
// after the position of the given snap in the WAL.
// The snap must have been previously saved to the WAL, or this call will panic.
func bootstrapWALFromSnapshot(lg *zap.Logger, waldir string, snapshot *raftpb.Snapshot, unsafeNoFsync bool) *bootstrappedWAL {
var walsnap walpb.Snapshot
if snapshot != nil {
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
}
repaired := false
for {
w, err := wal.Open(lg, waldir, walsnap)
if err != nil {
lg.Fatal("failed to open WAL", zap.Error(err))
}
if unsafeNoFsync {
w.SetUnsafeNoFsync()
}
wmetadata, st, ents, err := w.ReadAll()
if err != nil {
w.Close()
// we can only repair ErrUnexpectedEOF and we never repair twice.
if repaired || err != io.ErrUnexpectedEOF {
lg.Fatal("failed to read WAL, cannot be repaired", zap.Error(err))
}
if !wal.Repair(lg, waldir) {
lg.Fatal("failed to repair WAL", zap.Error(err))
} else {
lg.Info("repaired WAL", zap.Error(err))
repaired = true
}
continue
}
var metadata pb.Metadata
pbutil.MustUnmarshal(&metadata, wmetadata)
id := types.ID(metadata.NodeID)
cid := types.ID(metadata.ClusterID)
return &bootstrappedWAL{
lg: lg,
w: w,
id: id,
cid: cid,
st: &st,
ents: ents,
snapshot: snapshot,
}
}
}
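The loop above follows a repair-once discipline: only a torn tail (io.ErrUnexpectedEOF) is repairable, and only a single repair attempt is allowed. A hedged distillation of that retry logic, with the metadata and HardState handling elided (hypothetical helper; assumes the same imports the deleted code used):

// openWALWithOneRepair condenses the retry discipline of
// bootstrapWALFromSnapshot: a torn tail may be repaired exactly once; any
// other error, or a second failure, is fatal.
func openWALWithOneRepair(lg *zap.Logger, waldir string, snap walpb.Snapshot) (*wal.WAL, []raftpb.Entry) {
	repaired := false
	for {
		w, err := wal.Open(lg, waldir, snap)
		if err != nil {
			lg.Fatal("failed to open WAL", zap.Error(err))
		}
		_, _, ents, rerr := w.ReadAll()
		if rerr == nil {
			return w, ents
		}
		w.Close()
		if repaired || rerr != io.ErrUnexpectedEOF || !wal.Repair(lg, waldir) {
			lg.Fatal("failed to read WAL, cannot be repaired", zap.Error(rerr))
		}
		lg.Info("repaired WAL", zap.Error(rerr))
		repaired = true
	}
}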
func bootstrapNewWAL(cfg config.ServerConfig, nodeID, clusterID types.ID) *bootstrappedWAL {
metadata := pbutil.MustMarshal(
&pb.Metadata{
NodeID: uint64(nodeID),
ClusterID: uint64(clusterID),
},
)
w, err := wal.Create(cfg.Logger, cfg.WALDir(), metadata)
if err != nil {
cfg.Logger.Panic("failed to create WAL", zap.Error(err))
}
if cfg.UnsafeNoFsync {
w.SetUnsafeNoFsync()
}
return &bootstrappedWAL{
lg: cfg.Logger,
w: w,
id: nodeID,
cid: clusterID,
}
}
type bootstrappedWAL struct {
lg *zap.Logger
w *wal.WAL
id, cid types.ID
st *raftpb.HardState
ents []raftpb.Entry
snapshot *raftpb.Snapshot
}
func (wal *bootstrappedWAL) MemoryStorage() *raft.MemoryStorage {
s := raft.NewMemoryStorage()
if wal.snapshot != nil {
s.ApplySnapshot(*wal.snapshot)
}
if wal.st != nil {
s.SetHardState(*wal.st)
}
if len(wal.ents) != 0 {
s.Append(wal.ents)
}
return s
}
func (wal *bootstrappedWAL) CommitedEntries() []raftpb.Entry {
for i, ent := range wal.ents {
if ent.Index > wal.st.Commit {
wal.lg.Info(
"discarding uncommitted WAL entries",
zap.Uint64("entry-index", ent.Index),
zap.Uint64("commit-index-from-wal", wal.st.Commit),
zap.Int("number-of-discarded-entries", len(wal.ents)-i),
)
return wal.ents[:i]
}
}
return wal.ents
}
func (wal *bootstrappedWAL) ConfigChangeEntries() []raftpb.Entry {
return createConfigChangeEnts(
wal.lg,
getIDs(wal.lg, wal.snapshot, wal.ents),
uint64(wal.id),
wal.st.Term,
wal.st.Commit,
)
}
func (wal *bootstrappedWAL) AppendAndCommitEntries(ents []raftpb.Entry) {
wal.ents = append(wal.ents, ents...)
err := wal.w.Save(raftpb.HardState{}, ents)
if err != nil {
wal.lg.Fatal("failed to save hard state and entries", zap.Error(err))
}
if len(wal.ents) != 0 {
wal.st.Commit = wal.ents[len(wal.ents)-1].Index
}
}
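Taken together, the bootstrappedWAL helpers above compose into the restart flow. A hypothetical sketch of how the force-new-cluster path (bootstrapRaftFromWalStandalone, referenced earlier in this diff) is expected to wire them, with error handling elided:

// bootstrapStandaloneSketch composes the helpers defined above: open the WAL
// at the snapshot, drop entries the cluster never committed, replace the log
// tail with synthesized config-change entries, and hand the result to raft
// as its initial storage.
func bootstrapStandaloneSketch(lg *zap.Logger, waldir string, snapshot *raftpb.Snapshot) *raft.MemoryStorage {
	w := bootstrapWALFromSnapshot(lg, waldir, snapshot, false)
	w.ents = w.CommitedEntries()
	w.AppendAndCommitEntries(w.ConfigChangeEntries())
	return w.MemoryStorage()
}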

View File

@ -30,7 +30,7 @@ import (
"go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/lease/leasehttp" "go.etcd.io/etcd/server/v3/lease/leasehttp"
"go.etcd.io/etcd/server/v3/mvcc" "go.etcd.io/etcd/server/v3/storage/mvcc"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"go.uber.org/zap" "go.uber.org/zap"

View File

@ -22,7 +22,7 @@ import (
"time" "time"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap/zaptest" "go.uber.org/zap/zaptest"
) )

View File

@ -26,8 +26,8 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb" pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/server/v3/lease/leasepb" "go.etcd.io/etcd/server/v3/lease/leasepb"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -336,7 +336,7 @@ func (le *lessor) Revoke(id LeaseID) error {
// lease deletion needs to be in the same backend transaction with the // lease deletion needs to be in the same backend transaction with the
// kv deletion. Or we might end up with not executing the revoke or not // kv deletion. Or we might end up with not executing the revoke or not
// deleting the keys if etcdserver fails in between. // deleting the keys if etcdserver fails in between.
buckets.UnsafeDeleteLease(le.b.BatchTx(), &leasepb.Lease{ID: int64(l.ID)}) schema.UnsafeDeleteLease(le.b.BatchTx(), &leasepb.Lease{ID: int64(l.ID)})
txn.End() txn.End()
@ -770,8 +770,8 @@ func (le *lessor) initAndRecover() {
tx := le.b.BatchTx() tx := le.b.BatchTx()
tx.Lock() tx.Lock()
buckets.UnsafeCreateLeaseBucket(tx) schema.UnsafeCreateLeaseBucket(tx)
lpbs := buckets.MustUnsafeGetAllLeases(tx) lpbs := schema.MustUnsafeGetAllLeases(tx)
tx.Unlock() tx.Unlock()
for _, lpb := range lpbs { for _, lpb := range lpbs {
ID := LeaseID(lpb.ID) ID := LeaseID(lpb.ID)
@ -818,7 +818,7 @@ func (l *Lease) persistTo(b backend.Backend) {
tx := b.BatchTx() tx := b.BatchTx()
tx.Lock() tx.Lock()
defer tx.Unlock() defer tx.Unlock()
buckets.MustUnsafePutLease(tx, &lpb) schema.MustUnsafePutLease(tx, &lpb)
} }
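The lessor changes above reduce to a simple persist/read contract against the new schema package. A minimal sketch of the roundtrip in one batch transaction (hypothetical helper name; assumes an open backend and elides error handling):

// leaseRoundTripSketch creates the lease bucket, persists one lease through
// schema, and reads it back, matching the calls used by Revoke, initAndRecover,
// and persistTo above.
func leaseRoundTripSketch(be backend.Backend) *leasepb.Lease {
	tx := be.BatchTx()
	tx.Lock()
	schema.UnsafeCreateLeaseBucket(tx)
	schema.MustUnsafePutLease(tx, &leasepb.Lease{ID: 1, TTL: 60})
	got := schema.MustUnsafeGetLease(tx, 1) // got.ID == 1, got.TTL == 60
	tx.Unlock()
	be.ForceCommit()
	return got
}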
// TTL returns the TTL of the Lease. // TTL returns the TTL of the Lease.

View File

@ -19,7 +19,7 @@ import (
"testing" "testing"
"time" "time"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap" "go.uber.org/zap"
) )

View File

@ -27,8 +27,8 @@ import (
"time" "time"
pb "go.etcd.io/etcd/api/v3/etcdserverpb" pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -95,7 +95,7 @@ func TestLessorGrant(t *testing.T) {
tx := be.BatchTx() tx := be.BatchTx()
tx.Lock() tx.Lock()
defer tx.Unlock() defer tx.Unlock()
lpb := buckets.MustUnsafeGetLease(tx, int64(l.ID)) lpb := schema.MustUnsafeGetLease(tx, int64(l.ID))
if lpb == nil { if lpb == nil {
t.Errorf("lpb = %d, want not nil", lpb) t.Errorf("lpb = %d, want not nil", lpb)
} }
@ -199,7 +199,7 @@ func TestLessorRevoke(t *testing.T) {
tx := be.BatchTx() tx := be.BatchTx()
tx.Lock() tx.Lock()
defer tx.Unlock() defer tx.Unlock()
lpb := buckets.MustUnsafeGetLease(tx, int64(l.ID)) lpb := schema.MustUnsafeGetLease(tx, int64(l.ID))
if lpb != nil { if lpb != nil {
t.Errorf("lpb = %d, want nil", lpb) t.Errorf("lpb = %d, want nil", lpb)
} }

View File

@ -20,7 +20,7 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb" pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/mvcc" "go.etcd.io/etcd/server/v3/storage/mvcc"
) )
type watchRange struct { type watchRange struct {

View File

@ -20,8 +20,8 @@ import (
"time" "time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
) )
func BenchmarkBackendPut(b *testing.B) { func BenchmarkBackendPut(b *testing.B) {
@ -42,13 +42,13 @@ func BenchmarkBackendPut(b *testing.B) {
batchTx := backend.BatchTx() batchTx := backend.BatchTx()
batchTx.Lock() batchTx.Lock()
batchTx.UnsafeCreateBucket(buckets.Test) batchTx.UnsafeCreateBucket(schema.Test)
batchTx.Unlock() batchTx.Unlock()
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
batchTx.Lock() batchTx.Lock()
batchTx.UnsafePut(buckets.Test, keys[i], value) batchTx.UnsafePut(schema.Test, keys[i], value)
batchTx.Unlock() batchTx.Unlock()
} }
} }

View File

@ -23,9 +23,9 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
bolt "go.etcd.io/bbolt" bolt "go.etcd.io/bbolt"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
) )
func TestBackendClose(t *testing.T) { func TestBackendClose(t *testing.T) {
@ -53,8 +53,8 @@ func TestBackendSnapshot(t *testing.T) {
tx := b.BatchTx() tx := b.BatchTx()
tx.Lock() tx.Lock()
tx.UnsafeCreateBucket(buckets.Test) tx.UnsafeCreateBucket(schema.Test)
tx.UnsafePut(buckets.Test, []byte("foo"), []byte("bar")) tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
tx.Unlock() tx.Unlock()
b.ForceCommit() b.ForceCommit()
@ -78,7 +78,7 @@ func TestBackendSnapshot(t *testing.T) {
newTx := nb.BatchTx() newTx := nb.BatchTx()
newTx.Lock() newTx.Lock()
ks, _ := newTx.UnsafeRange(buckets.Test, []byte("foo"), []byte("goo"), 0) ks, _ := newTx.UnsafeRange(schema.Test, []byte("foo"), []byte("goo"), 0)
if len(ks) != 1 { if len(ks) != 1 {
t.Errorf("len(kvs) = %d, want 1", len(ks)) t.Errorf("len(kvs) = %d, want 1", len(ks))
} }
@ -95,8 +95,8 @@ func TestBackendBatchIntervalCommit(t *testing.T) {
tx := b.BatchTx() tx := b.BatchTx()
tx.Lock() tx.Lock()
tx.UnsafeCreateBucket(buckets.Test) tx.UnsafeCreateBucket(schema.Test)
tx.UnsafePut(buckets.Test, []byte("foo"), []byte("bar")) tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
tx.Unlock() tx.Unlock()
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
@ -127,9 +127,9 @@ func TestBackendDefrag(t *testing.T) {
tx := b.BatchTx() tx := b.BatchTx()
tx.Lock() tx.Lock()
tx.UnsafeCreateBucket(buckets.Test) tx.UnsafeCreateBucket(schema.Test)
for i := 0; i < backend.DefragLimitForTest()+100; i++ { for i := 0; i < backend.DefragLimitForTest()+100; i++ {
tx.UnsafePut(buckets.Test, []byte(fmt.Sprintf("foo_%d", i)), []byte("bar")) tx.UnsafePut(schema.Test, []byte(fmt.Sprintf("foo_%d", i)), []byte("bar"))
} }
tx.Unlock() tx.Unlock()
b.ForceCommit() b.ForceCommit()
@ -138,7 +138,7 @@ func TestBackendDefrag(t *testing.T) {
tx = b.BatchTx() tx = b.BatchTx()
tx.Lock() tx.Lock()
for i := 0; i < 50; i++ { for i := 0; i < 50; i++ {
tx.UnsafeDelete(buckets.Test, []byte(fmt.Sprintf("foo_%d", i))) tx.UnsafeDelete(schema.Test, []byte(fmt.Sprintf("foo_%d", i)))
} }
tx.Unlock() tx.Unlock()
b.ForceCommit() b.ForceCommit()
@ -172,8 +172,8 @@ func TestBackendDefrag(t *testing.T) {
// try put more keys after shrink. // try put more keys after shrink.
tx = b.BatchTx() tx = b.BatchTx()
tx.Lock() tx.Lock()
tx.UnsafeCreateBucket(buckets.Test) tx.UnsafeCreateBucket(schema.Test)
tx.UnsafePut(buckets.Test, []byte("more"), []byte("bar")) tx.UnsafePut(schema.Test, []byte("more"), []byte("bar"))
tx.Unlock() tx.Unlock()
b.ForceCommit() b.ForceCommit()
} }
@ -185,15 +185,15 @@ func TestBackendWriteback(t *testing.T) {
tx := b.BatchTx() tx := b.BatchTx()
tx.Lock() tx.Lock()
tx.UnsafeCreateBucket(buckets.Key) tx.UnsafeCreateBucket(schema.Key)
tx.UnsafePut(buckets.Key, []byte("abc"), []byte("bar")) tx.UnsafePut(schema.Key, []byte("abc"), []byte("bar"))
tx.UnsafePut(buckets.Key, []byte("def"), []byte("baz")) tx.UnsafePut(schema.Key, []byte("def"), []byte("baz"))
tx.UnsafePut(buckets.Key, []byte("overwrite"), []byte("1")) tx.UnsafePut(schema.Key, []byte("overwrite"), []byte("1"))
tx.Unlock() tx.Unlock()
// overwrites should be propagated too // overwrites should be propagated too
tx.Lock() tx.Lock()
tx.UnsafePut(buckets.Key, []byte("overwrite"), []byte("2")) tx.UnsafePut(schema.Key, []byte("overwrite"), []byte("2"))
tx.Unlock() tx.Unlock()
keys := []struct { keys := []struct {
@ -246,7 +246,7 @@ func TestBackendWriteback(t *testing.T) {
func() { func() {
rtx.RLock() rtx.RLock()
defer rtx.RUnlock() defer rtx.RUnlock()
k, v := rtx.UnsafeRange(buckets.Key, tt.key, tt.end, tt.limit) k, v := rtx.UnsafeRange(schema.Key, tt.key, tt.end, tt.limit)
if !reflect.DeepEqual(tt.wkey, k) || !reflect.DeepEqual(tt.wval, v) { if !reflect.DeepEqual(tt.wkey, k) || !reflect.DeepEqual(tt.wval, v) {
t.Errorf("#%d: want k=%+v, v=%+v; got k=%+v, v=%+v", i, tt.wkey, tt.wval, k, v) t.Errorf("#%d: want k=%+v, v=%+v; got k=%+v, v=%+v", i, tt.wkey, tt.wval, k, v)
} }
@ -261,20 +261,20 @@ func TestConcurrentReadTx(t *testing.T) {
wtx1 := b.BatchTx() wtx1 := b.BatchTx()
wtx1.Lock() wtx1.Lock()
wtx1.UnsafeCreateBucket(buckets.Key) wtx1.UnsafeCreateBucket(schema.Key)
wtx1.UnsafePut(buckets.Key, []byte("abc"), []byte("ABC")) wtx1.UnsafePut(schema.Key, []byte("abc"), []byte("ABC"))
wtx1.UnsafePut(buckets.Key, []byte("overwrite"), []byte("1")) wtx1.UnsafePut(schema.Key, []byte("overwrite"), []byte("1"))
wtx1.Unlock() wtx1.Unlock()
wtx2 := b.BatchTx() wtx2 := b.BatchTx()
wtx2.Lock() wtx2.Lock()
wtx2.UnsafePut(buckets.Key, []byte("def"), []byte("DEF")) wtx2.UnsafePut(schema.Key, []byte("def"), []byte("DEF"))
wtx2.UnsafePut(buckets.Key, []byte("overwrite"), []byte("2")) wtx2.UnsafePut(schema.Key, []byte("overwrite"), []byte("2"))
wtx2.Unlock() wtx2.Unlock()
rtx := b.ConcurrentReadTx() rtx := b.ConcurrentReadTx()
rtx.RLock() // no-op rtx.RLock() // no-op
k, v := rtx.UnsafeRange(buckets.Key, []byte("abc"), []byte("\xff"), 0) k, v := rtx.UnsafeRange(schema.Key, []byte("abc"), []byte("\xff"), 0)
rtx.RUnlock() rtx.RUnlock()
wKey := [][]byte{[]byte("abc"), []byte("def"), []byte("overwrite")} wKey := [][]byte{[]byte("abc"), []byte("def"), []byte("overwrite")}
wVal := [][]byte{[]byte("ABC"), []byte("DEF"), []byte("2")} wVal := [][]byte{[]byte("ABC"), []byte("DEF"), []byte("2")}
@ -291,10 +291,10 @@ func TestBackendWritebackForEach(t *testing.T) {
tx := b.BatchTx() tx := b.BatchTx()
tx.Lock() tx.Lock()
tx.UnsafeCreateBucket(buckets.Key) tx.UnsafeCreateBucket(schema.Key)
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
k := []byte(fmt.Sprintf("%04d", i)) k := []byte(fmt.Sprintf("%04d", i))
tx.UnsafePut(buckets.Key, k, []byte("bar")) tx.UnsafePut(schema.Key, k, []byte("bar"))
} }
tx.Unlock() tx.Unlock()
@ -302,10 +302,10 @@ func TestBackendWritebackForEach(t *testing.T) {
b.ForceCommit() b.ForceCommit()
tx.Lock() tx.Lock()
tx.UnsafeCreateBucket(buckets.Key) tx.UnsafeCreateBucket(schema.Key)
for i := 5; i < 20; i++ { for i := 5; i < 20; i++ {
k := []byte(fmt.Sprintf("%04d", i)) k := []byte(fmt.Sprintf("%04d", i))
tx.UnsafePut(buckets.Key, k, []byte("bar")) tx.UnsafePut(schema.Key, k, []byte("bar"))
} }
tx.Unlock() tx.Unlock()
@ -316,7 +316,7 @@ func TestBackendWritebackForEach(t *testing.T) {
} }
rtx := b.ReadTx() rtx := b.ReadTx()
rtx.RLock() rtx.RLock()
assert.NoError(t, rtx.UnsafeForEach(buckets.Key, getSeq)) assert.NoError(t, rtx.UnsafeForEach(schema.Key, getSeq))
rtx.RUnlock() rtx.RUnlock()
partialSeq := seq partialSeq := seq
@ -325,7 +325,7 @@ func TestBackendWritebackForEach(t *testing.T) {
b.ForceCommit() b.ForceCommit()
tx.Lock() tx.Lock()
assert.NoError(t, tx.UnsafeForEach(buckets.Key, getSeq)) assert.NoError(t, tx.UnsafeForEach(schema.Key, getSeq))
tx.Unlock() tx.Unlock()
if seq != partialSeq { if seq != partialSeq {

View File

@ -20,9 +20,9 @@ import (
"time" "time"
bolt "go.etcd.io/bbolt" bolt "go.etcd.io/bbolt"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
) )
func TestBatchTxPut(t *testing.T) { func TestBatchTxPut(t *testing.T) {
@ -34,18 +34,18 @@ func TestBatchTxPut(t *testing.T) {
tx.Lock() tx.Lock()
// create bucket // create bucket
tx.UnsafeCreateBucket(buckets.Test) tx.UnsafeCreateBucket(schema.Test)
// put // put
v := []byte("bar") v := []byte("bar")
tx.UnsafePut(buckets.Test, []byte("foo"), v) tx.UnsafePut(schema.Test, []byte("foo"), v)
tx.Unlock() tx.Unlock()
// check put result before and after tx is committed // check put result before and after tx is committed
for k := 0; k < 2; k++ { for k := 0; k < 2; k++ {
tx.Lock() tx.Lock()
_, gv := tx.UnsafeRange(buckets.Test, []byte("foo"), nil, 0) _, gv := tx.UnsafeRange(schema.Test, []byte("foo"), nil, 0)
tx.Unlock() tx.Unlock()
if !reflect.DeepEqual(gv[0], v) { if !reflect.DeepEqual(gv[0], v) {
t.Errorf("v = %s, want %s", string(gv[0]), string(v)) t.Errorf("v = %s, want %s", string(gv[0]), string(v))
@ -62,12 +62,12 @@ func TestBatchTxRange(t *testing.T) {
tx.Lock() tx.Lock()
defer tx.Unlock() defer tx.Unlock()
tx.UnsafeCreateBucket(buckets.Test) tx.UnsafeCreateBucket(schema.Test)
// put keys // put keys
allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2")} allKeys := [][]byte{[]byte("foo"), []byte("foo1"), []byte("foo2")}
allVals := [][]byte{[]byte("bar"), []byte("bar1"), []byte("bar2")} allVals := [][]byte{[]byte("bar"), []byte("bar1"), []byte("bar2")}
for i := range allKeys { for i := range allKeys {
tx.UnsafePut(buckets.Test, allKeys[i], allVals[i]) tx.UnsafePut(schema.Test, allKeys[i], allVals[i])
} }
tests := []struct { tests := []struct {
@ -115,7 +115,7 @@ func TestBatchTxRange(t *testing.T) {
}, },
} }
for i, tt := range tests { for i, tt := range tests {
keys, vals := tx.UnsafeRange(buckets.Test, tt.key, tt.endKey, tt.limit) keys, vals := tx.UnsafeRange(schema.Test, tt.key, tt.endKey, tt.limit)
if !reflect.DeepEqual(keys, tt.wkeys) { if !reflect.DeepEqual(keys, tt.wkeys) {
t.Errorf("#%d: keys = %+v, want %+v", i, keys, tt.wkeys) t.Errorf("#%d: keys = %+v, want %+v", i, keys, tt.wkeys)
} }
@ -132,17 +132,17 @@ func TestBatchTxDelete(t *testing.T) {
tx := b.BatchTx() tx := b.BatchTx()
tx.Lock() tx.Lock()
tx.UnsafeCreateBucket(buckets.Test) tx.UnsafeCreateBucket(schema.Test)
tx.UnsafePut(buckets.Test, []byte("foo"), []byte("bar")) tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
tx.UnsafeDelete(buckets.Test, []byte("foo")) tx.UnsafeDelete(schema.Test, []byte("foo"))
tx.Unlock() tx.Unlock()
// check put result before and after tx is committed // check put result before and after tx is committed
for k := 0; k < 2; k++ { for k := 0; k < 2; k++ {
tx.Lock() tx.Lock()
ks, _ := tx.UnsafeRange(buckets.Test, []byte("foo"), nil, 0) ks, _ := tx.UnsafeRange(schema.Test, []byte("foo"), nil, 0)
tx.Unlock() tx.Unlock()
if len(ks) != 0 { if len(ks) != 0 {
t.Errorf("keys on foo = %v, want nil", ks) t.Errorf("keys on foo = %v, want nil", ks)
@ -157,15 +157,15 @@ func TestBatchTxCommit(t *testing.T) {
tx := b.BatchTx() tx := b.BatchTx()
tx.Lock() tx.Lock()
tx.UnsafeCreateBucket(buckets.Test) tx.UnsafeCreateBucket(schema.Test)
tx.UnsafePut(buckets.Test, []byte("foo"), []byte("bar")) tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
tx.Unlock() tx.Unlock()
tx.Commit() tx.Commit()
// check whether put happens via db view // check whether put happens via db view
backend.DbFromBackendForTest(b).View(func(tx *bolt.Tx) error { backend.DbFromBackendForTest(b).View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(buckets.Test.Name()) bucket := tx.Bucket(schema.Test.Name())
if bucket == nil { if bucket == nil {
t.Errorf("bucket test does not exit") t.Errorf("bucket test does not exit")
return nil return nil
@ -186,14 +186,14 @@ func TestBatchTxBatchLimitCommit(t *testing.T) {
tx := b.BatchTx() tx := b.BatchTx()
tx.Lock() tx.Lock()
tx.UnsafeCreateBucket(buckets.Test) tx.UnsafeCreateBucket(schema.Test)
tx.UnsafePut(buckets.Test, []byte("foo"), []byte("bar")) tx.UnsafePut(schema.Test, []byte("foo"), []byte("bar"))
tx.Unlock() tx.Unlock()
// batch limit commit should have been triggered // batch limit commit should have been triggered
// check whether put happens via db view // check whether put happens via db view
backend.DbFromBackendForTest(b).View(func(tx *bolt.Tx) error { backend.DbFromBackendForTest(b).View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(buckets.Test.Name()) bucket := tx.Bucket(schema.Test.Name())
if bucket == nil { if bucket == nil {
t.Errorf("bucket test does not exit") t.Errorf("bucket test does not exit")
return nil return nil

View File

@ -20,13 +20,13 @@ import (
"time" "time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
) )
var ( var (
bucket = buckets.Test bucket = schema.Test
key = []byte("key") key = []byte("key")
) )

View File

@ -21,7 +21,7 @@ import (
"time" "time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"go.uber.org/zap/zaptest" "go.uber.org/zap/zaptest"
) )

View File

@ -20,7 +20,7 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
) )
type RangeOptions struct { type RangeOptions struct {

View File

@ -26,8 +26,8 @@ import (
"go.etcd.io/etcd/client/pkg/v3/testutil" "go.etcd.io/etcd/client/pkg/v3/testutil"
"go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"

View File

@ -27,8 +27,8 @@ import (
"go.etcd.io/etcd/pkg/v3/schedule" "go.etcd.io/etcd/pkg/v3/schedule"
"go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -122,8 +122,8 @@ func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfi
tx := s.b.BatchTx() tx := s.b.BatchTx()
tx.Lock() tx.Lock()
tx.UnsafeCreateBucket(buckets.Key) tx.UnsafeCreateBucket(schema.Key)
tx.UnsafeCreateBucket(buckets.Meta) tx.UnsafeCreateBucket(schema.Meta)
tx.Unlock() tx.Unlock()
s.b.ForceCommit() s.b.ForceCommit()
@ -161,7 +161,7 @@ func (s *store) Hash() (hash uint32, revision int64, err error) {
start := time.Now() start := time.Now()
s.b.ForceCommit() s.b.ForceCommit()
h, err := s.b.Hash(buckets.DefaultIgnores) h, err := s.b.Hash(schema.DefaultIgnores)
hashSec.Observe(time.Since(start).Seconds()) hashSec.Observe(time.Since(start).Seconds())
return h, s.currentRev, err return h, s.currentRev, err
@ -197,8 +197,8 @@ func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev
lower := revision{main: compactRev + 1} lower := revision{main: compactRev + 1}
h := crc32.New(crc32.MakeTable(crc32.Castagnoli)) h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
h.Write(buckets.Key.Name()) h.Write(schema.Key.Name())
err = tx.UnsafeForEach(buckets.Key, func(k, v []byte) error { err = tx.UnsafeForEach(schema.Key, func(k, v []byte) error {
kr := bytesToRev(k) kr := bytesToRev(k)
if !upper.GreaterThan(kr) { if !upper.GreaterThan(kr) {
return nil return nil
@ -340,8 +340,8 @@ func (s *store) restore() error {
s.lg.Info( s.lg.Info(
"restored last compact revision", "restored last compact revision",
zap.Stringer("meta-bucket-name", buckets.Meta), zap.Stringer("meta-bucket-name", schema.Meta),
zap.String("meta-bucket-name-key", string(buckets.FinishedCompactKeyName)), zap.String("meta-bucket-name-key", string(schema.FinishedCompactKeyName)),
zap.Int64("restored-compact-revision", s.compactMainRev), zap.Int64("restored-compact-revision", s.compactMainRev),
) )
s.revMu.Unlock() s.revMu.Unlock()
@ -351,7 +351,7 @@ func (s *store) restore() error {
keysGauge.Set(0) keysGauge.Set(0)
rkvc, revc := restoreIntoIndex(s.lg, s.kvindex) rkvc, revc := restoreIntoIndex(s.lg, s.kvindex)
for { for {
keys, vals := tx.UnsafeRange(buckets.Key, min, max, int64(restoreChunkKeys)) keys, vals := tx.UnsafeRange(schema.Key, min, max, int64(restoreChunkKeys))
if len(keys) == 0 { if len(keys) == 0 {
break break
} }
@ -412,8 +412,8 @@ func (s *store) restore() error {
s.lg.Info( s.lg.Info(
"resume scheduled compaction", "resume scheduled compaction",
zap.Stringer("meta-bucket-name", buckets.Meta), zap.Stringer("meta-bucket-name", schema.Meta),
zap.String("meta-bucket-name-key", string(buckets.ScheduledCompactKeyName)), zap.String("meta-bucket-name-key", string(schema.ScheduledCompactKeyName)),
zap.Int64("scheduled-compact-revision", scheduledCompact), zap.Int64("scheduled-compact-revision", scheduledCompact),
) )
} }

View File

@ -22,8 +22,8 @@ import (
"go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/etcdserver/cindex" "go.etcd.io/etcd/server/v3/etcdserver/cindex"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -84,7 +84,7 @@ func BenchmarkConsistentIndex(b *testing.B) {
tx := be.BatchTx() tx := be.BatchTx()
tx.Lock() tx.Lock()
buckets.UnsafeCreateMetaBucket(tx) schema.UnsafeCreateMetaBucket(tx)
ci.UnsafeSave(tx) ci.UnsafeSave(tx)
tx.Unlock() tx.Unlock()

View File

@ -18,7 +18,7 @@ import (
"encoding/binary" "encoding/binary"
"time" "time"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -43,11 +43,11 @@ func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struc
tx := s.b.BatchTx() tx := s.b.BatchTx()
tx.Lock() tx.Lock()
keys, _ := tx.UnsafeRange(buckets.Key, last, end, int64(batchNum)) keys, _ := tx.UnsafeRange(schema.Key, last, end, int64(batchNum))
for _, key := range keys { for _, key := range keys {
rev = bytesToRev(key) rev = bytesToRev(key)
if _, ok := keep[rev]; !ok { if _, ok := keep[rev]; !ok {
tx.UnsafeDelete(buckets.Key, key) tx.UnsafeDelete(schema.Key, key)
keyCompactions++ keyCompactions++
} }
} }
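The loop above implements a batched keep-set rule: every revision key in the scanned range is deleted unless the index marked it as still needed. A hypothetical distillation of that rule for a single batch (keep, revision, and bytesToRev as defined in this file):

// compactBatchSketch deletes every raw revision key in one batch that the
// index did not ask to keep, and reports how many were removed.
func compactBatchSketch(tx backend.BatchTx, keys [][]byte, keep map[revision]struct{}) (deleted int) {
	for _, key := range keys {
		if _, ok := keep[bytesToRev(key)]; !ok {
			tx.UnsafeDelete(schema.Key, key)
			deleted++
		}
	}
	return deleted
}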

View File

@ -23,8 +23,8 @@ import (
"go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -75,7 +75,7 @@ func TestScheduleCompaction(t *testing.T) {
ibytes := newRevBytes() ibytes := newRevBytes()
for _, rev := range revs { for _, rev := range revs {
revToBytes(rev, ibytes) revToBytes(rev, ibytes)
tx.UnsafePut(buckets.Key, ibytes, []byte("bar")) tx.UnsafePut(schema.Key, ibytes, []byte("bar"))
} }
tx.Unlock() tx.Unlock()
@ -84,14 +84,14 @@ func TestScheduleCompaction(t *testing.T) {
tx.Lock() tx.Lock()
for _, rev := range tt.wrevs { for _, rev := range tt.wrevs {
revToBytes(rev, ibytes) revToBytes(rev, ibytes)
keys, _ := tx.UnsafeRange(buckets.Key, ibytes, nil, 0) keys, _ := tx.UnsafeRange(schema.Key, ibytes, nil, 0)
if len(keys) != 1 { if len(keys) != 1 {
t.Errorf("#%d: range on %v = %d, want 1", i, rev, len(keys)) t.Errorf("#%d: range on %v = %d, want 1", i, rev, len(keys))
} }
} }
vals, _ := UnsafeReadFinishedCompact(tx) vals, _ := UnsafeReadFinishedCompact(tx)
if !reflect.DeepEqual(vals, tt.rev) { if !reflect.DeepEqual(vals, tt.rev) {
t.Errorf("#%d: vals on %v = %+v, want %+v", i, buckets.FinishedCompactKeyName, vals, tt.rev) t.Errorf("#%d: vals on %v = %+v, want %+v", i, schema.FinishedCompactKeyName, vals, tt.rev)
} }
tx.Unlock() tx.Unlock()

View File

@ -35,9 +35,9 @@ import (
"go.etcd.io/etcd/pkg/v3/schedule" "go.etcd.io/etcd/pkg/v3/schedule"
"go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -149,12 +149,12 @@ func TestStorePut(t *testing.T) {
} }
wact := []testutil.Action{ wact := []testutil.Action{
{Name: "seqput", Params: []interface{}{buckets.Key, tt.wkey, data}}, {Name: "seqput", Params: []interface{}{schema.Key, tt.wkey, data}},
} }
if tt.rr != nil { if tt.rr != nil {
wact = []testutil.Action{ wact = []testutil.Action{
{Name: "seqput", Params: []interface{}{buckets.Key, tt.wkey, data}}, {Name: "seqput", Params: []interface{}{schema.Key, tt.wkey, data}},
} }
} }
@ -229,7 +229,7 @@ func TestStoreRange(t *testing.T) {
wstart := newRevBytes() wstart := newRevBytes()
revToBytes(tt.idxr.revs[0], wstart) revToBytes(tt.idxr.revs[0], wstart)
wact := []testutil.Action{ wact := []testutil.Action{
{Name: "range", Params: []interface{}{buckets.Key, wstart, []byte(nil), int64(0)}}, {Name: "range", Params: []interface{}{schema.Key, wstart, []byte(nil), int64(0)}},
} }
if g := b.tx.Action(); !reflect.DeepEqual(g, wact) { if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact) t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact)
@ -304,7 +304,7 @@ func TestStoreDeleteRange(t *testing.T) {
t.Errorf("#%d: marshal err = %v, want nil", i, err) t.Errorf("#%d: marshal err = %v, want nil", i, err)
} }
wact := []testutil.Action{ wact := []testutil.Action{
{Name: "seqput", Params: []interface{}{buckets.Key, tt.wkey, data}}, {Name: "seqput", Params: []interface{}{schema.Key, tt.wkey, data}},
} }
if g := b.tx.Action(); !reflect.DeepEqual(g, wact) { if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact) t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact)
@ -343,10 +343,10 @@ func TestStoreCompact(t *testing.T) {
end := make([]byte, 8) end := make([]byte, 8)
binary.BigEndian.PutUint64(end, uint64(4)) binary.BigEndian.PutUint64(end, uint64(4))
wact := []testutil.Action{ wact := []testutil.Action{
{Name: "put", Params: []interface{}{buckets.Meta, buckets.ScheduledCompactKeyName, newTestRevBytes(revision{3, 0})}}, {Name: "put", Params: []interface{}{schema.Meta, schema.ScheduledCompactKeyName, newTestRevBytes(revision{3, 0})}},
{Name: "range", Params: []interface{}{buckets.Key, make([]byte, 17), end, int64(10000)}}, {Name: "range", Params: []interface{}{schema.Key, make([]byte, 17), end, int64(10000)}},
{Name: "delete", Params: []interface{}{buckets.Key, key2}}, {Name: "delete", Params: []interface{}{schema.Key, key2}},
{Name: "put", Params: []interface{}{buckets.Meta, buckets.FinishedCompactKeyName, newTestRevBytes(revision{3, 0})}}, {Name: "put", Params: []interface{}{schema.Meta, schema.FinishedCompactKeyName, newTestRevBytes(revision{3, 0})}},
} }
if g := b.tx.Action(); !reflect.DeepEqual(g, wact) { if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
t.Errorf("tx actions = %+v, want %+v", g, wact) t.Errorf("tx actions = %+v, want %+v", g, wact)
@ -384,8 +384,8 @@ func TestStoreRestore(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
b.tx.rangeRespc <- rangeResp{[][]byte{buckets.FinishedCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}} b.tx.rangeRespc <- rangeResp{[][]byte{schema.FinishedCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}}
b.tx.rangeRespc <- rangeResp{[][]byte{buckets.ScheduledCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}} b.tx.rangeRespc <- rangeResp{[][]byte{schema.ScheduledCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}}
b.tx.rangeRespc <- rangeResp{[][]byte{putkey, delkey}, [][]byte{putkvb, delkvb}} b.tx.rangeRespc <- rangeResp{[][]byte{putkey, delkey}, [][]byte{putkvb, delkvb}}
b.tx.rangeRespc <- rangeResp{nil, nil} b.tx.rangeRespc <- rangeResp{nil, nil}
@ -399,9 +399,9 @@ func TestStoreRestore(t *testing.T) {
t.Errorf("current rev = %v, want 5", s.currentRev) t.Errorf("current rev = %v, want 5", s.currentRev)
} }
wact := []testutil.Action{ wact := []testutil.Action{
{Name: "range", Params: []interface{}{buckets.Meta, buckets.FinishedCompactKeyName, []byte(nil), int64(0)}}, {Name: "range", Params: []interface{}{schema.Meta, schema.FinishedCompactKeyName, []byte(nil), int64(0)}},
{Name: "range", Params: []interface{}{buckets.Meta, buckets.ScheduledCompactKeyName, []byte(nil), int64(0)}}, {Name: "range", Params: []interface{}{schema.Meta, schema.ScheduledCompactKeyName, []byte(nil), int64(0)}},
{Name: "range", Params: []interface{}{buckets.Key, newTestRevBytes(revision{1, 0}), newTestRevBytes(revision{math.MaxInt64, math.MaxInt64}), int64(restoreChunkKeys)}}, {Name: "range", Params: []interface{}{schema.Key, newTestRevBytes(revision{1, 0}), newTestRevBytes(revision{math.MaxInt64, math.MaxInt64}), int64(restoreChunkKeys)}},
} }
if g := b.tx.Action(); !reflect.DeepEqual(g, wact) { if g := b.tx.Action(); !reflect.DeepEqual(g, wact) {
t.Errorf("tx actions = %+v, want %+v", g, wact) t.Errorf("tx actions = %+v, want %+v", g, wact)
@ -485,7 +485,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
revToBytes(revision{main: 2}, rbytes) revToBytes(revision{main: 2}, rbytes)
tx := s0.b.BatchTx() tx := s0.b.BatchTx()
tx.Lock() tx.Lock()
tx.UnsafePut(buckets.Meta, buckets.ScheduledCompactKeyName, rbytes) tx.UnsafePut(schema.Meta, schema.ScheduledCompactKeyName, rbytes)
tx.Unlock() tx.Unlock()
s0.Close() s0.Close()
@ -514,7 +514,7 @@ func TestRestoreContinueUnfinishedCompaction(t *testing.T) {
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
tx := s.b.BatchTx() tx := s.b.BatchTx()
tx.Lock() tx.Lock()
ks, _ := tx.UnsafeRange(buckets.Key, revbytes, nil, 0) ks, _ := tx.UnsafeRange(schema.Key, revbytes, nil, 0)
tx.Unlock() tx.Unlock()
if len(ks) != 0 { if len(ks) != 0 {
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)

View File

@ -20,8 +20,8 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -160,7 +160,7 @@ func (tr *storeTxnRead) rangeKeys(ctx context.Context, key, end []byte, curRev i
default: default:
} }
revToBytes(revpair, revBytes) revToBytes(revpair, revBytes)
_, vs := tr.tx.UnsafeRange(buckets.Key, revBytes, nil, 0) _, vs := tr.tx.UnsafeRange(schema.Key, revBytes, nil, 0)
if len(vs) != 1 { if len(vs) != 1 {
tr.s.lg.Fatal( tr.s.lg.Fatal(
"range failed to find revision pair", "range failed to find revision pair",
@ -215,7 +215,7 @@ func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) {
} }
tw.trace.Step("marshal mvccpb.KeyValue") tw.trace.Step("marshal mvccpb.KeyValue")
tw.tx.UnsafeSeqPut(buckets.Key, ibytes, d) tw.tx.UnsafeSeqPut(schema.Key, ibytes, d)
tw.s.kvindex.Put(key, idxRev) tw.s.kvindex.Put(key, idxRev)
tw.changes = append(tw.changes, kv) tw.changes = append(tw.changes, kv)
tw.trace.Step("store kv pair into bolt db") tw.trace.Step("store kv pair into bolt db")
@ -276,7 +276,7 @@ func (tw *storeTxnWrite) delete(key []byte) {
) )
} }
tw.tx.UnsafeSeqPut(buckets.Key, ibytes, d) tw.tx.UnsafeSeqPut(schema.Key, ibytes, d)
err = tw.s.kvindex.Tombstone(key, idxRev) err = tw.s.kvindex.Tombstone(key, idxRev)
if err != nil { if err != nil {
tw.storeTxnRead.s.lg.Fatal( tw.storeTxnRead.s.lg.Fatal(

View File

@ -15,12 +15,12 @@

package mvcc package mvcc
import ( import (
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
) )
func UnsafeReadFinishedCompact(tx backend.ReadTx) (finishedCompact int64, found bool) { func UnsafeReadFinishedCompact(tx backend.ReadTx) (finishedCompact int64, found bool) {
_, finishedCompactBytes := tx.UnsafeRange(buckets.Meta, buckets.FinishedCompactKeyName, nil, 0) _, finishedCompactBytes := tx.UnsafeRange(schema.Meta, schema.FinishedCompactKeyName, nil, 0)
if len(finishedCompactBytes) != 0 { if len(finishedCompactBytes) != 0 {
return bytesToRev(finishedCompactBytes[0]).main, true return bytesToRev(finishedCompactBytes[0]).main, true
} }
@ -28,7 +28,7 @@ func UnsafeReadFinishedCompact(tx backend.ReadTx) (finishedComact int64, found b
} }
func UnsafeReadScheduledCompact(tx backend.ReadTx) (scheduledCompact int64, found bool) { func UnsafeReadScheduledCompact(tx backend.ReadTx) (scheduledCompact int64, found bool) {
_, scheduledCompactBytes := tx.UnsafeRange(buckets.Meta, buckets.ScheduledCompactKeyName, nil, 0) _, scheduledCompactBytes := tx.UnsafeRange(schema.Meta, schema.ScheduledCompactKeyName, nil, 0)
if len(scheduledCompactBytes) != 0 { if len(scheduledCompactBytes) != 0 {
return bytesToRev(scheduledCompactBytes[0]).main, true return bytesToRev(scheduledCompactBytes[0]).main, true
} }
@ -44,7 +44,7 @@ func SetScheduledCompact(tx backend.BatchTx, value int64) {
func UnsafeSetScheduledCompact(tx backend.BatchTx, value int64) { func UnsafeSetScheduledCompact(tx backend.BatchTx, value int64) {
rbytes := newRevBytes() rbytes := newRevBytes()
revToBytes(revision{main: value}, rbytes) revToBytes(revision{main: value}, rbytes)
tx.UnsafePut(buckets.Meta, buckets.ScheduledCompactKeyName, rbytes) tx.UnsafePut(schema.Meta, schema.ScheduledCompactKeyName, rbytes)
} }
func SetFinishedCompact(tx backend.BatchTx, value int64) { func SetFinishedCompact(tx backend.BatchTx, value int64) {
@ -56,5 +56,5 @@ func SetFinishedCompact(tx backend.BatchTx, value int64) {
func UnsafeSetFinishedCompact(tx backend.BatchTx, value int64) { func UnsafeSetFinishedCompact(tx backend.BatchTx, value int64) {
rbytes := newRevBytes() rbytes := newRevBytes()
revToBytes(revision{main: value}, rbytes) revToBytes(revision{main: value}, rbytes)
tx.UnsafePut(buckets.Meta, buckets.FinishedCompactKeyName, rbytes) tx.UnsafePut(schema.Meta, schema.FinishedCompactKeyName, rbytes)
} }

View File

@ -8,9 +8,9 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
) )
// TestScheduledCompact ensures that UnsafeSetScheduledCompact and UnsafeReadScheduledCompact work well together. // TestScheduledCompact ensures that UnsafeSetScheduledCompact and UnsafeReadScheduledCompact work well together.
@ -39,7 +39,7 @@ func TestScheduledCompact(t *testing.T) {
t.Fatal("batch tx is nil") t.Fatal("batch tx is nil")
} }
tx.Lock() tx.Lock()
tx.UnsafeCreateBucket(buckets.Meta) tx.UnsafeCreateBucket(schema.Meta)
UnsafeSetScheduledCompact(tx, tc.value) UnsafeSetScheduledCompact(tx, tc.value)
tx.Unlock() tx.Unlock()
be.ForceCommit() be.ForceCommit()
@ -80,7 +80,7 @@ func TestFinishedCompact(t *testing.T) {
t.Fatal("batch tx is nil") t.Fatal("batch tx is nil")
} }
tx.Lock() tx.Lock()
tx.UnsafeCreateBucket(buckets.Meta) tx.UnsafeCreateBucket(schema.Meta)
UnsafeSetFinishedCompact(tx, tc.value) UnsafeSetFinishedCompact(tx, tc.value)
tx.Unlock() tx.Unlock()
be.ForceCommit() be.ForceCommit()

View File

@ -18,8 +18,8 @@ import (
"fmt" "fmt"
"go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
) )
func WriteKV(be backend.Backend, kv mvccpb.KeyValue) { func WriteKV(be backend.Backend, kv mvccpb.KeyValue) {
@ -32,6 +32,6 @@ func WriteKV(be backend.Backend, kv mvccpb.KeyValue) {
} }
be.BatchTx().Lock() be.BatchTx().Lock()
be.BatchTx().UnsafePut(buckets.Key, ibytes, d) be.BatchTx().UnsafePut(schema.Key, ibytes, d)
be.BatchTx().Unlock() be.BatchTx().Unlock()
} }
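WriteKV above gives tests a one-call way to seed the backend with a marshaled key-value pair. A hedged usage sketch (helper name and revision values are illustrative, not from this commit):

// seedBackendSketch stores one key-value pair directly in the Key bucket and
// flushes it, so later reads through a ReadTx can observe it.
func seedBackendSketch(be backend.Backend) {
	WriteKV(be, mvccpb.KeyValue{
		Key:            []byte("foo"),
		Value:          []byte("bar"),
		CreateRevision: 2,
		ModRevision:    2,
		Version:        1,
	})
	be.ForceCommit()
}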

View File

@ -21,8 +21,8 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -354,7 +354,7 @@ func (s *watchableStore) syncWatchers() int {
// values are actual key-value pairs in backend. // values are actual key-value pairs in backend.
tx := s.store.b.ReadTx() tx := s.store.b.ReadTx()
tx.RLock() tx.RLock()
revs, vs := tx.UnsafeRange(buckets.Key, minBytes, maxBytes, 0) revs, vs := tx.UnsafeRange(schema.Key, minBytes, maxBytes, 0)
tx.RUnlock() tx.RUnlock()
evs := kvsToEvents(s.store.lg, wg, revs, vs) evs := kvsToEvents(s.store.lg, wg, revs, vs)

View File

@ -21,7 +21,7 @@ import (
"go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap" "go.uber.org/zap"
) )

View File

@ -26,7 +26,7 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap" "go.uber.org/zap"
) )

View File

@ -19,7 +19,7 @@ import (
"testing" "testing"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap" "go.uber.org/zap"
) )

View File

@ -24,7 +24,7 @@ import (
"go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap" "go.uber.org/zap"
) )

View File

@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package buckets package schema
import ( import (
"go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"go.uber.org/zap" "go.uber.org/zap"
) )

View File

@ -12,12 +12,12 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package buckets package schema
import ( import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
) )
const ( const (

View File

@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package buckets package schema
import ( import (
"go.etcd.io/etcd/api/v3/authpb" "go.etcd.io/etcd/api/v3/authpb"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"go.uber.org/zap" "go.uber.org/zap"
) )

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package buckets package schema
import ( import (
"fmt" "fmt"
@ -21,8 +21,8 @@ import (
"time" "time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
) )
// TestAuthEnabled ensures that UnsafeSaveAuthEnabled and UnsafeReadAuthEnabled work well together. // TestAuthEnabled ensures that UnsafeSaveAuthEnabled and UnsafeReadAuthEnabled work well together.

View File

@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package buckets package schema
import ( import (
"go.etcd.io/etcd/api/v3/authpb" "go.etcd.io/etcd/api/v3/authpb"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"go.uber.org/zap" "go.uber.org/zap"
) )

View File

@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package buckets package schema
import ( import (
"bytes" "bytes"
"go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
) )
var ( var (

View File

@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package buckets package schema
import ( import (
"encoding/binary" "encoding/binary"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
) )
// UnsafeCreateMetaBucket creates the `meta` bucket (if it does not exist yet). // UnsafeCreateMetaBucket creates the `meta` bucket (if it does not exist yet).
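
For orientation, a minimal sketch (not from this PR) of driving the relocated helpers from outside the package, using only calls that appear in the hunks of this change; the path and values are illustrative:

package main

import (
	"go.etcd.io/etcd/server/v3/storage/backend"
	"go.etcd.io/etcd/server/v3/storage/schema"
)

// seedMeta persists index/term into the meta bucket via the renamed
// schema package (formerly buckets).
func seedMeta(path string, idx, term uint64) {
	be := backend.NewDefaultBackend(path)
	defer be.Close()

	tx := be.BatchTx()
	tx.Lock()
	schema.UnsafeCreateMetaBucket(tx) // no-op if the bucket already exists
	schema.UnsafeUpdateConsistentIndex(tx, idx, term, false)
	tx.Unlock()
	be.ForceCommit()
}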

View File

@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package buckets package schema
import ( import (
"encoding/json" "encoding/json"
"log" "log"
"go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"go.uber.org/zap" "go.uber.org/zap"
) )

View File

@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package buckets package schema
import ( import (
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/raft/v3/raftpb"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.uber.org/zap/zaptest" "go.uber.org/zap/zaptest"
) )

View File

@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package buckets package schema
import ( import (
"encoding/binary" "encoding/binary"
"math" "math"
"go.etcd.io/etcd/server/v3/lease/leasepb" "go.etcd.io/etcd/server/v3/lease/leasepb"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
) )
func UnsafeCreateLeaseBucket(tx backend.BatchTx) { func UnsafeCreateLeaseBucket(tx backend.BatchTx) {

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package buckets package schema
import ( import (
"encoding/json" "encoding/json"
@ -20,7 +20,7 @@ import (
"go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/client/pkg/v3/types"
"go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"github.com/coreos/go-semver/semver" "github.com/coreos/go-semver/semver"
"go.uber.org/zap" "go.uber.org/zap"

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package version package schema
import ( import (
"fmt" "fmt"
@ -20,8 +20,7 @@ import (
"github.com/coreos/go-semver/semver" "github.com/coreos/go-semver/semver"
"go.uber.org/zap" "go.uber.org/zap"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
) )
var ( var (
@ -29,8 +28,8 @@ var (
V3_6 = semver.Version{Major: 3, Minor: 6} V3_6 = semver.Version{Major: 3, Minor: 6}
) )
// UpdateStorageVersion updates storage version. // UpdateStorageSchema updates storage version.
func UpdateStorageVersion(lg *zap.Logger, tx backend.BatchTx) error { func UpdateStorageSchema(lg *zap.Logger, tx backend.BatchTx) error {
tx.Lock() tx.Lock()
defer tx.Unlock() defer tx.Unlock()
v, err := detectStorageVersion(lg, tx) v, err := detectStorageVersion(lg, tx)
@ -41,7 +40,7 @@ func UpdateStorageVersion(lg *zap.Logger, tx backend.BatchTx) error {
case V3_5: case V3_5:
lg.Warn("setting storage version", zap.String("storage-version", V3_6.String())) lg.Warn("setting storage version", zap.String("storage-version", V3_6.String()))
// All meta keys introduced in v3.6 should be filled in here. // All meta keys introduced in v3.6 should be filled in here.
buckets.UnsafeSetStorageVersion(tx, &V3_6) UnsafeSetStorageVersion(tx, &V3_6)
case V3_6: case V3_6:
default: default:
lg.Warn("unknown storage version", zap.String("storage-version", v.String())) lg.Warn("unknown storage version", zap.String("storage-version", v.String()))
@ -50,17 +49,17 @@ func UpdateStorageVersion(lg *zap.Logger, tx backend.BatchTx) error {
} }
func detectStorageVersion(lg *zap.Logger, tx backend.ReadTx) (*semver.Version, error) { func detectStorageVersion(lg *zap.Logger, tx backend.ReadTx) (*semver.Version, error) {
v := buckets.UnsafeReadStorageVersion(tx) v := UnsafeReadStorageVersion(tx)
if v != nil { if v != nil {
return v, nil return v, nil
} }
confstate := buckets.UnsafeConfStateFromBackend(lg, tx) confstate := UnsafeConfStateFromBackend(lg, tx)
if confstate == nil { if confstate == nil {
return nil, fmt.Errorf("missing %q key", buckets.MetaConfStateName) return nil, fmt.Errorf("missing %q key", MetaConfStateName)
} }
_, term := buckets.UnsafeReadConsistentIndex(tx) _, term := UnsafeReadConsistentIndex(tx)
if term == 0 { if term == 0 {
return nil, fmt.Errorf("missing %q key", buckets.MetaTermKeyName) return nil, fmt.Errorf("missing %q key", MetaTermKeyName)
} }
copied := V3_5 copied := V3_5
return &copied, nil return &copied, nil
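
Since the renamed UpdateStorageSchema acquires the lock itself (tx.Lock() in the hunk above), callers hand it an unlocked batch tx, as the test in the next file does. A minimal sketch under that reading; the db path is an illustrative assumption:

package main

import (
	"go.uber.org/zap"

	"go.etcd.io/etcd/server/v3/storage/backend"
	"go.etcd.io/etcd/server/v3/storage/schema"
)

func main() {
	lg := zap.NewExample()
	be := backend.NewDefaultBackend("member/snap/db") // hypothetical path
	defer be.Close()

	// The tx is passed unlocked; UpdateStorageSchema locks and unlocks it.
	if err := schema.UpdateStorageSchema(lg, be.BatchTx()); err != nil {
		lg.Fatal("cannot update storage schema", zap.Error(err))
	}
	be.ForceCommit()
}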

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package version package schema
import ( import (
"testing" "testing"
@ -22,9 +22,8 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing" betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
"go.etcd.io/etcd/server/v3/mvcc/buckets"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -47,7 +46,7 @@ func TestUpdateStorageVersion(t *testing.T) {
{ {
name: `Backend before 3.6 without "term" should be rejected`, name: `Backend before 3.6 without "term" should be rejected`,
version: "", version: "",
metaKeys: [][]byte{buckets.MetaConfStateName}, metaKeys: [][]byte{MetaConfStateName},
expectVersion: nil, expectVersion: nil,
expectError: true, expectError: true,
expectedErrorMsg: `cannot determine storage version: missing "term" key`, expectedErrorMsg: `cannot determine storage version: missing "term" key`,
@ -55,25 +54,25 @@ func TestUpdateStorageVersion(t *testing.T) {
{ {
name: "Backend with 3.5 with all metadata keys should be upgraded to v3.6", name: "Backend with 3.5 with all metadata keys should be upgraded to v3.6",
version: "", version: "",
metaKeys: [][]byte{buckets.MetaTermKeyName, buckets.MetaConfStateName}, metaKeys: [][]byte{MetaTermKeyName, MetaConfStateName},
expectVersion: &semver.Version{Major: 3, Minor: 6}, expectVersion: &semver.Version{Major: 3, Minor: 6},
}, },
{ {
name: "Backend in 3.6.0 should be skipped", name: "Backend in 3.6.0 should be skipped",
version: "3.6.0", version: "3.6.0",
metaKeys: [][]byte{buckets.MetaTermKeyName, buckets.MetaConfStateName, buckets.MetaStorageVersionName}, metaKeys: [][]byte{MetaTermKeyName, MetaConfStateName, MetaStorageVersionName},
expectVersion: &semver.Version{Major: 3, Minor: 6}, expectVersion: &semver.Version{Major: 3, Minor: 6},
}, },
{ {
name: "Backend with current version should be skipped", name: "Backend with current version should be skipped",
version: version.Version, version: version.Version,
metaKeys: [][]byte{buckets.MetaTermKeyName, buckets.MetaConfStateName, buckets.MetaStorageVersionName}, metaKeys: [][]byte{MetaTermKeyName, MetaConfStateName, MetaStorageVersionName},
expectVersion: &semver.Version{Major: 3, Minor: 6}, expectVersion: &semver.Version{Major: 3, Minor: 6},
}, },
{ {
name: "Backend in 3.7.0 should be skipped", name: "Backend in 3.7.0 should be skipped",
version: "3.7.0", version: "3.7.0",
metaKeys: [][]byte{buckets.MetaTermKeyName, buckets.MetaConfStateName, buckets.MetaStorageVersionName, []byte("future-key")}, metaKeys: [][]byte{MetaTermKeyName, MetaConfStateName, MetaStorageVersionName, []byte("future-key")},
expectVersion: &semver.Version{Major: 3, Minor: 7}, expectVersion: &semver.Version{Major: 3, Minor: 7},
}, },
} }
@ -86,19 +85,19 @@ func TestUpdateStorageVersion(t *testing.T) {
t.Fatal("batch tx is nil") t.Fatal("batch tx is nil")
} }
tx.Lock() tx.Lock()
buckets.UnsafeCreateMetaBucket(tx) UnsafeCreateMetaBucket(tx)
for _, k := range tc.metaKeys { for _, k := range tc.metaKeys {
switch string(k) { switch string(k) {
case string(buckets.MetaConfStateName): case string(MetaConfStateName):
buckets.MustUnsafeSaveConfStateToBackend(lg, tx, &raftpb.ConfState{}) MustUnsafeSaveConfStateToBackend(lg, tx, &raftpb.ConfState{})
case string(buckets.MetaTermKeyName): case string(MetaTermKeyName):
buckets.UnsafeUpdateConsistentIndex(tx, 1, 1, false) UnsafeUpdateConsistentIndex(tx, 1, 1, false)
default: default:
tx.UnsafePut(buckets.Meta, k, []byte{}) tx.UnsafePut(Meta, k, []byte{})
} }
} }
if tc.version != "" { if tc.version != "" {
buckets.UnsafeSetStorageVersion(tx, semver.New(tc.version)) UnsafeSetStorageVersion(tx, semver.New(tc.version))
} }
tx.Unlock() tx.Unlock()
be.ForceCommit() be.ForceCommit()
@ -106,14 +105,14 @@ func TestUpdateStorageVersion(t *testing.T) {
b := backend.NewDefaultBackend(tmpPath) b := backend.NewDefaultBackend(tmpPath)
defer b.Close() defer b.Close()
err := UpdateStorageVersion(lg, b.BatchTx()) err := UpdateStorageSchema(lg, b.BatchTx())
if (err != nil) != tc.expectError { if (err != nil) != tc.expectError {
t.Errorf("UpgradeStorage(...) = %+v, expected error: %v", err, tc.expectError) t.Errorf("UpgradeStorage(...) = %+v, expected error: %v", err, tc.expectError)
} }
if err != nil && err.Error() != tc.expectedErrorMsg { if err != nil && err.Error() != tc.expectedErrorMsg {
t.Errorf("UpgradeStorage(...) = %q, expected error message: %q", err, tc.expectedErrorMsg) t.Errorf("UpgradeStorage(...) = %q, expected error message: %q", err, tc.expectedErrorMsg)
} }
v := buckets.UnsafeReadStorageVersion(b.BatchTx()) v := UnsafeReadStorageVersion(b.BatchTx())
assert.Equal(t, tc.expectVersion, v) assert.Equal(t, tc.expectVersion, v)
}) })
} }

View File

@ -12,12 +12,12 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package buckets package schema
import ( import (
"github.com/coreos/go-semver/semver" "github.com/coreos/go-semver/semver"
"go.etcd.io/bbolt" "go.etcd.io/bbolt"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
) )
// ReadStorageVersion loads the storage version from the given backend transaction. // ReadStorageVersion loads the storage version from the given backend transaction.

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package buckets package schema
import ( import (
"testing" "testing"
@ -22,8 +22,8 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"go.etcd.io/bbolt" "go.etcd.io/bbolt"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/mvcc/backend/testing" "go.etcd.io/etcd/server/v3/storage/backend/testing"
) )
// TestVersion ensures that UnsafeSetStorageVersion/UnsafeReadStorageVersion work well together. // TestVersion ensures that UnsafeSetStorageVersion/UnsafeReadStorageVersion work well together.
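
A minimal sketch (not from this PR) of the set/read round trip the renamed test covers. It assumes, as in TestUpdateStorageVersion above, that the meta bucket must be created before the version key is written; the path is illustrative:

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"

	"go.etcd.io/etcd/server/v3/storage/backend"
	"go.etcd.io/etcd/server/v3/storage/schema"
)

func main() {
	be := backend.NewDefaultBackend("version-demo.db") // hypothetical path
	defer be.Close()

	tx := be.BatchTx()
	tx.Lock()
	schema.UnsafeCreateMetaBucket(tx) // the version key lives in the meta bucket
	schema.UnsafeSetStorageVersion(tx, &semver.Version{Major: 3, Minor: 6})
	v := schema.UnsafeReadStorageVersion(tx)
	tx.Unlock()

	fmt.Println(v) // expected: 3.6.0
}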

View File

@ -20,8 +20,8 @@ import (
"go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/raft/v3/raftpb"
"go.etcd.io/etcd/server/v3/datadir" "go.etcd.io/etcd/server/v3/datadir"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
wal2 "go.etcd.io/etcd/server/v3/wal" wal2 "go.etcd.io/etcd/server/v3/wal"
"go.etcd.io/etcd/server/v3/wal/walpb" "go.etcd.io/etcd/server/v3/wal/walpb"
"go.uber.org/zap" "go.uber.org/zap"
@ -109,7 +109,7 @@ func MustVerifyIfEnabled(cfg Config) {
func validateConsistentIndex(cfg Config, hardstate *raftpb.HardState, snapshot *walpb.Snapshot, be backend.Backend) error { func validateConsistentIndex(cfg Config, hardstate *raftpb.HardState, snapshot *walpb.Snapshot, be backend.Backend) error {
tx := be.BatchTx() tx := be.BatchTx()
index, term := buckets.ReadConsistentIndex(tx) index, term := schema.ReadConsistentIndex(tx)
if cfg.ExactIndex && index != hardstate.Commit { if cfg.ExactIndex && index != hardstate.Commit {
return fmt.Errorf("backend.ConsistentIndex (%v) expected == WAL.HardState.commit (%v)", index, hardstate.Commit) return fmt.Errorf("backend.ConsistentIndex (%v) expected == WAL.HardState.commit (%v)", index, hardstate.Commit)
} }

View File

@ -32,8 +32,8 @@ import (
"go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc" "go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/mvcc"
"go.etcd.io/etcd/tests/v3/integration" "go.etcd.io/etcd/tests/v3/integration"
) )

View File

@ -25,8 +25,8 @@ import (
pb "go.etcd.io/etcd/api/v3/etcdserverpb" pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
"go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/pkg/v3/traceutil"
"go.etcd.io/etcd/server/v3/mvcc" "go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/mvcc"
"go.uber.org/zap/zaptest" "go.uber.org/zap/zaptest"
) )

View File

@ -21,8 +21,8 @@ import (
"go.uber.org/zap" "go.uber.org/zap"
"go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease"
"go.etcd.io/etcd/server/v3/mvcc" "go.etcd.io/etcd/server/v3/storage/backend"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/mvcc"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )

View File

@ -20,11 +20,11 @@ import (
"path/filepath" "path/filepath"
"go.etcd.io/etcd/api/v3/authpb" "go.etcd.io/etcd/api/v3/authpb"
"go.etcd.io/etcd/server/v3/mvcc/buckets" "go.etcd.io/etcd/server/v3/storage/schema"
"go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/api/v3/mvccpb"
"go.etcd.io/etcd/server/v3/lease/leasepb" "go.etcd.io/etcd/server/v3/lease/leasepb"
"go.etcd.io/etcd/server/v3/mvcc/backend" "go.etcd.io/etcd/server/v3/storage/backend"
bolt "go.etcd.io/bbolt" bolt "go.etcd.io/bbolt"
) )
@ -163,7 +163,7 @@ func iterateBucket(dbPath, bucket string, limit uint64, decode bool) (err error)
func getHash(dbPath string) (hash uint32, err error) { func getHash(dbPath string) (hash uint32, err error) {
b := backend.NewDefaultBackend(dbPath) b := backend.NewDefaultBackend(dbPath)
return b.Hash(buckets.DefaultIgnores) return b.Hash(schema.DefaultIgnores)
} }
// TODO: revert by revision and find specified hash value // TODO: revert by revision and find specified hash value
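
A minimal sketch of the same hashing call outside the dump tool, built only from the lines above. That schema.DefaultIgnores filters out keys expected to differ between members is an assumption about its purpose, and the path is illustrative:

package main

import (
	"fmt"

	"go.etcd.io/etcd/server/v3/storage/backend"
	"go.etcd.io/etcd/server/v3/storage/schema"
)

func main() {
	be := backend.NewDefaultBackend("member/snap/db") // hypothetical path
	defer be.Close()

	// Hash the bbolt file, skipping the keys schema.DefaultIgnores selects.
	h, err := be.Hash(schema.DefaultIgnores)
	if err != nil {
		panic(err)
	}
	fmt.Printf("db hash: %x\n", h)
}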