Restructure database to prevent double slashes in keys, which caused bugs in cursors (#1432)

* Add TestValidateAndInsertPruningPointWithSideBlocks

* Optimize infrastructure bucket paths

* Update infrastructure tests

* Refactor the consensus/database layer

* Remove utils/dbkeys

* Use consensus/database in consensus instead of infrastructure

* Fix a bug in the dbBucketToDatabaseBucket and MakeBucket combination (see the sketch below)

Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
Author: Svarog
Committed by: GitHub
Date: 2021-01-19 14:19:08 +02:00
Parent: a4adbabf96
Commit: ad9c213a06
34 changed files with 327 additions and 215 deletions
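
For context, here is a minimal, self-contained sketch of the double-slash problem this commit removes, reconstructed from the utils/dbkeys code deleted below. The oldBucket type is an illustrative stand-in, not a type from the repository:

package main

import (
	"bytes"
	"fmt"
)

var bucketSeparator = []byte("/")

// oldBucket mirrors the removed dbkeys.Bucket: a path held as separate segments.
type oldBucket struct{ path [][]byte }

// Path joins the segments and appends a trailing separator, as the old code did.
func (b *oldBucket) Path() []byte {
	joined := bytes.Join(b.path, bucketSeparator)
	return append(joined, bucketSeparator...)
}

func main() {
	// A consensus-level bucket such as "acceptance-data".
	consensusBucket := &oldBucket{path: [][]byte{[]byte("acceptance-data")}}

	// The old dbBucketToDatabaseBucket fed the already-joined, already-terminated
	// path back into a segment-based constructor, so rebuilding the path appended
	// a second separator, which is exactly the prefix that cursors then failed to match.
	infraBucket := &oldBucket{path: [][]byte{consensusBucket.Path()}}

	fmt.Printf("%s\n", infraBucket.Path()) // acceptance-data//
}

The new code in this commit keeps the path as a single []byte that always ends in exactly one separator, so feeding a path back through MakeBucket is idempotent.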

View File

@@ -3,8 +3,7 @@ package consensus
 import (
 	"sync"
-	"github.com/kaspanet/kaspad/infrastructure/db/database"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"

View File

@@ -6,9 +6,18 @@ import (
 )
 func dbBucketToDatabaseBucket(bucket model.DBBucket) *database.Bucket {
+	if bucket, ok := bucket.(dbBucket); ok {
+		return bucket.bucket
+	}
+	// This assumes that MakeBucket(src).Path() == src. which is not promised anywhere.
 	return database.MakeBucket(bucket.Path())
 }
+// MakeBucket creates a new Bucket using the given path of buckets.
+func MakeBucket(path []byte) model.DBBucket {
+	return dbBucket{bucket: database.MakeBucket(path)}
+}
 type dbBucket struct {
 	bucket *database.Bucket
 }
@@ -26,5 +35,5 @@ func (d dbBucket) Path() []byte {
 }
 func newDBBucket(bucket *database.Bucket) model.DBBucket {
-	return &dbBucket{bucket: bucket}
+	return dbBucket{bucket: bucket}
 }

View File

@@ -0,0 +1,14 @@
package database
import (
"github.com/kaspanet/kaspad/infrastructure/db/database"
)
// ErrNotFound denotes that the requested item was not
// found in the database.
var ErrNotFound = database.ErrNotFound
// IsNotFoundError checks whether an error is an ErrNotFound.
func IsNotFoundError(err error) bool {
return database.IsNotFoundError(err)
}
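
A hedged, self-contained sketch of the pattern this small package enables: consensus code checks for missing keys against a single re-exported sentinel instead of importing infrastructure/db/database directly. The fetch function below is an illustrative stand-in, not a kaspad API:

package main

import (
	"errors"
	"fmt"
)

// errNotFound plays the role of the re-exported database.ErrNotFound.
var errNotFound = errors.New("not found")

// isNotFoundError mirrors IsNotFoundError above.
func isNotFoundError(err error) bool {
	return errors.Is(err, errNotFound)
}

// fetch stands in for a store read that may miss.
func fetch(key string) ([]byte, error) {
	return nil, errNotFound
}

func main() {
	if _, err := fetch("tips"); isNotFoundError(err) {
		fmt.Println("key is missing: treat as empty rather than as a failure")
	}
}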

View File

@@ -6,6 +6,9 @@ import (
 )
 func dbKeyToDatabaseKey(key model.DBKey) *database.Key {
+	if key, ok := key.(dbKey); ok {
+		return key.key
+	}
 	return dbBucketToDatabaseBucket(key.Bucket()).Key(key.Suffix())
 }

View File

@@ -1,15 +1,15 @@
 package acceptancedatastore
 import (
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
 	"google.golang.org/protobuf/proto"
 )
-var bucket = dbkeys.MakeBucket([]byte("acceptance-data"))
+var bucket = database.MakeBucket([]byte("acceptance-data"))
 // acceptanceDataStore represents a store of AcceptanceData
 type acceptanceDataStore struct {

View File

@@ -2,15 +2,15 @@ package blockheaderstore
 import (
 	"github.com/golang/protobuf/proto"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
 )
-var bucket = dbkeys.MakeBucket([]byte("block-headers"))
-var countKey = dbkeys.MakeBucket().Key([]byte("block-headers-count"))
+var bucket = database.MakeBucket([]byte("block-headers"))
+var countKey = database.MakeBucket(nil).Key([]byte("block-headers-count"))
 // blockHeaderStore represents a store of blocks
 type blockHeaderStore struct {

View File

@@ -2,14 +2,14 @@ package blockrelationstore
 import (
 	"github.com/golang/protobuf/proto"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
 )
-var bucket = dbkeys.MakeBucket([]byte("block-relations"))
+var bucket = database.MakeBucket([]byte("block-relations"))
 // blockRelationStore represents a store of BlockRelations
 type blockRelationStore struct {

View File

@@ -2,14 +2,14 @@ package blockstatusstore
 import (
 	"github.com/golang/protobuf/proto"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
 )
-var bucket = dbkeys.MakeBucket([]byte("block-statuses"))
+var bucket = database.MakeBucket([]byte("block-statuses"))
 // blockStatusStore represents a store of BlockStatuses
 type blockStatusStore struct {

View File

@@ -2,15 +2,15 @@ package blockstore
 import (
 	"github.com/golang/protobuf/proto"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
 )
-var bucket = dbkeys.MakeBucket([]byte("blocks"))
-var countKey = dbkeys.MakeBucket().Key([]byte("blocks-count"))
+var bucket = database.MakeBucket([]byte("blocks"))
+var countKey = database.MakeBucket(nil).Key([]byte("blocks-count"))
 // blockStore represents a store of blocks
 type blockStore struct {

View File

@@ -2,13 +2,13 @@ package consensusstatestore
 import (
 	"github.com/golang/protobuf/proto"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 )
-var tipsKey = dbkeys.MakeBucket().Key([]byte("tips"))
+var tipsKey = database.MakeBucket(nil).Key([]byte("tips"))
 func (css *consensusStateStore) Tips(dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
 	if css.tipsStaging != nil {
View File

@@ -1,14 +1,14 @@
 package consensusstatestore
 import (
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
 	"github.com/pkg/errors"
 )
-var utxoSetBucket = dbkeys.MakeBucket([]byte("virtual-utxo-set"))
+var utxoSetBucket = database.MakeBucket([]byte("virtual-utxo-set"))
 func utxoKey(outpoint *externalapi.DomainOutpoint) (model.DBKey, error) {
 	serializedOutpoint, err := serializeOutpoint(outpoint)

View File

@@ -2,13 +2,13 @@ package consensusstatestore
 import (
 	"github.com/golang/protobuf/proto"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 )
-var virtualDiffParentsKey = dbkeys.MakeBucket().Key([]byte("virtual-diff-parents"))
+var virtualDiffParentsKey = database.MakeBucket(nil).Key([]byte("virtual-diff-parents"))
 func (css *consensusStateStore) VirtualDiffParents(dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
 	if css.virtualDiffParentsStaging != nil {

View File

@@ -1,13 +1,13 @@
 package finalitystore
 import (
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
 )
-var bucket = dbkeys.MakeBucket([]byte("finality-points"))
+var bucket = database.MakeBucket([]byte("finality-points"))
 type finalityStore struct {
 	staging map[externalapi.DomainHash]*externalapi.DomainHash

View File

@@ -2,14 +2,14 @@ package ghostdagdatastore
 import (
 	"github.com/golang/protobuf/proto"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
 )
-var bucket = dbkeys.MakeBucket([]byte("block-ghostdag-data"))
+var bucket = database.MakeBucket([]byte("block-ghostdag-data"))
 // ghostdagDataStore represents a store of BlockGHOSTDAGData
 type ghostdagDataStore struct {

View File

@@ -2,19 +2,18 @@ package headersselectedchainstore
 import (
 	"encoding/binary"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucacheuint64tohash"
-	"github.com/kaspanet/kaspad/infrastructure/db/database"
 	"github.com/pkg/errors"
 )
-var bucketChainBlockHashByIndex = dbkeys.MakeBucket([]byte("chain-block-hash-by-index"))
-var bucketChainBlockIndexByHash = dbkeys.MakeBucket([]byte("chain-block-index-by-hash"))
-var highestChainBlockIndexKey = dbkeys.MakeBucket().Key([]byte("highest-chain-block-index"))
+var bucketChainBlockHashByIndex = database.MakeBucket([]byte("chain-block-hash-by-index"))
+var bucketChainBlockIndexByHash = database.MakeBucket([]byte("chain-block-index-by-hash"))
+var highestChainBlockIndexKey = database.MakeBucket(nil).Key([]byte("highest-chain-block-index"))
 type headersSelectedChainStore struct {
 	stagingAddedByHash map[externalapi.DomainHash]uint64

View File

@@ -2,13 +2,13 @@ package headersselectedtipstore
 import (
 	"github.com/golang/protobuf/proto"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 )
-var headerSelectedTipKey = dbkeys.MakeBucket().Key([]byte("headers-selected-tip"))
+var headerSelectedTipKey = database.MakeBucket(nil).Key([]byte("headers-selected-tip"))
 type headerSelectedTipStore struct {
 	staging *externalapi.DomainHash

View File

@@ -2,14 +2,14 @@ package multisetstore
 import (
 	"github.com/golang/protobuf/proto"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
 )
-var bucket = dbkeys.MakeBucket([]byte("multisets"))
+var bucket = database.MakeBucket([]byte("multisets"))
 // multisetStore represents a store of Multisets
 type multisetStore struct {

View File

@@ -2,15 +2,15 @@ package pruningstore
 import (
 	"github.com/golang/protobuf/proto"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 )
-var pruningBlockHashKey = dbkeys.MakeBucket().Key([]byte("pruning-block-hash"))
-var candidatePruningPointHashKey = dbkeys.MakeBucket().Key([]byte("candidate-pruning-point-hash"))
-var pruningSerializedUTXOSetKey = dbkeys.MakeBucket().Key([]byte("pruning-utxo-set"))
+var pruningBlockHashKey = database.MakeBucket(nil).Key([]byte("pruning-block-hash"))
+var candidatePruningPointHashKey = database.MakeBucket(nil).Key([]byte("candidate-pruning-point-hash"))
+var pruningSerializedUTXOSetKey = database.MakeBucket(nil).Key([]byte("pruning-utxo-set"))
 // pruningStore represents a store for the current pruning state
 type pruningStore struct {

View File

@@ -2,15 +2,15 @@ package reachabilitydatastore
 import (
 	"github.com/golang/protobuf/proto"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
 )
-var reachabilityDataBucket = dbkeys.MakeBucket([]byte("reachability-data"))
-var reachabilityReindexRootKey = dbkeys.MakeBucket().Key([]byte("reachability-reindex-root"))
+var reachabilityDataBucket = database.MakeBucket([]byte("reachability-data"))
+var reachabilityReindexRootKey = database.MakeBucket(nil).Key([]byte("reachability-reindex-root"))
 // reachabilityDataStore represents a store of ReachabilityData
 type reachabilityDataStore struct {

View File

@@ -2,16 +2,16 @@ package utxodiffstore
 import (
 	"github.com/golang/protobuf/proto"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
 	"github.com/pkg/errors"
 )
-var utxoDiffBucket = dbkeys.MakeBucket([]byte("utxo-diffs"))
-var utxoDiffChildBucket = dbkeys.MakeBucket([]byte("utxo-diff-children"))
+var utxoDiffBucket = database.MakeBucket([]byte("utxo-diffs"))
+var utxoDiffChildBucket = database.MakeBucket([]byte("utxo-diff-children"))
 // utxoDiffStore represents a store of UTXODiffs
 type utxoDiffStore struct {

View File

@@ -1,8 +1,12 @@
 package blockprocessor_test
 import (
+	"testing"
+	"time"
 	"github.com/kaspanet/kaspad/domain/consensus"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
+	"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
 	"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
@@ -11,10 +15,30 @@ import (
 	"github.com/kaspanet/kaspad/domain/dagconfig"
 	"github.com/pkg/errors"
 	"google.golang.org/protobuf/proto"
-	"testing"
-	"time"
 )
+func addBlock(tcSyncer, tcSyncee testapi.TestConsensus, parentHashes []*externalapi.DomainHash, t *testing.T) *externalapi.DomainHash {
+	block, _, err := tcSyncer.BuildBlockWithParents(parentHashes, nil, nil)
+	if err != nil {
+		t.Fatalf("BuildBlockWithParents: %+v", err)
+	}
+	_, err = tcSyncer.ValidateAndInsertBlock(block)
+	if err != nil {
+		t.Fatalf("ValidateAndInsertBlock: %+v", err)
+	}
+	_, err = tcSyncee.ValidateAndInsertBlock(&externalapi.DomainBlock{
+		Header:       block.Header,
+		Transactions: nil,
+	})
+	if err != nil {
+		t.Fatalf("ValidateAndInsertBlock: %+v", err)
+	}
+	return consensushashing.BlockHash(block)
+}
 func TestValidateAndInsertPruningPoint(t *testing.T) {
 	testutils.ForAllNets(t, true, func(t *testing.T, params *dagconfig.Params) {
 		// This is done to reduce the pruning depth to 6 blocks
@@ -36,43 +60,21 @@ func TestValidateAndInsertPruningPoint(t *testing.T) {
 		}
 		defer teardownSyncee(false)
-		addBlock := func(parentHashes []*externalapi.DomainHash) *externalapi.DomainHash {
-			block, _, err := tcSyncer.BuildBlockWithParents(parentHashes, nil, nil)
-			if err != nil {
-				t.Fatalf("BuildBlockWithParents: %+v", err)
-			}
-			_, err = tcSyncer.ValidateAndInsertBlock(block)
-			if err != nil {
-				t.Fatalf("ValidateAndInsertBlock: %+v", err)
-			}
-			_, err = tcSyncee.ValidateAndInsertBlock(&externalapi.DomainBlock{
-				Header:       block.Header,
-				Transactions: nil,
-			})
-			if err != nil {
-				t.Fatalf("ValidateAndInsertBlock: %+v", err)
-			}
-			return consensushashing.BlockHash(block)
-		}
 		tipHash := params.GenesisHash
 		for i := 0; i < finalityDepth-2; i++ {
-			tipHash = addBlock([]*externalapi.DomainHash{tipHash})
+			tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
 		}
 		// Add block in the anticone of the pruning point to test such situation
-		pruningPointAnticoneBlock := addBlock([]*externalapi.DomainHash{tipHash})
-		tipHash = addBlock([]*externalapi.DomainHash{tipHash})
-		nextPruningPoint := addBlock([]*externalapi.DomainHash{tipHash})
-		tipHash = addBlock([]*externalapi.DomainHash{pruningPointAnticoneBlock, nextPruningPoint})
+		pruningPointAnticoneBlock := addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
+		tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
+		nextPruningPoint := addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
+		tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{pruningPointAnticoneBlock, nextPruningPoint}, t)
 		// Add blocks until the pruning point changes
 		for {
-			tipHash = addBlock([]*externalapi.DomainHash{tipHash})
+			tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
 			pruningPoint, err := tcSyncer.PruningPoint()
 			if err != nil {
@@ -187,6 +189,179 @@ func TestValidateAndInsertPruningPoint(t *testing.T) {
 	})
 }
// TestValidateAndInsertPruningPointWithSideBlocks makes sure that when a node applies a UTXO-Set downloaded during
// IBD, while it already has a non-empty UTXO-Set originating from blocks mined on top of genesis - the resulting
// UTXO set is correct
func TestValidateAndInsertPruningPointWithSideBlocks(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, params *dagconfig.Params) {
// This is done to reduce the pruning depth to 6 blocks
finalityDepth := 3
params.FinalityDuration = time.Duration(finalityDepth) * params.TargetTimePerBlock
params.K = 0
factory := consensus.NewFactory()
tcSyncer, teardownSyncer, err := factory.NewTestConsensus(params, false, "TestValidateAndInsertPruningPointSyncer")
if err != nil {
t.Fatalf("Error setting up tcSyncer: %+v", err)
}
defer teardownSyncer(false)
tcSyncee, teardownSyncee, err := factory.NewTestConsensus(params, false, "TestValidateAndInsertPruningPointSyncee")
if err != nil {
t.Fatalf("Error setting up tcSyncee: %+v", err)
}
defer teardownSyncee(false)
// Mine 2 blocks in the syncee on top of genesis
side, _, err := tcSyncee.AddBlock([]*externalapi.DomainHash{params.GenesisHash}, &externalapi.DomainCoinbaseData{ScriptPublicKey: &externalapi.ScriptPublicKey{}, ExtraData: []byte{1, 2}}, nil)
if err != nil {
t.Fatal(err)
}
_, _, err = tcSyncee.AddBlock([]*externalapi.DomainHash{side}, nil, nil)
if err != nil {
t.Fatal(err)
}
tipHash := params.GenesisHash
for i := 0; i < finalityDepth-2; i++ {
tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
}
tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
nextPruningPoint := addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{nextPruningPoint}, t)
// Add blocks until the pruning point changes
for {
tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
pruningPoint, err := tcSyncer.PruningPoint()
if err != nil {
t.Fatalf("PruningPoint: %+v", err)
}
if !pruningPoint.Equal(params.GenesisHash) {
break
}
}
pruningPoint, err := tcSyncer.PruningPoint()
if err != nil {
t.Fatalf("PruningPoint: %+v", err)
}
if !pruningPoint.Equal(nextPruningPoint) {
t.Fatalf("Unexpected pruning point %s", pruningPoint)
}
pruningPointUTXOSet, err := tcSyncer.GetPruningPointUTXOSet(pruningPoint)
if err != nil {
t.Fatalf("GetPruningPointUTXOSet: %+v", err)
}
// Check that ValidateAndInsertPruningPoint works.
pruningPointBlock, err := tcSyncer.GetBlock(pruningPoint)
if err != nil {
t.Fatalf("GetBlock: %+v", err)
}
err = tcSyncee.ValidateAndInsertPruningPoint(pruningPointBlock, pruningPointUTXOSet)
if err != nil {
t.Fatalf("ValidateAndInsertPruningPoint: %+v", err)
}
// Insert the rest of the blocks atop pruning point
virtualSelectedParent, err := tcSyncer.GetVirtualSelectedParent()
if err != nil {
t.Fatalf("GetVirtualSelectedParent: %+v", err)
}
missingBlockBodyHashes, err := tcSyncee.GetMissingBlockBodyHashes(virtualSelectedParent)
if err != nil {
t.Fatalf("GetMissingBlockBodyHashes: %+v", err)
}
for _, missingHash := range missingBlockBodyHashes {
block, err := tcSyncer.GetBlock(missingHash)
if err != nil {
t.Fatalf("GetBlock: %+v", err)
}
_, err = tcSyncee.ValidateAndInsertBlock(block)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
}
// Verify that syncee and syncer tips are equal
synceeTips, err := tcSyncee.Tips()
if err != nil {
t.Fatalf("Tips: %+v", err)
}
syncerTips, err := tcSyncer.Tips()
if err != nil {
t.Fatalf("Tips: %+v", err)
}
if !externalapi.HashesEqual(synceeTips, syncerTips) {
t.Fatalf("Syncee's tips are %s while syncer's are %s", synceeTips, syncerTips)
}
// Verify that syncee and syncer pruning points are equal
synceePruningPoint, err := tcSyncee.PruningPoint()
if err != nil {
t.Fatalf("PruningPoint: %+v", err)
}
if !synceePruningPoint.Equal(pruningPoint) {
t.Fatalf("The syncee pruning point has not changed as exepcted")
}
pruningPointOld := pruningPoint
// Add blocks until the pruning point moves, and verify it moved to the same point on both syncer and syncee
for {
block, _, err := tcSyncer.BuildBlockWithParents([]*externalapi.DomainHash{tipHash}, nil, nil)
if err != nil {
t.Fatalf("BuildBlockWithParents: %+v", err)
}
_, err = tcSyncer.ValidateAndInsertBlock(block)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
_, err = tcSyncee.ValidateAndInsertBlock(block)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
tipHash = consensushashing.BlockHash(block)
pruningPoint, err = tcSyncer.PruningPoint()
if err != nil {
t.Fatalf("PruningPoint: %+v", err)
}
if !pruningPoint.Equal(pruningPointOld) {
break
}
}
synceePruningPoint, err = tcSyncee.PruningPoint()
if err != nil {
t.Fatalf("PruningPoint: %+v", err)
}
if !synceePruningPoint.Equal(pruningPoint) {
t.Fatalf("The syncee pruning point(%s) is not equal to syncer pruning point (%s) after it moved. "+
"pruning point before move: %s", synceePruningPoint, pruningPoint, pruningPointOld)
}
})
}
 type fakeUTXOSetIterator struct {
 	nextCalled bool
} }

View File

@@ -3,9 +3,9 @@ package finalitymanager
 import (
 	"errors"
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/infrastructure/db/database"
 )
 type finalityManager struct {

View File

@@ -1,10 +1,10 @@
 package reachabilitymanager
 import (
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
 	"github.com/kaspanet/kaspad/domain/consensus/utils/reachabilitydata"
-	"github.com/kaspanet/kaspad/infrastructure/db/database"
 	"github.com/pkg/errors"
 )

View File

@@ -1,9 +1,9 @@
 package syncmanager
 import (
+	"github.com/kaspanet/kaspad/domain/consensus/database"
 	"github.com/kaspanet/kaspad/domain/consensus/model"
 	"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
-	"github.com/kaspanet/kaspad/infrastructure/db/database"
 	"github.com/pkg/errors"
 )

View File

@@ -1,86 +0,0 @@
package dbkeys
import (
"bytes"
"encoding/hex"
"github.com/kaspanet/kaspad/domain/consensus/model"
)
var bucketSeparator = []byte("/")
// Key is a helper type meant to combine prefix
// and suffix into a single database key.
type Key struct {
bucket *Bucket
suffix []byte
}
// Bytes returns the full key bytes that are consisted
// from the bucket path concatenated to the suffix.
func (k *Key) Bytes() []byte {
bucketPath := k.bucket.Path()
keyBytes := make([]byte, len(bucketPath)+len(k.suffix))
copy(keyBytes, bucketPath)
copy(keyBytes[len(bucketPath):], k.suffix)
return keyBytes
}
func (k *Key) String() string {
return hex.EncodeToString(k.Bytes())
}
// Bucket returns the key bucket.
func (k *Key) Bucket() model.DBBucket {
return k.bucket
}
// Suffix returns the key suffix.
func (k *Key) Suffix() []byte {
return k.suffix
}
// newKey returns a new key composed
// of the given bucket and suffix
func newKey(bucket *Bucket, suffix []byte) model.DBKey {
return &Key{bucket: bucket, suffix: suffix}
}
// Bucket is a helper type meant to combine buckets
// and sub-buckets that can be used to create database
// keys and prefix-based cursors.
type Bucket struct {
path [][]byte
}
// MakeBucket creates a new Bucket using the given path
// of buckets.
func MakeBucket(path ...[]byte) model.DBBucket {
return &Bucket{path: path}
}
// Bucket returns the sub-bucket of the current bucket
// defined by bucketBytes.
func (b *Bucket) Bucket(bucketBytes []byte) model.DBBucket {
newPath := make([][]byte, len(b.path)+1)
copy(newPath, b.path)
copy(newPath[len(b.path):], [][]byte{bucketBytes})
return MakeBucket(newPath...)
}
// Key returns a key in the current bucket with the
// given suffix.
func (b *Bucket) Key(suffix []byte) model.DBKey {
return newKey(b, suffix)
}
// Path returns the full path of the current bucket.
func (b *Bucket) Path() []byte {
bucketPath := bytes.Join(b.path, bucketSeparator)
bucketPathWithFinalSeparator := make([]byte, len(bucketPath)+len(bucketSeparator))
copy(bucketPathWithFinalSeparator, bucketPath)
copy(bucketPathWithFinalSeparator[len(bucketPath):], bucketSeparator)
return bucketPathWithFinalSeparator
}

View File

@@ -67,7 +67,7 @@ func populateDatabaseForTest(t *testing.T, db database.Database, testName string
 	// Prepare a list of key/value pairs
 	entries := make([]keyValuePair, 10)
 	for i := 0; i < 10; i++ {
-		key := database.MakeBucket().Key([]byte(fmt.Sprintf("key%d", i)))
+		key := database.MakeBucket(nil).Key([]byte(fmt.Sprintf("key%d", i)))
 		value := []byte("value")
 		entries[i] = keyValuePair{key: key, value: value}
 	}

View File

@@ -15,7 +15,7 @@ import (
 )
 func prepareCursorForTest(t *testing.T, db database.Database, testName string) database.Cursor {
-	cursor, err := db.Cursor(database.MakeBucket())
+	cursor, err := db.Cursor(database.MakeBucket(nil))
 	if err != nil {
 		t.Fatalf("%s: Cursor unexpectedly "+
 			"failed: %s", testName, err)
@@ -241,7 +241,7 @@ func testCursorSeek(t *testing.T, db database.Database, testName string) {
 	// Seek to a value that doesn't exist and make sure that
 	// the returned error is ErrNotFound
-	err = cursor.Seek(database.MakeBucket().Key([]byte("doesn't exist")))
+	err = cursor.Seek(database.MakeBucket(nil).Key([]byte("doesn't exist")))
 	if err == nil {
 		t.Fatalf("%s: Seek unexpectedly "+
 			"succeeded", testName)
@@ -274,7 +274,7 @@ func testCursorCloseErrors(t *testing.T, db database.Database, testName string)
 		{
 			name: "Seek",
 			function: func() error {
-				return cursor.Seek(database.MakeBucket().Key([]byte{}))
+				return cursor.Seek(database.MakeBucket(nil).Key([]byte{}))
 			},
 		},
 		{

View File

@@ -18,7 +18,7 @@ func TestDatabasePut(t *testing.T) {
 func testDatabasePut(t *testing.T, db database.Database, testName string) {
 	// Put value1 into the database
-	key := database.MakeBucket().Key([]byte("key"))
+	key := database.MakeBucket(nil).Key([]byte("key"))
 	value1 := []byte("value1")
 	err := db.Put(key, value1)
 	if err != nil {
@@ -65,7 +65,7 @@ func TestDatabaseGet(t *testing.T) {
 func testDatabaseGet(t *testing.T, db database.Database, testName string) {
 	// Put a value into the database
-	key := database.MakeBucket().Key([]byte("key"))
+	key := database.MakeBucket(nil).Key([]byte("key"))
 	value := []byte("value")
 	err := db.Put(key, value)
 	if err != nil {
@@ -87,7 +87,7 @@ func testDatabaseGet(t *testing.T, db database.Database, testName string) {
 	// Try getting a non-existent value and make sure
 	// the returned error is ErrNotFound
-	_, err = db.Get(database.MakeBucket().Key([]byte("doesn't exist")))
+	_, err = db.Get(database.MakeBucket(nil).Key([]byte("doesn't exist")))
 	if err == nil {
 		t.Fatalf("%s: Get "+
 			"unexpectedly succeeded", testName)
@@ -104,7 +104,7 @@ func TestDatabaseHas(t *testing.T) {
 func testDatabaseHas(t *testing.T, db database.Database, testName string) {
 	// Put a value into the database
-	key := database.MakeBucket().Key([]byte("key"))
+	key := database.MakeBucket(nil).Key([]byte("key"))
 	value := []byte("value")
 	err := db.Put(key, value)
 	if err != nil {
@@ -124,7 +124,7 @@ func testDatabaseHas(t *testing.T, db database.Database, testName string) {
 	}
 	// Make sure that Has returns false for a non-existent value
-	exists, err = db.Has(database.MakeBucket().Key([]byte("doesn't exist")))
+	exists, err = db.Has(database.MakeBucket(nil).Key([]byte("doesn't exist")))
 	if err != nil {
 		t.Fatalf("%s: Has "+
 			"unexpectedly failed: %s", testName, err)
@@ -141,7 +141,7 @@ func TestDatabaseDelete(t *testing.T) {
 func testDatabaseDelete(t *testing.T, db database.Database, testName string) {
 	// Put a value into the database
-	key := database.MakeBucket().Key([]byte("key"))
+	key := database.MakeBucket(nil).Key([]byte("key"))
 	value := []byte("value")
 	err := db.Put(key, value)
 	if err != nil {

View File

@@ -1,11 +1,10 @@
 package database
 import (
-	"bytes"
 	"encoding/hex"
 )
-var bucketSeparator = []byte("/")
+var bucketSeparator = byte('/')
 // Key is a helper type meant to combine prefix
 // and suffix into a single database key.
@@ -48,23 +47,25 @@ func newKey(bucket *Bucket, suffix []byte) *Key {
 // and sub-buckets that can be used to create database
 // keys and prefix-based cursors.
 type Bucket struct {
-	path [][]byte
+	path []byte
 }
 // MakeBucket creates a new Bucket using the given path
 // of buckets.
-func MakeBucket(path ...[]byte) *Bucket {
+func MakeBucket(path []byte) *Bucket {
+	if len(path) > 0 && path[len(path)-1] != bucketSeparator {
+		path = append(path, bucketSeparator)
+	}
 	return &Bucket{path: path}
 }
 // Bucket returns the sub-bucket of the current bucket
 // defined by bucketBytes.
 func (b *Bucket) Bucket(bucketBytes []byte) *Bucket {
-	newPath := make([][]byte, len(b.path)+1)
-	copy(newPath, b.path)
-	copy(newPath[len(b.path):], [][]byte{bucketBytes})
-	return MakeBucket(newPath...)
+	newPath := make([]byte, 0, len(b.path)+len(bucketBytes)+1) // +1 for the separator in MakeBucket
+	newPath = append(newPath, b.path...)
+	newPath = append(newPath, bucketBytes...)
+	return MakeBucket(newPath)
 }
 // Key returns a key in the current bucket with the
@@ -75,11 +76,5 @@ func (b *Bucket) Key(suffix []byte) *Key {
 // Path returns the full path of the current bucket.
 func (b *Bucket) Path() []byte {
-	bucketPath := bytes.Join(b.path, bucketSeparator)
-	bucketPathWithFinalSeparator := make([]byte, len(bucketPath)+len(bucketSeparator))
-	copy(bucketPathWithFinalSeparator, bucketPath)
-	copy(bucketPathWithFinalSeparator[len(bucketPath):], bucketSeparator)
-	return bucketPathWithFinalSeparator
+	return b.path
 }
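
To make the effect of the new representation concrete, here is a small standalone sketch (an illustrative copy, not the package itself) showing that a path now always ends in exactly one separator and that feeding a bucket's Path back into MakeBucket no longer adds a second one:

package main

import "fmt"

const bucketSeparator = byte('/')

type bucket struct{ path []byte }

// makeBucket mirrors the new MakeBucket: it appends a separator only when the
// path does not already end with one, so the operation is idempotent.
func makeBucket(path []byte) *bucket {
	if len(path) > 0 && path[len(path)-1] != bucketSeparator {
		path = append(path, bucketSeparator)
	}
	return &bucket{path: path}
}

// subBucket mirrors the new Bucket method: concatenate, then normalize.
func (b *bucket) subBucket(bucketBytes []byte) *bucket {
	newPath := make([]byte, 0, len(b.path)+len(bucketBytes)+1)
	newPath = append(newPath, b.path...)
	newPath = append(newPath, bucketBytes...)
	return makeBucket(newPath)
}

func main() {
	b := makeBucket([]byte("acceptance-data")).subBucket([]byte("sub"))
	fmt.Printf("%s\n", b.path)                  // acceptance-data/sub/
	fmt.Printf("%s\n", makeBucket(b.path).path) // acceptance-data/sub/ (unchanged)
}

Under this scheme, MakeBucket(b.Path()).Path() equals b.Path(), which is the property the consensus/database wrapper's fallback path relies on.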

View File

@@ -6,6 +6,10 @@ import (
"testing" "testing"
) )
func makeBucketJoin(path ...[]byte) *Bucket {
return MakeBucket(bytes.Join(path, []byte{bucketSeparator}))
}
func TestBucketPath(t *testing.T) { func TestBucketPath(t *testing.T) {
tests := []struct { tests := []struct {
bucketByteSlices [][]byte bucketByteSlices [][]byte
@@ -23,14 +27,14 @@ func TestBucketPath(t *testing.T) {
for _, test := range tests { for _, test := range tests {
// Build a result using the MakeBucket function alone // Build a result using the MakeBucket function alone
resultKey := MakeBucket(test.bucketByteSlices...).Path() resultKey := makeBucketJoin(test.bucketByteSlices...).Path()
if !reflect.DeepEqual(resultKey, test.expectedPath) { if !reflect.DeepEqual(resultKey, test.expectedPath) {
t.Errorf("TestBucketPath: got wrong path using MakeBucket. "+ t.Errorf("TestBucketPath: got wrong path using MakeBucket. "+
"Want: %s, got: %s", string(test.expectedPath), string(resultKey)) "Want: %s, got: %s", string(test.expectedPath), string(resultKey))
} }
// Build a result using sub-Bucket calls // Build a result using sub-Bucket calls
bucket := MakeBucket() bucket := MakeBucket(nil)
for _, bucketBytes := range test.bucketByteSlices { for _, bucketBytes := range test.bucketByteSlices {
bucket = bucket.Bucket(bucketBytes) bucket = bucket.Bucket(bucketBytes)
} }
@@ -63,14 +67,14 @@ func TestBucketKey(t *testing.T) {
key: []byte("test"), key: []byte("test"),
expectedKeyBytes: []byte("hello/world/test"), expectedKeyBytes: []byte("hello/world/test"),
expectedKey: &Key{ expectedKey: &Key{
bucket: MakeBucket([]byte("hello"), []byte("world")), bucket: makeBucketJoin([]byte("hello"), []byte("world")),
suffix: []byte("test"), suffix: []byte("test"),
}, },
}, },
} }
for _, test := range tests { for _, test := range tests {
resultKey := MakeBucket(test.bucketByteSlices...).Key(test.key) resultKey := makeBucketJoin(test.bucketByteSlices...).Key(test.key)
if !reflect.DeepEqual(resultKey, test.expectedKey) { if !reflect.DeepEqual(resultKey, test.expectedKey) {
t.Errorf("TestBucketKey: got wrong key. Want: %s, got: %s", t.Errorf("TestBucketKey: got wrong key. Want: %s, got: %s",
test.expectedKeyBytes, resultKey) test.expectedKeyBytes, resultKey)

View File

@@ -95,7 +95,7 @@ func TestCursorSanity(t *testing.T) {
 	validateCurrentCursorKeyAndValue(t, "TestCursorSanity", cursor, expectedKey, expectedValue)
 	// Seek to a non-existant key
-	err = cursor.Seek(database.MakeBucket().Key([]byte("doesn't exist")))
+	err = cursor.Seek(database.MakeBucket(nil).Key([]byte("doesn't exist")))
 	if err == nil {
 		t.Fatalf("TestCursorSanity: Seek " +
 			"unexpectedly succeeded")
@@ -155,7 +155,7 @@ func TestCursorCloseErrors(t *testing.T) {
 		{
 			name: "Seek",
 			function: func(cursor database.Cursor) error {
-				return cursor.Seek(database.MakeBucket().Key([]byte{}))
+				return cursor.Seek(database.MakeBucket(nil).Key([]byte{}))
 			},
 		},
 		{
@@ -186,7 +186,7 @@ func TestCursorCloseErrors(t *testing.T) {
 	defer teardownFunc()
 	// Open a new cursor
-	cursor, err := ldb.Cursor(database.MakeBucket())
+	cursor, err := ldb.Cursor(database.MakeBucket(nil))
 	if err != nil {
 		t.Fatalf("TestCursorCloseErrors: ldb.Cursor "+
 			"unexpectedly failed: %s", err)

View File

@@ -35,7 +35,7 @@ func TestLevelDBSanity(t *testing.T) {
 	defer teardownFunc()
 	// Put something into the db
-	key := database.MakeBucket().Key([]byte("key"))
+	key := database.MakeBucket(nil).Key([]byte("key"))
 	putData := []byte("Hello world!")
 	err := ldb.Put(key, putData)
 	if err != nil {
@@ -71,7 +71,7 @@ func TestLevelDBTransactionSanity(t *testing.T) {
 	}
 	// Put something into the transaction
-	key := database.MakeBucket().Key([]byte("key"))
+	key := database.MakeBucket(nil).Key([]byte("key"))
 	putData := []byte("Hello world!")
 	err = tx.Put(key, putData)
 	if err != nil {
@@ -115,7 +115,7 @@ func TestLevelDBTransactionSanity(t *testing.T) {
 	// Case 2. Write directly to the DB and then read from a tx
 	// Put something into the db
-	key = database.MakeBucket().Key([]byte("key2"))
+	key = database.MakeBucket(nil).Key([]byte("key2"))
 	putData = []byte("Goodbye world!")
 	err = ldb.Put(key, putData)
 	if err != nil {

View File

@@ -20,14 +20,14 @@ func TestTransactionCloseErrors(t *testing.T) {
 		{
 			name: "Put",
 			function: func(dbTx *LevelDBTransaction) error {
-				return dbTx.Put(database.MakeBucket().Key([]byte("key")), []byte("value"))
+				return dbTx.Put(database.MakeBucket(nil).Key([]byte("key")), []byte("value"))
 			},
 			shouldReturnError: true,
 		},
 		{
 			name: "Get",
 			function: func(dbTx *LevelDBTransaction) error {
-				_, err := dbTx.Get(database.MakeBucket().Key([]byte("key")))
+				_, err := dbTx.Get(database.MakeBucket(nil).Key([]byte("key")))
 				return err
 			},
 			shouldReturnError: true,
@@ -35,7 +35,7 @@ func TestTransactionCloseErrors(t *testing.T) {
 		{
 			name: "Has",
 			function: func(dbTx *LevelDBTransaction) error {
-				_, err := dbTx.Has(database.MakeBucket().Key([]byte("key")))
+				_, err := dbTx.Has(database.MakeBucket(nil).Key([]byte("key")))
 				return err
 			},
 			shouldReturnError: true,
@@ -43,7 +43,7 @@ func TestTransactionCloseErrors(t *testing.T) {
 		{
 			name: "Delete",
 			function: func(dbTx *LevelDBTransaction) error {
-				return dbTx.Delete(database.MakeBucket().Key([]byte("key")))
+				return dbTx.Delete(database.MakeBucket(nil).Key([]byte("key")))
 			},
 			shouldReturnError: true,
 		},

View File

@@ -33,7 +33,7 @@ func testTransactionPut(t *testing.T, db database.Database, testName string) {
 	}()
 	// Put value1 into the transaction
-	key := database.MakeBucket().Key([]byte("key"))
+	key := database.MakeBucket(nil).Key([]byte("key"))
 	value1 := []byte("value1")
 	err = dbTx.Put(key, value1)
 	if err != nil {
@@ -75,7 +75,7 @@ func TestTransactionGet(t *testing.T) {
 func testTransactionGet(t *testing.T, db database.Database, testName string) {
 	// Put a value into the database
-	key1 := database.MakeBucket().Key([]byte("key1"))
+	key1 := database.MakeBucket(nil).Key([]byte("key1"))
 	value1 := []byte("value1")
 	err := db.Put(key1, value1)
 	if err != nil {
@@ -111,7 +111,7 @@ func testTransactionGet(t *testing.T, db database.Database, testName string) {
 	// Try getting a non-existent value and make sure
 	// the returned error is ErrNotFound
-	_, err = dbTx.Get(database.MakeBucket().Key([]byte("doesn't exist")))
+	_, err = dbTx.Get(database.MakeBucket(nil).Key([]byte("doesn't exist")))
 	if err == nil {
 		t.Fatalf("%s: Get "+
 			"unexpectedly succeeded", testName)
@@ -122,7 +122,7 @@ func testTransactionGet(t *testing.T, db database.Database, testName string) {
 	}
 	// Put a new value into the database outside of the transaction
-	key2 := database.MakeBucket().Key([]byte("key2"))
+	key2 := database.MakeBucket(nil).Key([]byte("key2"))
 	value2 := []byte("value2")
 	err = db.Put(key2, value2)
 	if err != nil {
@@ -141,7 +141,7 @@ func testTransactionGet(t *testing.T, db database.Database, testName string) {
 	}
 	// Put a new value into the transaction
-	key3 := database.MakeBucket().Key([]byte("key3"))
+	key3 := database.MakeBucket(nil).Key([]byte("key3"))
 	value3 := []byte("value3")
 	err = dbTx.Put(key3, value3)
 	if err != nil {
@@ -167,7 +167,7 @@ func TestTransactionHas(t *testing.T) {
 func testTransactionHas(t *testing.T, db database.Database, testName string) {
 	// Put a value into the database
-	key1 := database.MakeBucket().Key([]byte("key1"))
+	key1 := database.MakeBucket(nil).Key([]byte("key1"))
 	value1 := []byte("value1")
 	err := db.Put(key1, value1)
 	if err != nil {
@@ -201,7 +201,7 @@ func testTransactionHas(t *testing.T, db database.Database, testName string) {
 	}
 	// Make sure that Has returns false for a non-existent value
-	exists, err = dbTx.Has(database.MakeBucket().Key([]byte("doesn't exist")))
+	exists, err = dbTx.Has(database.MakeBucket(nil).Key([]byte("doesn't exist")))
 	if err != nil {
 		t.Fatalf("%s: Has "+
 			"unexpectedly failed: %s", testName, err)
@@ -212,7 +212,7 @@ func testTransactionHas(t *testing.T, db database.Database, testName string) {
 	}
 	// Put a new value into the database outside of the transaction
-	key2 := database.MakeBucket().Key([]byte("key2"))
+	key2 := database.MakeBucket(nil).Key([]byte("key2"))
 	value2 := []byte("value2")
 	err = db.Put(key2, value2)
 	if err != nil {
@@ -238,7 +238,7 @@ func TestTransactionDelete(t *testing.T) {
 func testTransactionDelete(t *testing.T, db database.Database, testName string) {
 	// Put a value into the database
-	key := database.MakeBucket().Key([]byte("key"))
+	key := database.MakeBucket(nil).Key([]byte("key"))
 	value := []byte("value")
 	err := db.Put(key, value)
 	if err != nil {
@@ -327,7 +327,7 @@ func testTransactionCommit(t *testing.T, db database.Database, testName string)
 	}()
 	// Put a value into the transaction
-	key := database.MakeBucket().Key([]byte("key"))
+	key := database.MakeBucket(nil).Key([]byte("key"))
 	value := []byte("value")
 	err = dbTx.Put(key, value)
 	if err != nil {
@@ -388,7 +388,7 @@ func testTransactionRollback(t *testing.T, db database.Database, testName string
 	}()
 	// Put a value into the transaction
-	key := database.MakeBucket().Key([]byte("key"))
+	key := database.MakeBucket(nil).Key([]byte("key"))
 	value := []byte("value")
 	err = dbTx.Put(key, value)
 	if err != nil {
@@ -448,7 +448,7 @@ func testTransactionRollbackUnlessClosed(t *testing.T, db database.Database, tes
 	}()
 	// Put a value into the transaction
-	key := database.MakeBucket().Key([]byte("key"))
+	key := database.MakeBucket(nil).Key([]byte("key"))
 	value := []byte("value")
 	err = dbTx.Put(key, value)
 	if err != nil {