Restructure database to prevent double-slashes in keys, causing bugs in cursors (#1432)

* Add TestValidateAndInsertPruningPointWithSideBlocks

* Optimize infrastructure bucket paths

* Update infrastructure tests

* Refactor the consensus/database layer

* Remove utils/dbkeys

* Use consensus/database in consensus instead of infrastructure

* Fix a bug in dbBucketToDatabaseBucket and MakeBucket combination

Co-authored-by: Elichai Turkel <elichai.turkel@gmail.com>
Co-authored-by: stasatdaglabs <39559713+stasatdaglabs@users.noreply.github.com>
This commit is contained in:
Svarog 2021-01-19 14:19:08 +02:00 committed by GitHub
parent a4adbabf96
commit ad9c213a06
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
34 changed files with 327 additions and 215 deletions

View File

@ -3,8 +3,7 @@ package consensus
import (
"sync"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"

View File

@ -6,9 +6,18 @@ import (
)
// dbBucketToDatabaseBucket unwraps a model.DBBucket into the underlying
// *database.Bucket. If the value is already a dbBucket wrapper, its inner
// bucket is returned directly; otherwise a bucket is rebuilt from the
// serialized path.
func dbBucketToDatabaseBucket(bucket model.DBBucket) *database.Bucket {
	if bucket, ok := bucket.(dbBucket); ok {
		return bucket.bucket
	}
	// This assumes that MakeBucket(src).Path() == src, which is not promised anywhere.
	return database.MakeBucket(bucket.Path())
}
// MakeBucket creates a new Bucket using the given path of buckets.
func MakeBucket(path []byte) model.DBBucket {
	innerBucket := database.MakeBucket(path)
	return dbBucket{bucket: innerBucket}
}
// dbBucket is a thin adapter that lets a *database.Bucket satisfy the
// model.DBBucket interface.
type dbBucket struct {
	bucket *database.Bucket
}
@ -26,5 +35,5 @@ func (d dbBucket) Path() []byte {
}
func newDBBucket(bucket *database.Bucket) model.DBBucket {
return &dbBucket{bucket: bucket}
return dbBucket{bucket: bucket}
}

View File

@ -0,0 +1,14 @@
package database
import (
"github.com/kaspanet/kaspad/infrastructure/db/database"
)
// ErrNotFound denotes that the requested item was not
// found in the database.
var ErrNotFound = database.ErrNotFound
// IsNotFoundError checks whether an error is an ErrNotFound.
func IsNotFoundError(err error) bool {
return database.IsNotFoundError(err)
}

View File

@ -6,6 +6,9 @@ import (
)
// dbKeyToDatabaseKey unwraps a model.DBKey into the underlying *database.Key.
// If the value is already a dbKey wrapper, its inner key is returned directly;
// otherwise the key is rebuilt from its bucket and suffix.
func dbKeyToDatabaseKey(key model.DBKey) *database.Key {
	if key, ok := key.(dbKey); ok {
		return key.key
	}
	return dbBucketToDatabaseBucket(key.Bucket()).Key(key.Suffix())
}

View File

@ -1,15 +1,15 @@
package acceptancedatastore
import (
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"google.golang.org/protobuf/proto"
)
var bucket = dbkeys.MakeBucket([]byte("acceptance-data"))
var bucket = database.MakeBucket([]byte("acceptance-data"))
// acceptanceDataStore represents a store of AcceptanceData
type acceptanceDataStore struct {

View File

@ -2,15 +2,15 @@ package blockheaderstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("block-headers"))
var countKey = dbkeys.MakeBucket().Key([]byte("block-headers-count"))
var bucket = database.MakeBucket([]byte("block-headers"))
var countKey = database.MakeBucket(nil).Key([]byte("block-headers-count"))
// blockHeaderStore represents a store of blocks
type blockHeaderStore struct {

View File

@ -2,14 +2,14 @@ package blockrelationstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("block-relations"))
var bucket = database.MakeBucket([]byte("block-relations"))
// blockRelationStore represents a store of BlockRelations
type blockRelationStore struct {

View File

@ -2,14 +2,14 @@ package blockstatusstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("block-statuses"))
var bucket = database.MakeBucket([]byte("block-statuses"))
// blockStatusStore represents a store of BlockStatuses
type blockStatusStore struct {

View File

@ -2,15 +2,15 @@ package blockstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("blocks"))
var countKey = dbkeys.MakeBucket().Key([]byte("blocks-count"))
var bucket = database.MakeBucket([]byte("blocks"))
var countKey = database.MakeBucket(nil).Key([]byte("blocks-count"))
// blockStore represents a store of blocks
type blockStore struct {

View File

@ -2,13 +2,13 @@ package consensusstatestore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
)
var tipsKey = dbkeys.MakeBucket().Key([]byte("tips"))
var tipsKey = database.MakeBucket(nil).Key([]byte("tips"))
func (css *consensusStateStore) Tips(dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
if css.tipsStaging != nil {

View File

@ -1,14 +1,14 @@
package consensusstatestore
import (
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/utxo"
"github.com/pkg/errors"
)
var utxoSetBucket = dbkeys.MakeBucket([]byte("virtual-utxo-set"))
var utxoSetBucket = database.MakeBucket([]byte("virtual-utxo-set"))
func utxoKey(outpoint *externalapi.DomainOutpoint) (model.DBKey, error) {
serializedOutpoint, err := serializeOutpoint(outpoint)

View File

@ -2,13 +2,13 @@ package consensusstatestore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
)
var virtualDiffParentsKey = dbkeys.MakeBucket().Key([]byte("virtual-diff-parents"))
var virtualDiffParentsKey = database.MakeBucket(nil).Key([]byte("virtual-diff-parents"))
func (css *consensusStateStore) VirtualDiffParents(dbContext model.DBReader) ([]*externalapi.DomainHash, error) {
if css.virtualDiffParentsStaging != nil {

View File

@ -1,13 +1,13 @@
package finalitystore
import (
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("finality-points"))
var bucket = database.MakeBucket([]byte("finality-points"))
type finalityStore struct {
staging map[externalapi.DomainHash]*externalapi.DomainHash

View File

@ -2,14 +2,14 @@ package ghostdagdatastore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("block-ghostdag-data"))
var bucket = database.MakeBucket([]byte("block-ghostdag-data"))
// ghostdagDataStore represents a store of BlockGHOSTDAGData
type ghostdagDataStore struct {

View File

@ -2,19 +2,18 @@ package headersselectedchainstore
import (
"encoding/binary"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/binaryserialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucacheuint64tohash"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/pkg/errors"
)
var bucketChainBlockHashByIndex = dbkeys.MakeBucket([]byte("chain-block-hash-by-index"))
var bucketChainBlockIndexByHash = dbkeys.MakeBucket([]byte("chain-block-index-by-hash"))
var highestChainBlockIndexKey = dbkeys.MakeBucket().Key([]byte("highest-chain-block-index"))
var bucketChainBlockHashByIndex = database.MakeBucket([]byte("chain-block-hash-by-index"))
var bucketChainBlockIndexByHash = database.MakeBucket([]byte("chain-block-index-by-hash"))
var highestChainBlockIndexKey = database.MakeBucket(nil).Key([]byte("highest-chain-block-index"))
type headersSelectedChainStore struct {
stagingAddedByHash map[externalapi.DomainHash]uint64

View File

@ -2,13 +2,13 @@ package headersselectedtipstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
)
var headerSelectedTipKey = dbkeys.MakeBucket().Key([]byte("headers-selected-tip"))
var headerSelectedTipKey = database.MakeBucket(nil).Key([]byte("headers-selected-tip"))
type headerSelectedTipStore struct {
staging *externalapi.DomainHash

View File

@ -2,14 +2,14 @@ package multisetstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var bucket = dbkeys.MakeBucket([]byte("multisets"))
var bucket = database.MakeBucket([]byte("multisets"))
// multisetStore represents a store of Multisets
type multisetStore struct {

View File

@ -2,15 +2,15 @@ package pruningstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
)
var pruningBlockHashKey = dbkeys.MakeBucket().Key([]byte("pruning-block-hash"))
var candidatePruningPointHashKey = dbkeys.MakeBucket().Key([]byte("candidate-pruning-point-hash"))
var pruningSerializedUTXOSetKey = dbkeys.MakeBucket().Key([]byte("pruning-utxo-set"))
var pruningBlockHashKey = database.MakeBucket(nil).Key([]byte("pruning-block-hash"))
var candidatePruningPointHashKey = database.MakeBucket(nil).Key([]byte("candidate-pruning-point-hash"))
var pruningSerializedUTXOSetKey = database.MakeBucket(nil).Key([]byte("pruning-utxo-set"))
// pruningStore represents a store for the current pruning state
type pruningStore struct {

View File

@ -2,15 +2,15 @@ package reachabilitydatastore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
)
var reachabilityDataBucket = dbkeys.MakeBucket([]byte("reachability-data"))
var reachabilityReindexRootKey = dbkeys.MakeBucket().Key([]byte("reachability-reindex-root"))
var reachabilityDataBucket = database.MakeBucket([]byte("reachability-data"))
var reachabilityReindexRootKey = database.MakeBucket(nil).Key([]byte("reachability-reindex-root"))
// reachabilityDataStore represents a store of ReachabilityData
type reachabilityDataStore struct {

View File

@ -2,16 +2,16 @@ package utxodiffstore
import (
"github.com/golang/protobuf/proto"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/database/serialization"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/dbkeys"
"github.com/kaspanet/kaspad/domain/consensus/utils/lrucache"
"github.com/pkg/errors"
)
var utxoDiffBucket = dbkeys.MakeBucket([]byte("utxo-diffs"))
var utxoDiffChildBucket = dbkeys.MakeBucket([]byte("utxo-diff-children"))
var utxoDiffBucket = database.MakeBucket([]byte("utxo-diffs"))
var utxoDiffChildBucket = database.MakeBucket([]byte("utxo-diff-children"))
// utxoDiffStore represents a store of UTXODiffs
type utxoDiffStore struct {

View File

@ -1,8 +1,12 @@
package blockprocessor_test
import (
"testing"
"time"
"github.com/kaspanet/kaspad/domain/consensus"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/model/testapi"
"github.com/kaspanet/kaspad/domain/consensus/ruleerrors"
"github.com/kaspanet/kaspad/domain/consensus/utils/consensushashing"
"github.com/kaspanet/kaspad/domain/consensus/utils/testutils"
@ -11,10 +15,30 @@ import (
"github.com/kaspanet/kaspad/domain/dagconfig"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
"testing"
"time"
)
// addBlock builds a block on top of parentHashes in the syncer, inserts it
// into the syncer, and then inserts a header-only copy into the syncee.
// It returns the new block's hash, failing the test on any error.
func addBlock(tcSyncer, tcSyncee testapi.TestConsensus, parentHashes []*externalapi.DomainHash, t *testing.T) *externalapi.DomainHash {
	newBlock, _, err := tcSyncer.BuildBlockWithParents(parentHashes, nil, nil)
	if err != nil {
		t.Fatalf("BuildBlockWithParents: %+v", err)
	}

	if _, err = tcSyncer.ValidateAndInsertBlock(newBlock); err != nil {
		t.Fatalf("ValidateAndInsertBlock: %+v", err)
	}

	// The syncee only receives the header (body-less block), mimicking
	// headers-first sync.
	headerOnlyBlock := &externalapi.DomainBlock{
		Header:       newBlock.Header,
		Transactions: nil,
	}
	if _, err = tcSyncee.ValidateAndInsertBlock(headerOnlyBlock); err != nil {
		t.Fatalf("ValidateAndInsertBlock: %+v", err)
	}

	return consensushashing.BlockHash(newBlock)
}
func TestValidateAndInsertPruningPoint(t *testing.T) {
testutils.ForAllNets(t, true, func(t *testing.T, params *dagconfig.Params) {
// This is done to reduce the pruning depth to 6 blocks
@ -36,43 +60,21 @@ func TestValidateAndInsertPruningPoint(t *testing.T) {
}
defer teardownSyncee(false)
addBlock := func(parentHashes []*externalapi.DomainHash) *externalapi.DomainHash {
block, _, err := tcSyncer.BuildBlockWithParents(parentHashes, nil, nil)
if err != nil {
t.Fatalf("BuildBlockWithParents: %+v", err)
}
_, err = tcSyncer.ValidateAndInsertBlock(block)
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
_, err = tcSyncee.ValidateAndInsertBlock(&externalapi.DomainBlock{
Header: block.Header,
Transactions: nil,
})
if err != nil {
t.Fatalf("ValidateAndInsertBlock: %+v", err)
}
return consensushashing.BlockHash(block)
}
tipHash := params.GenesisHash
for i := 0; i < finalityDepth-2; i++ {
tipHash = addBlock([]*externalapi.DomainHash{tipHash})
tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
}
// Add block in the anticone of the pruning point to test such situation
pruningPointAnticoneBlock := addBlock([]*externalapi.DomainHash{tipHash})
tipHash = addBlock([]*externalapi.DomainHash{tipHash})
nextPruningPoint := addBlock([]*externalapi.DomainHash{tipHash})
pruningPointAnticoneBlock := addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
nextPruningPoint := addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
tipHash = addBlock([]*externalapi.DomainHash{pruningPointAnticoneBlock, nextPruningPoint})
tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{pruningPointAnticoneBlock, nextPruningPoint}, t)
// Add blocks until the pruning point changes
for {
tipHash = addBlock([]*externalapi.DomainHash{tipHash})
tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
pruningPoint, err := tcSyncer.PruningPoint()
if err != nil {
@ -187,6 +189,179 @@ func TestValidateAndInsertPruningPoint(t *testing.T) {
})
}
// TestValidateAndInsertPruningPointWithSideBlocks makes sure that when a node applies a UTXO-Set downloaded during
// IBD, while it already has a non-empty UTXO-Set originating from blocks mined on top of genesis - the resulting
// UTXO set is correct
func TestValidateAndInsertPruningPointWithSideBlocks(t *testing.T) {
	testutils.ForAllNets(t, true, func(t *testing.T, params *dagconfig.Params) {
		// This is done to reduce the pruning depth to 6 blocks
		finalityDepth := 3
		params.FinalityDuration = time.Duration(finalityDepth) * params.TargetTimePerBlock
		params.K = 0

		factory := consensus.NewFactory()

		tcSyncer, teardownSyncer, err := factory.NewTestConsensus(params, false, "TestValidateAndInsertPruningPointSyncer")
		if err != nil {
			t.Fatalf("Error setting up tcSyncer: %+v", err)
		}
		defer teardownSyncer(false)

		tcSyncee, teardownSyncee, err := factory.NewTestConsensus(params, false, "TestValidateAndInsertPruningPointSyncee")
		if err != nil {
			t.Fatalf("Error setting up tcSyncee: %+v", err)
		}
		defer teardownSyncee(false)

		// Mine 2 blocks in the syncee on top of genesis
		side, _, err := tcSyncee.AddBlock([]*externalapi.DomainHash{params.GenesisHash}, &externalapi.DomainCoinbaseData{ScriptPublicKey: &externalapi.ScriptPublicKey{}, ExtraData: []byte{1, 2}}, nil)
		if err != nil {
			t.Fatal(err)
		}
		_, _, err = tcSyncee.AddBlock([]*externalapi.DomainHash{side}, nil, nil)
		if err != nil {
			t.Fatal(err)
		}

		tipHash := params.GenesisHash
		for i := 0; i < finalityDepth-2; i++ {
			tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
		}

		tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)
		nextPruningPoint := addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)

		tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{nextPruningPoint}, t)

		// Add blocks until the pruning point changes
		for {
			tipHash = addBlock(tcSyncer, tcSyncee, []*externalapi.DomainHash{tipHash}, t)

			pruningPoint, err := tcSyncer.PruningPoint()
			if err != nil {
				t.Fatalf("PruningPoint: %+v", err)
			}

			if !pruningPoint.Equal(params.GenesisHash) {
				break
			}
		}

		pruningPoint, err := tcSyncer.PruningPoint()
		if err != nil {
			t.Fatalf("PruningPoint: %+v", err)
		}

		if !pruningPoint.Equal(nextPruningPoint) {
			t.Fatalf("Unexpected pruning point %s", pruningPoint)
		}

		pruningPointUTXOSet, err := tcSyncer.GetPruningPointUTXOSet(pruningPoint)
		if err != nil {
			t.Fatalf("GetPruningPointUTXOSet: %+v", err)
		}

		// Check that ValidateAndInsertPruningPoint works.
		pruningPointBlock, err := tcSyncer.GetBlock(pruningPoint)
		if err != nil {
			t.Fatalf("GetBlock: %+v", err)
		}
		err = tcSyncee.ValidateAndInsertPruningPoint(pruningPointBlock, pruningPointUTXOSet)
		if err != nil {
			t.Fatalf("ValidateAndInsertPruningPoint: %+v", err)
		}

		// Insert the rest of the blocks atop pruning point
		virtualSelectedParent, err := tcSyncer.GetVirtualSelectedParent()
		if err != nil {
			t.Fatalf("GetVirtualSelectedParent: %+v", err)
		}

		missingBlockBodyHashes, err := tcSyncee.GetMissingBlockBodyHashes(virtualSelectedParent)
		if err != nil {
			t.Fatalf("GetMissingBlockBodyHashes: %+v", err)
		}

		for _, missingHash := range missingBlockBodyHashes {
			block, err := tcSyncer.GetBlock(missingHash)
			if err != nil {
				t.Fatalf("GetBlock: %+v", err)
			}

			_, err = tcSyncee.ValidateAndInsertBlock(block)
			if err != nil {
				t.Fatalf("ValidateAndInsertBlock: %+v", err)
			}
		}

		// Verify that syncee and syncer tips are equal
		synceeTips, err := tcSyncee.Tips()
		if err != nil {
			t.Fatalf("Tips: %+v", err)
		}

		syncerTips, err := tcSyncer.Tips()
		if err != nil {
			t.Fatalf("Tips: %+v", err)
		}

		if !externalapi.HashesEqual(synceeTips, syncerTips) {
			t.Fatalf("Syncee's tips are %s while syncer's are %s", synceeTips, syncerTips)
		}

		// Verify that syncee and syncer pruning points are equal
		synceePruningPoint, err := tcSyncee.PruningPoint()
		if err != nil {
			t.Fatalf("PruningPoint: %+v", err)
		}

		if !synceePruningPoint.Equal(pruningPoint) {
			t.Fatalf("The syncee pruning point has not changed as expected")
		}

		pruningPointOld := pruningPoint

		// Add blocks until the pruning point moves, and verify it moved to the same point on both syncer and syncee
		for {
			block, _, err := tcSyncer.BuildBlockWithParents([]*externalapi.DomainHash{tipHash}, nil, nil)
			if err != nil {
				t.Fatalf("BuildBlockWithParents: %+v", err)
			}

			_, err = tcSyncer.ValidateAndInsertBlock(block)
			if err != nil {
				t.Fatalf("ValidateAndInsertBlock: %+v", err)
			}

			_, err = tcSyncee.ValidateAndInsertBlock(block)
			if err != nil {
				t.Fatalf("ValidateAndInsertBlock: %+v", err)
			}

			tipHash = consensushashing.BlockHash(block)

			pruningPoint, err = tcSyncer.PruningPoint()
			if err != nil {
				t.Fatalf("PruningPoint: %+v", err)
			}

			if !pruningPoint.Equal(pruningPointOld) {
				break
			}
		}

		synceePruningPoint, err = tcSyncee.PruningPoint()
		if err != nil {
			t.Fatalf("PruningPoint: %+v", err)
		}

		if !synceePruningPoint.Equal(pruningPoint) {
			t.Fatalf("The syncee pruning point(%s) is not equal to syncer pruning point (%s) after it moved. "+
				"pruning point before move: %s", synceePruningPoint, pruningPoint, pruningPointOld)
		}
	})
}
// fakeUTXOSetIterator is a test stub for a UTXO-set iterator.
type fakeUTXOSetIterator struct {
	// nextCalled records whether Next has been invoked — presumably used by
	// tests to detect premature iteration; NOTE(review): confirm against the
	// iterator's methods, which are not visible in this chunk.
	nextCalled bool
}

View File

@ -3,9 +3,9 @@ package finalitymanager
import (
"errors"
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/db/database"
)
type finalityManager struct {

View File

@ -1,10 +1,10 @@
package reachabilitymanager
import (
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/domain/consensus/utils/reachabilitydata"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/pkg/errors"
)

View File

@ -1,9 +1,9 @@
package syncmanager
import (
"github.com/kaspanet/kaspad/domain/consensus/database"
"github.com/kaspanet/kaspad/domain/consensus/model"
"github.com/kaspanet/kaspad/domain/consensus/model/externalapi"
"github.com/kaspanet/kaspad/infrastructure/db/database"
"github.com/pkg/errors"
)

View File

@ -1,86 +0,0 @@
package dbkeys
import (
"bytes"
"encoding/hex"
"github.com/kaspanet/kaspad/domain/consensus/model"
)
var bucketSeparator = []byte("/")
// Key is a helper type meant to combine prefix
// and suffix into a single database key.
type Key struct {
	bucket *Bucket // the bucket (prefix) portion of the key
	suffix []byte  // the final component, appended after the bucket path
}
// Bytes returns the full key bytes: the bucket path
// concatenated with the suffix.
func (k *Key) Bytes() []byte {
	bucketPath := k.bucket.Path()
	// Allocate once with the exact final size, then append both parts.
	keyBytes := make([]byte, 0, len(bucketPath)+len(k.suffix))
	keyBytes = append(keyBytes, bucketPath...)
	return append(keyBytes, k.suffix...)
}
// String returns the hex encoding of the full key bytes.
func (k *Key) String() string {
	return hex.EncodeToString(k.Bytes())
}
// Bucket returns the key bucket, i.e. the prefix portion of the key.
func (k *Key) Bucket() model.DBBucket {
	return k.bucket
}
// Suffix returns the key suffix, i.e. the part of the key
// that follows the bucket path.
func (k *Key) Suffix() []byte {
	return k.suffix
}
// newKey returns a new key composed
// of the given bucket and suffix.
func newKey(bucket *Bucket, suffix []byte) model.DBKey {
	return &Key{bucket: bucket, suffix: suffix}
}
// Bucket is a helper type meant to combine buckets
// and sub-buckets that can be used to create database
// keys and prefix-based cursors.
type Bucket struct {
	path [][]byte // ordered bucket names from root to leaf
}
// MakeBucket creates a new Bucket using the given path
// of buckets, ordered from root to leaf.
func MakeBucket(path ...[]byte) model.DBBucket {
	return &Bucket{path: path}
}
// Bucket returns the sub-bucket of the current bucket
// defined by bucketBytes.
func (b *Bucket) Bucket(bucketBytes []byte) model.DBBucket {
	// Copy the existing path and extend it by one component,
	// leaving the receiver's path untouched.
	newPath := make([][]byte, len(b.path), len(b.path)+1)
	copy(newPath, b.path)
	newPath = append(newPath, bucketBytes)
	return MakeBucket(newPath...)
}
// Key returns a key in the current bucket with the
// given suffix.
func (b *Bucket) Key(suffix []byte) model.DBKey {
	return newKey(b, suffix)
}
// Path returns the full path of the current bucket:
// all components joined by the separator, with a trailing separator.
func (b *Bucket) Path() []byte {
	joined := bytes.Join(b.path, bucketSeparator)
	// Allocate the final buffer once, then append the joined path
	// followed by the terminating separator.
	fullPath := make([]byte, 0, len(joined)+len(bucketSeparator))
	fullPath = append(fullPath, joined...)
	return append(fullPath, bucketSeparator...)
}

View File

@ -67,7 +67,7 @@ func populateDatabaseForTest(t *testing.T, db database.Database, testName string
// Prepare a list of key/value pairs
entries := make([]keyValuePair, 10)
for i := 0; i < 10; i++ {
key := database.MakeBucket().Key([]byte(fmt.Sprintf("key%d", i)))
key := database.MakeBucket(nil).Key([]byte(fmt.Sprintf("key%d", i)))
value := []byte("value")
entries[i] = keyValuePair{key: key, value: value}
}

View File

@ -15,7 +15,7 @@ import (
)
func prepareCursorForTest(t *testing.T, db database.Database, testName string) database.Cursor {
cursor, err := db.Cursor(database.MakeBucket())
cursor, err := db.Cursor(database.MakeBucket(nil))
if err != nil {
t.Fatalf("%s: Cursor unexpectedly "+
"failed: %s", testName, err)
@ -241,7 +241,7 @@ func testCursorSeek(t *testing.T, db database.Database, testName string) {
// Seek to a value that doesn't exist and make sure that
// the returned error is ErrNotFound
err = cursor.Seek(database.MakeBucket().Key([]byte("doesn't exist")))
err = cursor.Seek(database.MakeBucket(nil).Key([]byte("doesn't exist")))
if err == nil {
t.Fatalf("%s: Seek unexpectedly "+
"succeeded", testName)
@ -274,7 +274,7 @@ func testCursorCloseErrors(t *testing.T, db database.Database, testName string)
{
name: "Seek",
function: func() error {
return cursor.Seek(database.MakeBucket().Key([]byte{}))
return cursor.Seek(database.MakeBucket(nil).Key([]byte{}))
},
},
{

View File

@ -18,7 +18,7 @@ func TestDatabasePut(t *testing.T) {
func testDatabasePut(t *testing.T, db database.Database, testName string) {
// Put value1 into the database
key := database.MakeBucket().Key([]byte("key"))
key := database.MakeBucket(nil).Key([]byte("key"))
value1 := []byte("value1")
err := db.Put(key, value1)
if err != nil {
@ -65,7 +65,7 @@ func TestDatabaseGet(t *testing.T) {
func testDatabaseGet(t *testing.T, db database.Database, testName string) {
// Put a value into the database
key := database.MakeBucket().Key([]byte("key"))
key := database.MakeBucket(nil).Key([]byte("key"))
value := []byte("value")
err := db.Put(key, value)
if err != nil {
@ -87,7 +87,7 @@ func testDatabaseGet(t *testing.T, db database.Database, testName string) {
// Try getting a non-existent value and make sure
// the returned error is ErrNotFound
_, err = db.Get(database.MakeBucket().Key([]byte("doesn't exist")))
_, err = db.Get(database.MakeBucket(nil).Key([]byte("doesn't exist")))
if err == nil {
t.Fatalf("%s: Get "+
"unexpectedly succeeded", testName)
@ -104,7 +104,7 @@ func TestDatabaseHas(t *testing.T) {
func testDatabaseHas(t *testing.T, db database.Database, testName string) {
// Put a value into the database
key := database.MakeBucket().Key([]byte("key"))
key := database.MakeBucket(nil).Key([]byte("key"))
value := []byte("value")
err := db.Put(key, value)
if err != nil {
@ -124,7 +124,7 @@ func testDatabaseHas(t *testing.T, db database.Database, testName string) {
}
// Make sure that Has returns false for a non-existent value
exists, err = db.Has(database.MakeBucket().Key([]byte("doesn't exist")))
exists, err = db.Has(database.MakeBucket(nil).Key([]byte("doesn't exist")))
if err != nil {
t.Fatalf("%s: Has "+
"unexpectedly failed: %s", testName, err)
@ -141,7 +141,7 @@ func TestDatabaseDelete(t *testing.T) {
func testDatabaseDelete(t *testing.T, db database.Database, testName string) {
// Put a value into the database
key := database.MakeBucket().Key([]byte("key"))
key := database.MakeBucket(nil).Key([]byte("key"))
value := []byte("value")
err := db.Put(key, value)
if err != nil {

View File

@ -1,11 +1,10 @@
package database
import (
"bytes"
"encoding/hex"
)
var bucketSeparator = []byte("/")
var bucketSeparator = byte('/')
// Key is a helper type meant to combine prefix
// and suffix into a single database key.
@ -48,23 +47,25 @@ func newKey(bucket *Bucket, suffix []byte) *Key {
// and sub-buckets that can be used to create database
// keys and prefix-based cursors.
type Bucket struct {
path [][]byte
path []byte
}
// MakeBucket creates a new Bucket using the given path
// of buckets.
func MakeBucket(path ...[]byte) *Bucket {
func MakeBucket(path []byte) *Bucket {
if len(path) > 0 && path[len(path)-1] != bucketSeparator {
path = append(path, bucketSeparator)
}
return &Bucket{path: path}
}
// Bucket returns the sub-bucket of the current bucket
// defined by bucketBytes.
func (b *Bucket) Bucket(bucketBytes []byte) *Bucket {
newPath := make([][]byte, len(b.path)+1)
copy(newPath, b.path)
copy(newPath[len(b.path):], [][]byte{bucketBytes})
return MakeBucket(newPath...)
newPath := make([]byte, 0, len(b.path)+len(bucketBytes)+1) // +1 for the separator in MakeBucket
newPath = append(newPath, b.path...)
newPath = append(newPath, bucketBytes...)
return MakeBucket(newPath)
}
// Key returns a key in the current bucket with the
@ -75,11 +76,5 @@ func (b *Bucket) Key(suffix []byte) *Key {
// Path returns the full path of the current bucket.
func (b *Bucket) Path() []byte {
bucketPath := bytes.Join(b.path, bucketSeparator)
bucketPathWithFinalSeparator := make([]byte, len(bucketPath)+len(bucketSeparator))
copy(bucketPathWithFinalSeparator, bucketPath)
copy(bucketPathWithFinalSeparator[len(bucketPath):], bucketSeparator)
return bucketPathWithFinalSeparator
return b.path
}

View File

@ -6,6 +6,10 @@ import (
"testing"
)
// makeBucketJoin builds a Bucket from multiple path components by joining
// them with the bucket separator, emulating the old variadic MakeBucket
// signature for the tests below.
func makeBucketJoin(path ...[]byte) *Bucket {
	return MakeBucket(bytes.Join(path, []byte{bucketSeparator}))
}
func TestBucketPath(t *testing.T) {
tests := []struct {
bucketByteSlices [][]byte
@ -23,14 +27,14 @@ func TestBucketPath(t *testing.T) {
for _, test := range tests {
// Build a result using the MakeBucket function alone
resultKey := MakeBucket(test.bucketByteSlices...).Path()
resultKey := makeBucketJoin(test.bucketByteSlices...).Path()
if !reflect.DeepEqual(resultKey, test.expectedPath) {
t.Errorf("TestBucketPath: got wrong path using MakeBucket. "+
"Want: %s, got: %s", string(test.expectedPath), string(resultKey))
}
// Build a result using sub-Bucket calls
bucket := MakeBucket()
bucket := MakeBucket(nil)
for _, bucketBytes := range test.bucketByteSlices {
bucket = bucket.Bucket(bucketBytes)
}
@ -63,14 +67,14 @@ func TestBucketKey(t *testing.T) {
key: []byte("test"),
expectedKeyBytes: []byte("hello/world/test"),
expectedKey: &Key{
bucket: MakeBucket([]byte("hello"), []byte("world")),
bucket: makeBucketJoin([]byte("hello"), []byte("world")),
suffix: []byte("test"),
},
},
}
for _, test := range tests {
resultKey := MakeBucket(test.bucketByteSlices...).Key(test.key)
resultKey := makeBucketJoin(test.bucketByteSlices...).Key(test.key)
if !reflect.DeepEqual(resultKey, test.expectedKey) {
t.Errorf("TestBucketKey: got wrong key. Want: %s, got: %s",
test.expectedKeyBytes, resultKey)

View File

@ -95,7 +95,7 @@ func TestCursorSanity(t *testing.T) {
validateCurrentCursorKeyAndValue(t, "TestCursorSanity", cursor, expectedKey, expectedValue)
// Seek to a non-existant key
err = cursor.Seek(database.MakeBucket().Key([]byte("doesn't exist")))
err = cursor.Seek(database.MakeBucket(nil).Key([]byte("doesn't exist")))
if err == nil {
t.Fatalf("TestCursorSanity: Seek " +
"unexpectedly succeeded")
@ -155,7 +155,7 @@ func TestCursorCloseErrors(t *testing.T) {
{
name: "Seek",
function: func(cursor database.Cursor) error {
return cursor.Seek(database.MakeBucket().Key([]byte{}))
return cursor.Seek(database.MakeBucket(nil).Key([]byte{}))
},
},
{
@ -186,7 +186,7 @@ func TestCursorCloseErrors(t *testing.T) {
defer teardownFunc()
// Open a new cursor
cursor, err := ldb.Cursor(database.MakeBucket())
cursor, err := ldb.Cursor(database.MakeBucket(nil))
if err != nil {
t.Fatalf("TestCursorCloseErrors: ldb.Cursor "+
"unexpectedly failed: %s", err)

View File

@ -35,7 +35,7 @@ func TestLevelDBSanity(t *testing.T) {
defer teardownFunc()
// Put something into the db
key := database.MakeBucket().Key([]byte("key"))
key := database.MakeBucket(nil).Key([]byte("key"))
putData := []byte("Hello world!")
err := ldb.Put(key, putData)
if err != nil {
@ -71,7 +71,7 @@ func TestLevelDBTransactionSanity(t *testing.T) {
}
// Put something into the transaction
key := database.MakeBucket().Key([]byte("key"))
key := database.MakeBucket(nil).Key([]byte("key"))
putData := []byte("Hello world!")
err = tx.Put(key, putData)
if err != nil {
@ -115,7 +115,7 @@ func TestLevelDBTransactionSanity(t *testing.T) {
// Case 2. Write directly to the DB and then read from a tx
// Put something into the db
key = database.MakeBucket().Key([]byte("key2"))
key = database.MakeBucket(nil).Key([]byte("key2"))
putData = []byte("Goodbye world!")
err = ldb.Put(key, putData)
if err != nil {

View File

@ -20,14 +20,14 @@ func TestTransactionCloseErrors(t *testing.T) {
{
name: "Put",
function: func(dbTx *LevelDBTransaction) error {
return dbTx.Put(database.MakeBucket().Key([]byte("key")), []byte("value"))
return dbTx.Put(database.MakeBucket(nil).Key([]byte("key")), []byte("value"))
},
shouldReturnError: true,
},
{
name: "Get",
function: func(dbTx *LevelDBTransaction) error {
_, err := dbTx.Get(database.MakeBucket().Key([]byte("key")))
_, err := dbTx.Get(database.MakeBucket(nil).Key([]byte("key")))
return err
},
shouldReturnError: true,
@ -35,7 +35,7 @@ func TestTransactionCloseErrors(t *testing.T) {
{
name: "Has",
function: func(dbTx *LevelDBTransaction) error {
_, err := dbTx.Has(database.MakeBucket().Key([]byte("key")))
_, err := dbTx.Has(database.MakeBucket(nil).Key([]byte("key")))
return err
},
shouldReturnError: true,
@ -43,7 +43,7 @@ func TestTransactionCloseErrors(t *testing.T) {
{
name: "Delete",
function: func(dbTx *LevelDBTransaction) error {
return dbTx.Delete(database.MakeBucket().Key([]byte("key")))
return dbTx.Delete(database.MakeBucket(nil).Key([]byte("key")))
},
shouldReturnError: true,
},

View File

@ -33,7 +33,7 @@ func testTransactionPut(t *testing.T, db database.Database, testName string) {
}()
// Put value1 into the transaction
key := database.MakeBucket().Key([]byte("key"))
key := database.MakeBucket(nil).Key([]byte("key"))
value1 := []byte("value1")
err = dbTx.Put(key, value1)
if err != nil {
@ -75,7 +75,7 @@ func TestTransactionGet(t *testing.T) {
func testTransactionGet(t *testing.T, db database.Database, testName string) {
// Put a value into the database
key1 := database.MakeBucket().Key([]byte("key1"))
key1 := database.MakeBucket(nil).Key([]byte("key1"))
value1 := []byte("value1")
err := db.Put(key1, value1)
if err != nil {
@ -111,7 +111,7 @@ func testTransactionGet(t *testing.T, db database.Database, testName string) {
// Try getting a non-existent value and make sure
// the returned error is ErrNotFound
_, err = dbTx.Get(database.MakeBucket().Key([]byte("doesn't exist")))
_, err = dbTx.Get(database.MakeBucket(nil).Key([]byte("doesn't exist")))
if err == nil {
t.Fatalf("%s: Get "+
"unexpectedly succeeded", testName)
@ -122,7 +122,7 @@ func testTransactionGet(t *testing.T, db database.Database, testName string) {
}
// Put a new value into the database outside of the transaction
key2 := database.MakeBucket().Key([]byte("key2"))
key2 := database.MakeBucket(nil).Key([]byte("key2"))
value2 := []byte("value2")
err = db.Put(key2, value2)
if err != nil {
@ -141,7 +141,7 @@ func testTransactionGet(t *testing.T, db database.Database, testName string) {
}
// Put a new value into the transaction
key3 := database.MakeBucket().Key([]byte("key3"))
key3 := database.MakeBucket(nil).Key([]byte("key3"))
value3 := []byte("value3")
err = dbTx.Put(key3, value3)
if err != nil {
@ -167,7 +167,7 @@ func TestTransactionHas(t *testing.T) {
func testTransactionHas(t *testing.T, db database.Database, testName string) {
// Put a value into the database
key1 := database.MakeBucket().Key([]byte("key1"))
key1 := database.MakeBucket(nil).Key([]byte("key1"))
value1 := []byte("value1")
err := db.Put(key1, value1)
if err != nil {
@ -201,7 +201,7 @@ func testTransactionHas(t *testing.T, db database.Database, testName string) {
}
// Make sure that Has returns false for a non-existent value
exists, err = dbTx.Has(database.MakeBucket().Key([]byte("doesn't exist")))
exists, err = dbTx.Has(database.MakeBucket(nil).Key([]byte("doesn't exist")))
if err != nil {
t.Fatalf("%s: Has "+
"unexpectedly failed: %s", testName, err)
@ -212,7 +212,7 @@ func testTransactionHas(t *testing.T, db database.Database, testName string) {
}
// Put a new value into the database outside of the transaction
key2 := database.MakeBucket().Key([]byte("key2"))
key2 := database.MakeBucket(nil).Key([]byte("key2"))
value2 := []byte("value2")
err = db.Put(key2, value2)
if err != nil {
@ -238,7 +238,7 @@ func TestTransactionDelete(t *testing.T) {
func testTransactionDelete(t *testing.T, db database.Database, testName string) {
// Put a value into the database
key := database.MakeBucket().Key([]byte("key"))
key := database.MakeBucket(nil).Key([]byte("key"))
value := []byte("value")
err := db.Put(key, value)
if err != nil {
@ -327,7 +327,7 @@ func testTransactionCommit(t *testing.T, db database.Database, testName string)
}()
// Put a value into the transaction
key := database.MakeBucket().Key([]byte("key"))
key := database.MakeBucket(nil).Key([]byte("key"))
value := []byte("value")
err = dbTx.Put(key, value)
if err != nil {
@ -388,7 +388,7 @@ func testTransactionRollback(t *testing.T, db database.Database, testName string
}()
// Put a value into the transaction
key := database.MakeBucket().Key([]byte("key"))
key := database.MakeBucket(nil).Key([]byte("key"))
value := []byte("value")
err = dbTx.Put(key, value)
if err != nil {
@ -448,7 +448,7 @@ func testTransactionRollbackUnlessClosed(t *testing.T, db database.Database, tes
}()
// Put a value into the transaction
key := database.MakeBucket().Key([]byte("key"))
key := database.MakeBucket(nil).Key([]byte("key"))
value := []byte("value")
err = dbTx.Put(key, value)
if err != nil {